Add Batch f6eca2a9-6852-46d0-87f5-25e91d98d85d
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- 3dp33dsceneperceptionviaprobabilisticprogramming/917057a9-37cc-4d2d-b034-76ea68e138b2_content_list.json +3 -0
- 3dp33dsceneperceptionviaprobabilisticprogramming/917057a9-37cc-4d2d-b034-76ea68e138b2_model.json +3 -0
- 3dp33dsceneperceptionviaprobabilisticprogramming/917057a9-37cc-4d2d-b034-76ea68e138b2_origin.pdf +3 -0
- 3dp33dsceneperceptionviaprobabilisticprogramming/full.md +222 -0
- 3dp33dsceneperceptionviaprobabilisticprogramming/images.zip +3 -0
- 3dp33dsceneperceptionviaprobabilisticprogramming/layout.json +3 -0
- 3dposetransferwithcorrespondencelearningandmeshrefinement/b0b60720-8d5e-49a5-8f88-e759ee2519c0_content_list.json +3 -0
- 3dposetransferwithcorrespondencelearningandmeshrefinement/b0b60720-8d5e-49a5-8f88-e759ee2519c0_model.json +3 -0
- 3dposetransferwithcorrespondencelearningandmeshrefinement/b0b60720-8d5e-49a5-8f88-e759ee2519c0_origin.pdf +3 -0
- 3dposetransferwithcorrespondencelearningandmeshrefinement/full.md +267 -0
- 3dposetransferwithcorrespondencelearningandmeshrefinement/images.zip +3 -0
- 3dposetransferwithcorrespondencelearningandmeshrefinement/layout.json +3 -0
- 3dsiamesevoxeltobevtrackerforsparsepointclouds/185adf91-1e9f-4eff-8042-c86b993ddd65_content_list.json +3 -0
- 3dsiamesevoxeltobevtrackerforsparsepointclouds/185adf91-1e9f-4eff-8042-c86b993ddd65_model.json +3 -0
- 3dsiamesevoxeltobevtrackerforsparsepointclouds/185adf91-1e9f-4eff-8042-c86b993ddd65_origin.pdf +3 -0
- 3dsiamesevoxeltobevtrackerforsparsepointclouds/full.md +313 -0
- 3dsiamesevoxeltobevtrackerforsparsepointclouds/images.zip +3 -0
- 3dsiamesevoxeltobevtrackerforsparsepointclouds/layout.json +3 -0
- a3dgenerativemodelforstructurebaseddrugdesign/c62c4fbc-171b-4007-927c-6f337110ede9_content_list.json +3 -0
- a3dgenerativemodelforstructurebaseddrugdesign/c62c4fbc-171b-4007-927c-6f337110ede9_model.json +3 -0
- a3dgenerativemodelforstructurebaseddrugdesign/c62c4fbc-171b-4007-927c-6f337110ede9_origin.pdf +3 -0
- a3dgenerativemodelforstructurebaseddrugdesign/full.md +252 -0
- a3dgenerativemodelforstructurebaseddrugdesign/images.zip +3 -0
- a3dgenerativemodelforstructurebaseddrugdesign/layout.json +3 -0
- abayesiansymbolicapproachtoreasoningandlearninginintuitivephysics/ed451712-3b1e-4fc2-9077-e985d17a8b0d_content_list.json +3 -0
- abayesiansymbolicapproachtoreasoningandlearninginintuitivephysics/ed451712-3b1e-4fc2-9077-e985d17a8b0d_model.json +3 -0
- abayesiansymbolicapproachtoreasoningandlearninginintuitivephysics/ed451712-3b1e-4fc2-9077-e985d17a8b0d_origin.pdf +3 -0
- abayesiansymbolicapproachtoreasoningandlearninginintuitivephysics/full.md +319 -0
- abayesiansymbolicapproachtoreasoningandlearninginintuitivephysics/images.zip +3 -0
- abayesiansymbolicapproachtoreasoningandlearninginintuitivephysics/layout.json +3 -0
- abiasedgraphneuralnetworksamplerwithnearoptimalregret/7e2acf6e-e260-4d15-8094-949aaa3f1fb0_content_list.json +3 -0
- abiasedgraphneuralnetworksamplerwithnearoptimalregret/7e2acf6e-e260-4d15-8094-949aaa3f1fb0_model.json +3 -0
- abiasedgraphneuralnetworksamplerwithnearoptimalregret/7e2acf6e-e260-4d15-8094-949aaa3f1fb0_origin.pdf +3 -0
- abiasedgraphneuralnetworksamplerwithnearoptimalregret/full.md +319 -0
- abiasedgraphneuralnetworksamplerwithnearoptimalregret/images.zip +3 -0
- abiasedgraphneuralnetworksamplerwithnearoptimalregret/layout.json +3 -0
- abilevelframeworkforlearningtosolvecombinatorialoptimizationongraphs/89571d73-fc11-46e1-8e19-aebbd46f5ab5_content_list.json +3 -0
- abilevelframeworkforlearningtosolvecombinatorialoptimizationongraphs/89571d73-fc11-46e1-8e19-aebbd46f5ab5_model.json +3 -0
- abilevelframeworkforlearningtosolvecombinatorialoptimizationongraphs/89571d73-fc11-46e1-8e19-aebbd46f5ab5_origin.pdf +3 -0
- abilevelframeworkforlearningtosolvecombinatorialoptimizationongraphs/full.md +387 -0
- abilevelframeworkforlearningtosolvecombinatorialoptimizationongraphs/images.zip +3 -0
- abilevelframeworkforlearningtosolvecombinatorialoptimizationongraphs/layout.json +3 -0
- acausallensforcontrollabletextgeneration/56b4d5c4-1abf-4e8e-b506-78488dbeb0e8_content_list.json +3 -0
- acausallensforcontrollabletextgeneration/56b4d5c4-1abf-4e8e-b506-78488dbeb0e8_model.json +3 -0
- acausallensforcontrollabletextgeneration/56b4d5c4-1abf-4e8e-b506-78488dbeb0e8_origin.pdf +3 -0
- acausallensforcontrollabletextgeneration/full.md +324 -0
- acausallensforcontrollabletextgeneration/images.zip +3 -0
- acausallensforcontrollabletextgeneration/layout.json +3 -0
- acentrallimittheoremfordifferentiallyprivatequeryanswering/becf66b0-4033-4827-93d2-82b61ec71945_content_list.json +3 -0
- acentrallimittheoremfordifferentiallyprivatequeryanswering/becf66b0-4033-4827-93d2-82b61ec71945_model.json +3 -0
3dp33dsceneperceptionviaprobabilisticprogramming/917057a9-37cc-4d2d-b034-76ea68e138b2_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:07ede6fd7498b350799441802a757821f62e605582a05b804d4f42c40eac73f3
|
| 3 |
+
size 83377
|
3dp33dsceneperceptionviaprobabilisticprogramming/917057a9-37cc-4d2d-b034-76ea68e138b2_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:97907f8751bfdb5d493d8a298d3d9888818e64081e27b37f8f1007238b2ecf1a
|
| 3 |
+
size 98115
|
3dp33dsceneperceptionviaprobabilisticprogramming/917057a9-37cc-4d2d-b034-76ea68e138b2_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fc90435756a4cf94718a238ff65daf8cf4175ac597a722199b8f0eb15d13b3d8
|
| 3 |
+
size 17207768
|
3dp33dsceneperceptionviaprobabilisticprogramming/full.md
ADDED
|
@@ -0,0 +1,222 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 3DP3: 3D Scene Perception via Probabilistic Programming
|
| 2 |
+
|
| 3 |
+
Nishad Gohtoskar
|
| 4 |
+
|
| 5 |
+
Marco Cusumano-Towner
|
| 6 |
+
|
| 7 |
+
Ben Zinberg
|
| 8 |
+
|
| 9 |
+
Matin Ghavamizadeh<sup>1</sup>
|
| 10 |
+
|
| 11 |
+
Falk Pollok²
|
| 12 |
+
|
| 13 |
+
Austin Garrett<sup>1</sup>
|
| 14 |
+
|
| 15 |
+
Joshua B. Tenenbaum<sup>1</sup>
|
| 16 |
+
|
| 17 |
+
Dan Gutfreund2
|
| 18 |
+
|
| 19 |
+
Vikash K. Mansinghka
|
| 20 |
+
|
| 21 |
+
$^{1}$ MIT
|
| 22 |
+
|
| 23 |
+
$^{2}$ MIT-IBM Watson AI Lab
|
| 24 |
+
|
| 25 |
+
{nishad,marcoct,bzinberg,mghavami,jbt,vkm}@mit.edu
|
| 26 |
+
|
| 27 |
+
{falk.pollok,austin garrett}@ibm.com dgutfre@us.ibm.com
|
| 28 |
+
|
| 29 |
+
# Abstract
|
| 30 |
+
|
| 31 |
+
We present 3DP3, a framework for inverse graphics that uses inference in a structured generative model of objects, scenes, and images. 3DP3 uses (i) voxel models to represent the 3D shape of objects, (ii) hierarchical scene graphs to decompose scenes into objects and the contacts between them, and (iii) depth image likelihoods based on real-time graphics. Given an observed RGB-D image, 3DP3's inference algorithm infers the underlying latent 3D scene, including the object poses and a parsimonious joint parametrization of these poses, using fast bottom-up pose proposals, novel involutive MCMC updates of the scene graph structure, and, optionally, neural object detectors and pose estimators. We show that 3DP3 enables scene understanding that is aware of 3D shape, occlusion, and contact structure. Our results demonstrate that 3DP3 is more accurate at 6DoF object pose estimation from real images than deep learning baselines and shows better generalization to challenging scenes with novel viewpoints, contact, and partial observability.
|
| 32 |
+
|
| 33 |
+
# 1 Introduction
|
| 34 |
+
|
| 35 |
+
A striking feature of human visual intelligence is our ability to learn representations of novel objects from a limited amount of data and then robustly percieve 3D scenes containing those objects. We can immediately generalize across large variations in viewpoint, occlusion, lighting, and clutter. How might we develop computational vision systems that can do the same?
|
| 36 |
+
|
| 37 |
+
This paper presents a generative model for 3D scene perception, called 3DP3. Object shapes are learned via probabilistic inference in a voxel occupancy model that coarsely captures 3D shape and uncertainty due to self-occlusion (Section 4). Scenes are modeled via hierarchical 3D scene graphs that can explain planar contacts between objects without forcing scenes to fit rigid structural assumptions (Section 3). Images are modeled by real-time graphics and robust likelihoods on point clouds. We cast 3D scene understanding as approximate probabilistic inference in this generative model. We develop a novel inference algorithm that combines data-driven Metropolis-Hastings kernels over object poses, involutive MCMC kernels over scene graph structure, pseudo-marginal integration over uncertain object shape, and existing deep learning object detectors and pose estimators (Section 5). This architecture leverages inference in the generative model to provide common sense constraints that fix errors made by bottom-up neural detectors. Our experiments show that 3DP3 is more accurate and robust than deep learning baselines at 6DoF pose estimation for challenging synthetic and real-world scenes (Section 6). Our model and inference algorithm are implemented in the Gen [11] probabilistic programming system.
|
| 38 |
+
|
| 39 |
+
# 2 Related Work
|
| 40 |
+
|
| 41 |
+
Analysis-by-synthesis approaches to computer vision A long line of work has interpreted computer vision as the inverse problem to computer graphics [22, 39, 26, 23]. This 'analysis-by-synthesis' approach has been used for various tasks including character recognition, CAPTCHA-breaking, lane detection, object pose estimation, and human pose estimation [40, 36, 27, 30, 18, 31]. To our knowledge, our work is the first to use an analysis-by-synthesis approach to infer a hierarchical 3D object-based representation of real multi-object scenes while exploiting inductive biases about the contacts between objects.
|
| 42 |
+
|
| 43 |
+
Hierarchical latent 3D scene representations We use a scene graph representation [41] that is closely related to hierarchical scene graph representations in computer graphics [9]. Unlike in graphics, we address the inverse problem of inferring hierarchical scene graphs from observed image data. Inferring hierarchical 3D scene graphs from RGB or depth images in a probabilistic framework is relatively unexplored. One concurrent<sup>1</sup> and independent work, Generative Scene Graph Networks (GSGN [13]), proposes a variational autoencoder architecture for decomposing images into objects and parts using a tree-structured latent scene graph that is similar to our scene graph representation. However, GSGN learns RGB appearance models of objects and their parts, uses an inference network instead of a hybrid of data-driven and model-based inference, was not evaluated on real images or scenes, and uses more restricted scene graphs that cannot represent objects with independent 6DoF pose. GSGN builds on an earlier deep generative model [17] that generates multi-object scenes but does not model dependencies between object poses and was not quantitatively evaluated on real 3D scenes. Incorporating a learned inference network for jointly proposing scene graphs into our framework is an interesting area for future work. The term 'scene graph' has also been used in computer vision to refer to various less related graph representations of scenes [3, 8, 32].
|
| 44 |
+
|
| 45 |
+
Probabilistic programming for computer vision Prior work has used probabilistic programs to represent generative models of images and implemented inference in these models using probabilistic programming systems [27, 25]. Unlike these prior works, which relied on manually specified and/or semi-parametric shape models, 3DP3 learns object shapes non-parametrically. 3DP3 also models occlusion of one 3D object by another; uses a novel hierarchical scene graph prior that allows for dependencies between object poses in the prior; uses a novel involutive MCMC [10] kernel for inferring scene graph structure; and uses a novel pseudo-marginal approach for handling uncertainty about object shape during inference. We also present a proof of concept that our system can infer the presence and pose of fully occluded objects.
|
| 46 |
+
|
| 47 |
+
6DoF object pose estimation We use 6DoF estimation of object pose from RGBD images as an example application. Registration of point clouds [5] can be used to estimate the 6DoF pose of objects with known 3D geometry from depth images. Many recent 6DoF object pose estimators use deep learning [38, 35] and many also take depth images [37, 34]. Some pose estimation methods model scene structure, contact relationships, stability, or other semantic information [21, 8, 24, 16, 4], and some use probabilistic inference [15, 7, 19, 14]. To our knowledge, we present the first 6DoF pose estimator that uses Bayesian inference about the structure of hierarchical 3D scene graphs.
|
| 48 |
+
|
| 49 |
+
Learning models of novel 3D objects Classic algorithms for structure-from-motion infer a 3D model of a scene from multiple images [33, 1]. Our approach for learning the shape of novel 3D objects produces coarse-grained probabilistic voxel models of objects that can represent uncertainty about the occupancy of self-occluded volumes. Integrating other representations of object shape and object appearance [29] with our scene graph representation is a promising area of future work.
|
| 50 |
+
|
| 51 |
+
# 3 3DP3 generative modeling framework
|
| 52 |
+
|
| 53 |
+
The core of 3DP3 is a generative modeling framework that represents a scene in terms of discrete objects, the 3D shape of each object, and a hierarchical structure called a scene graph that relates the poses (position and orientation) of the objects. This section describes 3DP3's object shape and scene graph latent representations, a family of prior distributions on these latent representations, and an observation model for image-based observations of scenes. Figure 1 shows the combined generative model written as a probabilistic program.
|
| 54 |
+
|
| 55 |
+

|
| 56 |
+
(a) Inferring a hierarchical 3D scene graph from an RGB-D image with 3DP3. Our model knows that objects often lay flat on other objects, which allows for the depth pixels of one object to inform the pose of other objects. Our algorithm also infers when this knowledge is relevant (e.g. the clamp on the left, represented by the purple node, is laying flat on the box), and when it is not (e.g. the clamp on the right, represented by the red node, is not laying flat on any other object).
|
| 57 |
+
|
| 58 |
+

|
| 59 |
+
(b) 3DP3 uses a structured generative model of 3D scenes, represented as a probabilistic program. The model uses a prior over object shapes that can be learned from data, a prior over scene structure that is a probability distribution on graphs, a traversal of the scene graph starting at the world node $r$ to compute object poses, and a robust likelihood model for depth images. In the graph at right, the world node $r$ (not shown) is the parent of the grey node (box) and the red node (right clamp) because those objects are not laying flat on other objects.
|
| 60 |
+
Figure 1: (a) A scene graph inference task and (b) the 3DP3 generative model.
|
| 61 |
+
|
| 62 |
+
# 3.1 Objects
|
| 63 |
+
|
| 64 |
+
The most basic element of our generative modeling framework are rigid objects. The first stage in our generative model prior encodes uncertainty about the 3D shape of $M$ types of rigid objects that may or may not be encountered in any given scene.
|
| 65 |
+
|
| 66 |
+
Voxel 3D object shapes We model the coarse 3D shape of rigid objects using a voxel grid with dimensions $h, w, l \in \mathbb{N}$ and cells indexed by $(i, j, \ell) \in [h] \times [w] \times [l]$ . Each cell has dimension $s \times s \times s$ for resolution $s \in \mathbb{R}^+$ , so that the entire voxel grid represents the cuboid $[0, h \cdot s] \times [0, w \cdot s] \times [0, l \cdot s]$ . All objects are assumed to fit within the cuboid. An object's shape is defined by a binary assignment $\mathbf{O} \in \{0, 1\}^{h \times w \times l}$ of occupancy states to each cell in the voxel grid, where $O_{ij\ell} = 1$ indicates that cell $(i, j, \ell)$ is occupied and $O_{ij\ell} = 0$ indicates it is free. Each object also has a finite set of contact planes through which the object may be in flush contact with the contact planes of other objects in physically stable scenes. For example, in Figure 2, the table has a contact plane for its top surface, the yellow sugar box has six contact planes, one for each of its six faces, and the bottom contact plane of the sugar box is in flush contact with the top contact plane of the table. The pose of a contact plane relative to its object is a function of the object shape $\mathbf{O}$ . To simplify notation, we denote the set of contact planes for any object by $F$ .
|
| 67 |
+
|
| 68 |
+
Prior distributions on 3D object shape We assume there are $M$ distinct object types, and each object type $m \in \{1, \ldots, M\}$ has an a-priori unknown shape, denoted $\mathbf{O}^{(m)}$ . Let $\mathbf{O}^{(1:M)} :=$
|
| 69 |
+
|
| 70 |
+

|
| 71 |
+
Figure 2: Our hierarchical scene graphs encode a tree of coordinate frames representing entities in a scene and their geometric relationships (e.g. flush contact between faces of two objects).
|
| 72 |
+
|
| 73 |
+
$(\mathbf{O}^{(1)}, \ldots, \mathbf{O}^{(M)})$ . The prior distribution on the shape of each object type $m$ is denoted $p(\mathbf{O}^{(m)})$ . Although our inference algorithm (Section 5) only requires the ability to sample jointly from $p(\mathbf{O}^{(1:M)})$ , we assume shapes of object types are independent in the prior ( $p(\mathbf{O}^{(1:M)}) = \prod_{m=1}^{M} p(\mathbf{O}^{(m)})$ ). Section 4 shows how to learn a specific shape prior $p(\mathbf{O}^{(m)})$ for an object type from depth images.
|
| 74 |
+
|
| 75 |
+
# 3.2 Scenes
|
| 76 |
+
|
| 77 |
+
Given a collection of $M$ known object types and their shapes, our model generates scenes with $N$ objects by randomly selecting an object type for each object and then sampling a 6DoF object pose for each object. Instead of assuming that object poses are independent, our model encodes an inductive bias about the regularities in real-world scenes: objects are often resting in flush contact with other objects (e.g. see Figure 2). We jointly sample dependent object poses using a flexible hierarchical scene graph, while maintaining uncertainty over the structure of the graph.
|
| 78 |
+
|
| 79 |
+
Hierarchical scene graphs We model the geometric state of a scene as a scene graph $\mathcal{G}$ (Figure 2), which is a tuple $\mathcal{G} = (G, \theta)$ where $G = (V, E)$ is a directed rooted tree and $\theta$ are parameters. The vertices $V := \{r, v_1, \ldots, v_N\}$ represent $N + 1$ 3D coordinate frames, with $r$ representing the world coordinate frame. An edge $(u, v) \in E$ indicates that coordinate frame $v$ is parametrized relative to frame $u$ , with parameters $\theta_v$ . The 6DoF pose of frame $v$ relative to frame $u$ with pose $\mathbf{x}_u \in SE(3)$ is given by a function $\Delta \mathbf{x}_v$ , where $\Delta \mathbf{x}_v(\theta_v) \in SE(3)$ and $\mathbf{x}_v := \mathbf{x}_u \cdot \Delta \mathbf{x}_v(\theta_v)$ . Here, $\cdot$ is the $SE(3)$ group operation, and the world coordinate frame is defined as the identity element $(\mathbf{x}_r := I)$ .
|
| 80 |
+
|
| 81 |
+
Modeling flush contact between rigid objects While the vertices of scene graphs $\mathcal{G}$ can represent arbitrary coordinate frames in a scene (e.g. the coordinate frames of articulated joints, object poses), in the remainder of this paper we assume that each vertex $v\in V\setminus \{r\}$ corresponds to the pose of a rigid object. We index objects by $1,\ldots ,N$ , with corresponding vertices $v_{1},\dots,v_{N}$ . We assume that each object $i$ has an object type $c_{i}\in \{1,\dots ,M\}$ . For vertices $v$ that are children of the root vertex $r$ $\theta_v\in SE(3)$ defines the absolute 6DoF pose of the corresponding object $(\Delta \mathbf{x}_v(\theta_v) = \theta_v)$ . For vertices $v$ that are children of a non-root vertex $u$ , the parameters take the form $\theta_v = (f_v,f_v',a_v,b_v,z_v,\phi_v)$ and represent a contact relationship between the two objects: $f_{v}$ and $f_{v}^{\prime}$ indicate which contact planes of the parent and child objects, respectively, are in contact. $(a_v,b_v)\in \mathbb{R}^2$ is the in-plane offset of the origin of plane $f_{v}$ of object $v$ from the origin of plane $f_{v}^{\prime}$ of object $u$ $z_{v}\in \mathbb{R}$ is the perpendicular distance of the origin of plane $f_{v}$ of object $v$ from plane $f_{v}^{\prime}$ of object $u$ $\phi_v\in S^2\times S^1$ represents the deviation of the normal vectors of the two contact planes from anti-parallel (in $S^2$ ) and a relative in-plane rotation of the two contact planes (in $S^1$ ). The relative pose $\Delta \mathbf{x}_v(\theta_v)$ of $v$ with respect to $u$ is the composition (in $SE(3)$ ) of three relative poses: (i) $v$ with respect to its plane $f_{v}$ , (ii) $v$ 's plane $f_{v}$ with respect to $u$ 's plane $f_{v}^{\prime}$ , and (iii) $u$ 's plane $f_{v}^{\prime}$ with respect to $u$ . The 6DoF poses of all objects ( $\mathbf{x}_v$ for $v\in V\setminus \{r\}$ ) are computed by traversing the scene graph while taking products of relative poses along paths from the root $r$
|
| 82 |
+
|
| 83 |
+
Prior distributions on scene graphs We now describe our prior on scene graphs, given object models $\mathbf{O}^{(1:M)}$ . We assume the number of objects $N$ in the scene is known (see the supplement for a generalization to unknown $N$ ). We first sample the types $c_{i}\in \{1,\ldots ,M\}$ of all objects from an
|
| 84 |
+
|
| 85 |
+
exchangeable distribution $p(\mathbf{c})$ where $\mathbf{c} \coloneqq (c_1, \ldots, c_N)$ . This includes as a special case distributions where all types are represented at most once among the objects $(\sum_{i=1}^{N} \mathbf{1}[c_i = c] \leq 1)$ , which is the case in our experiments. Next, we sample the scene graph structure $G$ from $p(G)$ . We experiment with two priors $p(G)$ : (i) a uniform distribution on the set of $(N + 1)^{N - 1}$ directed trees that are rooted at a vertex $r$ , and (ii) $\delta_{G_0}(G)$ , where $G_0$ is a graph on $N + 1$ vertices where $(r, v) \in E$ for all $v \in V \setminus \{r\}$ so that each object vertex has an independent 6DoF pose. For objects whose parent is $r$ (the world coordinate frame), we sample the pose $\theta_v \sim p_{\mathrm{unif}}$ , which samples the translation component uniformly from a cuboid scene extent, and the orientation uniformly over $SO(3)$ . For objects whose parent is another object $u$ , we sample the choice of contact planes $(f_v, f_v') \in F \times F$ uniformly, $(a_v, b_v) \sim \mathrm{Uniform}([-50\mathrm{cm}, 50\mathrm{cm}]^2)$ , $z_v \sim \mathrm{N}(0, 1\mathrm{cm})$ , the $S^2$ component of $\phi_v$ from a von Mises-Fisher (vMF) distribution concentrated $(\kappa = 250)$ on anti-parallel plane normals, and the $S^1$ component from $\mathrm{Uniform}(S^1)$ . We denote this distribution $p_{\mathrm{cont}}(\theta_v)$ . Note that the parameters of $p_{\mathrm{cont}}$ were not tuned or tailored in any detailed way—they were chosen heuristically based on the rough dimensions of table-top objects. The resulting prior over all of the latent variables is:
|
| 86 |
+
|
| 87 |
+
$$
|
| 88 |
+
p(\mathbf{O}^{(1:M)},\mathbf{c},G,\boldsymbol {\theta}) = \left(\prod_{m = 1}^{M}p(\mathbf{O}^{(m)})\right)\frac{1}{(N + 1)^{N - 1}} p(\mathbf{c})\prod_{\substack{v\in V:\\ (r,v)\in E}}p_{\text{unif}}(\theta_{v})\prod_{\substack{(u,v)\in E:\\ u\neq r}}p_{\text{cont}}(\theta_{v}) \tag{1}
|
| 89 |
+
$$
|
| 90 |
+
|
| 91 |
+
# 3.3 Images
|
| 92 |
+
|
| 93 |
+
Our generative model uses an observation model that generate synthetic image data given object shapes $\mathbf{O}^{(1:M)}$ and a scene graph $\mathcal{G}$ containing $N$ objects. We now describe the observation model for depth images that is used in our main experiments (Section 6).
|
| 94 |
+
|
| 95 |
+
Likelihood model for depth images We first convert an observed depth image into a point cloud $\mathbf{Y}$ . To model a point cloud $\mathbf{Y} \in \mathbb{R}^{K \times 3}$ with $K$ points denoted $\mathbf{y}_i \in \mathbb{R}^3$ , we use a likelihood model based on rendering a synthetic depth image of the scene graph. Specifically, given the object models $\mathbf{O}^{(m)}$ for each $m \in \{1, \dots, M\}$ , the object types $\mathbf{c}$ , the scene graph $\mathcal{G}$ , and the camera intrinsic and extrinsic parameters relative to the world frame, we (i) compute meshes from each $\mathbf{O}^{(m)}$ , (ii) compute the 6DoF poses $(\mathbf{x}_v)$ of objects with respect to the world frame by traversing the scene graph $\mathcal{G}$ , and (iii) render a depth image $\tilde{\mathbf{I}}$ of $\mathcal{G}$ using an OpenGL depth buffer, and (iv) unproject the rendered depth image to obtain a point cloud $\tilde{\mathbf{Y}}$ with $\tilde{K}$ points ( $\tilde{K}$ is the number of pixels in the depth image). We then generate an observed point cloud $\mathbf{Y} \in \mathbb{R}^{K \times 3}$ by drawing each point from a mixture:
|
| 96 |
+
|
| 97 |
+
$$
|
| 98 |
+
p \left(\mathbf {Y} \mid \mathbf {O} ^ {(1: M)}, \mathbf {c}, G, \boldsymbol {\theta}\right) := \prod_ {i = 1} ^ {K} \left(C \cdot \frac {1}{B} + \frac {1 - C}{\tilde {K}} \sum_ {j = 1} ^ {\tilde {K}} \frac {\mathbf {1} [ \| \mathbf {y} _ {i} - \tilde {\mathbf {y}} _ {j} \| _ {2} \leq r ]}{\frac {4}{3} \pi r ^ {3}}\right) \tag {2}
|
| 99 |
+
$$
|
| 100 |
+
|
| 101 |
+
for some $0 < C < 1$ and some $r > 0$ . The components of this mixture are uniform distributions over the balls of radius $r$ centered at each point in $\tilde{\mathbf{Y}}$ (with weights $(1 - C) / \tilde{K}$ ) and a uniform distribution over the scene bounding volume $B$ (weight $C$ ).<sup>2</sup>
|
| 102 |
+
|
| 103 |
+
# 4 Learning object shape models
|
| 104 |
+
|
| 105 |
+
3DP3 does not require hard-coded shape models. Instead, it uses probabilistic inference to learn non-parametric models of 3D object shape $p(\mathbf{O}^{(m)})$ that account for uncertainty due to self-occlusion. We focus on the restricted setting of learning from scenes containing a single isolated object ( $N = 1$ ) of known type ( $c_1$ ). Our approach works best for views that lead to minimal uncertainty about the exterior shape of the object; more general, flexible treatments of shape learning and shape uncertainty are beyond the scope of this paper.
|
| 106 |
+
|
| 107 |
+
First, we group the depth images by the object type $(c_{1})$ , so that we have $M$ independent learning problems. Let $\mathbf{I}_{1:T} \coloneqq (\mathbf{I}_1, \ldots, \mathbf{I}_T)$ denote the depth observations for one object type, with object shape denoted $\mathbf{O}$ . The learning algorithm uses Bayesian inference in another generative model $p'$ . The posterior $p'(\mathbf{O}|\mathbf{I}_{1:T})$ produced by this algorithm becomes the prior $p(\mathbf{O})$ used in Section 3.1.
|
| 108 |
+
|
| 109 |
+

|
| 110 |
+
Figure 3: Learning a voxel-based shape models $p(\mathbf{O}^{(m)})$ for a novel object from a set of 5 depth images. Our shape priors capture uncertainty about voxel occupancy due to self-occlusion (right).
|
| 111 |
+
|
| 112 |
+
We start with a uninformed prior distribution $p^{\prime}(\mathbf{O}) \coloneqq \prod_{i = 1}^{h}\prod_{j = 1}^{w}\prod_{\ell = 1}^{l}p_{\mathrm{occ}}^{O_{ij\ell}}(1 - p_{\mathrm{occ}})^{(1 - O_{ij\ell})}$ on the 3D shape of an object type, for a per-voxel occupancy probability $p_{\mathrm{occ}}$ (in our experiments, 0.5). We learn about the object's shape by observing a sequence of depth images $\mathbf{I}_{1:T}$ that contain views of the object, which is assumed to be static relative to other contents of the scene, which we call the 'map' $\mathbf{M}$ . (In our experiments the map contains the novel object, a floor, a ceiling, and four walls of a rectangular room). We posit the following joint distribution over object shape $(\mathbf{O})$ and the observed depth images, conditioned on the map $(\mathbf{M})$ and the poses of the camera relative to the map over time $(\mathbf{x}_1,\dots ,\mathbf{x}_T\in SE(3))$ : $p^{\prime}(\mathbf{O},\mathbf{I}_{1:T}|\mathbf{M},\mathbf{x}_{1:T}) \coloneqq p^{\prime}(\mathbf{O})\prod_{t = 1}^{T}p^{\prime}(\mathbf{I}_t|\mathbf{O},\mathbf{M},\mathbf{x}_t)$ .
|
| 113 |
+
|
| 114 |
+
The likelihood $p'$ is a depth image likelihood on a latent 3D voxel occupancy grid (see supplement for details). For this model, we can compute $p'(\mathbf{O}|\mathbf{M},\mathbf{x}_{1:T},\mathbf{I}_{1:T}) = \prod_{ij\ell}p'(O_{ij\ell}|\mathbf{M},\mathbf{x}_{1:T},\mathbf{I}_{1:T})$ exactly using ray marching to decide if a voxel cell is occupied, unoccupied, or unobserved (due to being occluded by another occupied cell), and the resulting distribution on $\mathbf{O}$ can be compactly represented as an array of probabilities $(\in [0,1]^{h\times w\times l})$ . However, in real-world scenarios the map $\mathbf{M}$ and the camera poses $\mathbf{x}_{1:T}$ are not known with certainty. To handle this, our algorithm takes as input uncertain beliefs about $\mathbf{M}$ and $\mathbf{x}_{1:T}$ ( $q_{\mathrm{SLAM}}(\mathbf{M},\mathbf{x}_{1:T}) \approx p'(\mathbf{M},\mathbf{x}_{1:T}|\mathbf{I}_{1:T})$ ) that are produced by a separate probabilistic SLAM (simultaneous localization and mapping) module, and take the form of a weighted collection of $K$ particles $(\mathbf{M}^{(k)},\mathbf{x}_{1:T}^{(k)})$ : $q_{\mathrm{SLAM}}(\mathbf{M},\mathbf{x}_{1:T}) = \sum_{k=1}^{K}w_k\delta_{\mathbf{M}^{(k)}}(\mathbf{M})\delta_{\mathbf{x}_{1:T}^{(k)}}(\mathbf{x}_{1:T})$ . Various approaches to probabilistic SLAM can be used; we implemented it using sequential Monte Carlo (SMC) in Gen (more detail in supplement). From the beliefs $q_{\mathrm{SLAM}}(\mathbf{M},\mathbf{x}_{1:T})$ produced by SLAM, we approximate the object shape posterior via:
|
| 115 |
+
|
| 116 |
+
$$
|
| 117 |
+
\hat {p} ^ {\prime} (\mathbf {O} | \mathbf {I} _ {1: T}) := \iint p ^ {\prime} (\mathbf {O} | \mathbf {M}, \mathbf {x} _ {1: T}, \mathbf {I} _ {1: T}) q _ {\mathrm {S L A M}} (\mathbf {M}, \mathbf {x} _ {1: T}) d \mathbf {M} d \mathbf {x} _ {1: T} = \sum_ {k = 1} ^ {K} w _ {k} p ^ {\prime} (\mathbf {O} | \mathbf {M} ^ {(k)}, \mathbf {x} _ {1: T} ^ {(k)}, \mathbf {I} _ {1: T})
|
| 118 |
+
$$
|
| 119 |
+
|
| 120 |
+
Note that while $p^{\prime}(\mathbf{O}|\mathbf{M}^{(k)},\mathbf{x}_{1:T}^{(k)},\mathbf{I}_{1:T})$ for each $k$ can be compactly represented, the mixture distribution $\hat{p}^{\prime}(\mathbf{O}|\mathbf{I}_{1:T})$ lacks the conditional independencies that make this possible. To produce a more compact representation of beliefs about the object's shape, we fit a variational approximation $q_{\varphi}(\mathbf{O})$ that assumes independence among voxels $(q_{\varphi}(\mathbf{O}) := \prod_{i\in [h]}\prod_{j\in [w]}\prod_{\ell \in [l]}\varphi_{ij\ell}^{O_{ij\ell}}\cdot (1 - \varphi_{ij\ell})^{(1 - O_{ij\ell})})$ to $\hat{p}^{\prime}(\mathbf{O}|\mathbf{I}_{1:T})$ using $\varphi^{*} := \arg \min_{\varphi}\mathrm{KL}(\hat{p}^{\prime}(\mathbf{O}|\mathbf{I}_{1:T})||q_{\varphi}(\mathbf{O}))$ (see supplement for details). This choice of variational family is sufficient for representing uncertainty about the occupancy of voxels in the interior of an object shape. Note that our shape-learning experiments did not result in significant uncertainty about the exterior shape of objects<sup>3</sup>, and in the presence of such uncertainty, a less severe variational approximation may be needed for robust inference of scene graphs from depth images. Fig. 3 shows input depth images $(\mathbf{I}_{1:T})$ and resulting shape prior learned from $T = 5$ observations. After learning these shape distributions $q_{\varphi}(\mathbf{O}) \approx \hat{p}^{\prime}(\mathbf{O}|\mathbf{I}_{1:T})$ for each distinct object type, we use them as the shape priors $p(\mathbf{O}_i)$ within the generative model of Section 3. The supplement includes the results of a quantitative evaluation of the accuracy of shape learning.
|
| 121 |
+
|
| 122 |
+

|
| 123 |
+
Figure 4: A reversible transition between scene graph structure $G$ and scene graph structure $G'$ .
|
| 124 |
+
|
| 125 |
+
# 5 Building blocks for approximate inference algorithms
|
| 126 |
+
|
| 127 |
+
This section first describes a set of building blocks for approximate inference algorithms that are based on the generative model of Section 3. We then describe how to combine these components into a scene graph inference algorithm that we evaluate in Section 6.
|
| 128 |
+
|
| 129 |
+
Trained object detectors It is possible to infer the types of objects in the scene (c) via Bayesian inference in the generative model (see supplement for an example that infers c as well as $N$ in a scene with a fully occluded object, via Bayesian inference). However, for scenes where objects are not fully or nearly-fully occluded, and where object types have dissimilar appearance, it is possible to train fast object detectors that produce an accurate point estimate of c given an RGB image.
|
| 130 |
+
|
| 131 |
+
Trained pose estimators In scenes without full or nearly-full occlusion, it is also possible to employ trained pose estimation methods [37] to give independent estimates of the 6DoF pose of each object instance in the image. However, inferring pose is more challenging than inferring c, and occlusion, self-occlusion, and symmetries can introduce significant pose uncertainty. Therefore, we only use trained pose estimators (e.g. [37]) to (optionally) initialize the poses of objects before Bayesian inference in the generative model, using the building blocks below.
|
| 132 |
+
|
| 133 |
+
Data-driven Metropolis-Hastings kernels on object pose We employ Metropolis-Hastings (MH) kernels, parametrized by choice of object $i\in \{1,\dots ,N\}$ , that take as input a scene graph $\mathcal{G}$ , propose new values $(\theta_{v_i}^{\prime})$ for the scene graph parameters of object $i$ , construct a new proposed scene graph $\mathcal{G}'$ , and then accept or reject the move from $\mathcal{G}$ to $\mathcal{G}'$ based on the MH rule. For objects $v$ whose parent is the world frame $((r,v)\in E)$ , we use a data-driven proposal distribution centered on an estimate $(\hat{\mathbf{x}}_v)$ of the 6DoF object pose obtained with ICP (a spherical normal distribution concentrated around the estimated position, and a vMF distribution concentrated around the estimated orientation). We also use kernels with random-walk proposals centered on the current pose. For objects whose parent is another object $((u,v)\in E$ for $u\neq r$ ), we use a random-walk proposal on parameters $(a_{v_i},b_{v_i},z_{v_i})$ . Note that when the pose of an object is changed in the proposed graph $\mathcal{G}'$ , the pose of any descendant objects is also changed. Each of these MH kernels is invariant with respect to $p(G,\boldsymbol {\theta}|\mathbf{c},\mathbf{Y})$ .
|
| 134 |
+
|
| 135 |
+
Involutive MCMC kernel on scene graph structure To infer the scene graph structure $G$ , we employ a family of involutive MCMC kernels [10] that propose a new graph structure $G'$ while keeping the poses $(\mathbf{x}_v)$ of all objects fixed. The kernel takes a graph structure $G$ and proposes a new graph structure $G'$ (Figure 4) by: (i) randomly sampling a node $v \in V \setminus \{r\}$ to 'sever' from the tree, (ii) randomly choosing a node $u \in V \setminus \{v\}$ that is not a descendant of the severed node on which to graft $v$ , (iii) forming a new directed graph $G'$ over vertices $V$ by grafting $v$ to $u$ ; by Lemma O.7.1 the resulting graph $G'$ is also a tree. Note that there is an involution $g$ on the set of all pairs $(G, v, u)$ satisfying the above constraints. That is, if $(G', v', u') = g(G, v, u)$ then $(G, v, u) = g(G', v', u')$ . (This implies, for example, that $u'$ is the parent of $v$ in $G$ .) Note that this set of transitions is capable of changing the parent vertex of an object to a different parent object, changing the parent vertex of an object from the root (world frame) to any other object, or changing the parent vertex from another object to the root, depending on the random choice of $v$ and $u$ . We compute new values for parameters $(\theta_v)$ for the severed node $v$ and possibly other vertices such that the poses of all vertices are unchanged. See supplement for the full kernel and a proof that it is invariant w.r.t. $p(G, \theta | \mathbf{c}, \mathbf{Y})$ .
|
| 136 |
+
|
| 137 |
+

|
| 138 |
+
Figure 5: Qualitative comparison between DenseFusion's pose estimates (top row) and estimates from 3DP3-based algorithm that is initialized with DenseFusion (bottom row) for YCB-Video frames where DenseFusion gives incorrect results. 3DP3's depth-rendering likelihood and scene graph prior can correct large errors made by DenseFusion.
|
| 139 |
+
|
| 140 |
+
Approximately Rao-Blackwellizing object shape via pseudo-marginal MCMC The acceptance probability expressions for our involutive MCMC and MH kernels targeting $p(G, \theta | \mathbf{c}, \mathbf{Y})$ include factors of the form $p(\mathbf{Y} | \mathbf{c}, G, \theta)$ , which is an intractable sum over the latent object models: $p(\mathbf{Y} | \mathbf{c}, G, \theta) = \sum_{\mathbf{O}^{(1:M)}} p(\mathbf{O}^{(1:M)}) p(\mathbf{Y} | \mathbf{O}^{(1:M)}, \mathbf{c}, G, \theta)$ . To overcome this challenge, we employ a pseudo-marginal MCMC approach [2] that uses unbiased estimates of $p(\mathbf{Y} | \mathbf{c}, G, \theta)$ obtained via likelihood weighting (that is, sampling several times from $p(\mathbf{O}^{(1:M)})$ and averaging the resulting $p(\mathbf{Y} | \mathbf{O}^{(1:M)}, \mathbf{c}, G, \theta)$ ). The resulting MCMC kernels are invariant with respect to an extended target distribution of which $p(G, \theta | \mathbf{c}, \mathbf{Y})$ is a marginal (see supplement for details). We implemented an optimization where we sampled 5 values for $\mathbf{O}^{(1:M)}$ and used these samples within every estimate of $p(\mathbf{Y} | \mathbf{c}, G, \theta)$ instead of sampling new values for each estimate. Because our learned shape priors did not have significant exterior shape uncertainty, this optimization did not negatively impact the results.
|
| 141 |
+
|
| 142 |
+
Scene graph inference and implementation The end-to-end scene graph inference algorithm has three stages. First, we obtain $\mathbf{c}$ from either an object detector or because it is given as part of the task (this is the case in our experiments; see Section 6 for details). Second, we obtain initial estimates $\hat{\mathbf{x}}_v$ of 6DoF object poses $\mathbf{x}_v$ for all object vertices $v$ via maximum-a-posteriori (MAP) inference in a restricted variant of the generative model with graph structure $G$ fixed to $G_{0}$ (so there are no edges between object vertices). This MAP inference stage uses the data-driven Metropolis-Hastings kernels on poses, and (optionally) trained pose estimators (see Section 6 for the details, which differ between experiments). Third, we use the estimated poses to initialize an MCMC algorithm targeting $p(G,\boldsymbol {\theta}|\mathbf{c},\mathbf{Y})$ with state $G\gets G_0$ and $\theta_v\gets \hat{\mathbf{x}}_v$ for each $v\in V\setminus \{r\}$ . The Markov chain is a cycle of the involutive MCMC kernel described above with a mixture of the Metropolis-Hastings kernels described above, uniformly mixed over objects. We wrote the probabilistic program of Figure 1 in Gen's built-in modeling language. We implemented the data-driven and involutive MCMC kernels, and pseudo-marginal likelihood, and integrated all components together, using Gen's programmable inference support. Our code is available at https://github.com/probcomp/ThreeDP3.
|
| 143 |
+
|
| 144 |
+
# 6 Experiments
|
| 145 |
+
|
| 146 |
+
We evaluate our scene graph inference algorithm on the YCB-Video [6] dataset consisting of real RGB-D images and YCB-Challenging, our own synthetic dataset of scenes containing novel viewpoints, occlusions, and contact structure. We use the evaluation protocol of the Benchmark for 6DoF Object Pose Estimation (BOP) Challenge [20], in which an RGB-D image and the number of objects in the scene and their types are given, and the task is to estimate the 6DoF pose of each object.
|
| 147 |
+
|
| 148 |
+
# 6.1 Pose estimation from real RGB-D images
|
| 149 |
+
|
| 150 |
+
YCB-Video is a standard robotics dataset for training and evaluating 3D perception systems [6]. We first learn shape priors (Section 4) from just 5 synthetic images for each object type. We use DenseFusion [37], a neural 6DoF pose estimator, for pose initialization in the MAP phase of our inference algorithm. To measure pose estimation accuracy, we use the average closest point distance (ADD-S [38, 37]) which estimates the average closest point distance between points on the object model placed at the predicted pose and points on the model placed at the ground-truth pose. Table 1
|
| 151 |
+
|
| 152 |
+
<table><tr><td></td><td></td><td colspan="3">0.5cm Threshold</td><td colspan="3">1.0cm Threshold</td><td colspan="3">2.0cm Threshold</td></tr><tr><td rowspan="2">Object Type</td><td rowspan="2"># of Scenes</td><td colspan="3">Accuracy</td><td colspan="3">Accuracy</td><td colspan="3">Accuracy</td></tr><tr><td>3DP3</td><td>3DP3*</td><td>DF</td><td>3DP3</td><td>3DP3*</td><td>DF</td><td>3DP3</td><td>3DP3*</td><td>DF</td></tr><tr><td>002/master_chef_can</td><td>1006</td><td>0.74</td><td>0.79</td><td>0.84</td><td>0.99</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>003_cracker_box</td><td>868</td><td>0.90</td><td>0.83</td><td>0.79</td><td>0.99</td><td>0.98</td><td>0.97</td><td>0.99</td><td>0.99</td><td>0.99</td></tr><tr><td>004_sugar_box</td><td>1182</td><td>1.00</td><td>0.99</td><td>0.98</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>005_tomato_soup_can</td><td>1440</td><td>0.95</td><td>0.93</td><td>0.93</td><td>0.97</td><td>0.97</td><td>0.97</td><td>0.97</td><td>0.97</td><td>0.97</td></tr><tr><td>006_mustard_bottle</td><td>357</td><td>0.99</td><td>0.98</td><td>0.94</td><td>0.99</td><td>0.99</td><td>0.98</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>007_tuna_fish_can</td><td>1148</td><td>0.81</td><td>0.80</td><td>0.91</td><td>1.00</td><td>1.00</td><td>0.99</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>008_pudding_box</td><td>214</td><td>1.00</td><td>0.97</td><td>0.70</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>009_gelatin_box</td><td>214</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>010_potted.meat_can</td><td>766</td><td>0.80</td><td>0.78</td><td>0.79</td><td>0.89</td><td>0.88</td><td>0.87</td><td>0.93</td><td>0.93</td><td>0.92</td></tr><tr><td>011_banana</td><td>379</td><td>0.98</td><td>0.96</td><td>0.82</td><td>1.00</td><td>1.00</td><td>0.97</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>019_pitcher_base</td><td>570</td><td>1.00</td><td>0.99</td><td>0.99</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>021_bleach_cleanser</td><td>1029</td><td>0.94</td><td>0.88</td><td>0.80</td><td>1.00</td><td>1.00</td><td>0.99</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>024_bowl</td><td>406</td><td>0.93</td><td>0.87</td><td>0.50</td><td>0.96</td><td>0.96</td><td>0.56</td><td>0.96</td><td>0.96</td><td>0.94</td></tr><tr><td>025_mug</td><td>636</td><td>0.89</td><td>0.89</td><td>0.92</td><td>0.98</td><td>0.98</td><td>0.99</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>035_power_d Drill</td><td>1057</td><td>0.98</td><td>0.96</td><td>0.88</td><td>0.99</td><td>0.99</td><td>0.98</td><td>0.99</td><td>0.99</td><td>0.99</td></tr><tr><td>036木质_block</td><td>242</td><td>0.36</td><td>0.33</td><td>0.07</td><td>0.96</td><td>0.93</td><td>0.88</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>037_scissors</td><td>181</td><td>0.75</td><td>0.69</td><td>0.20</td><td>0.87</td><td>0.84</td><td>0.70</td><td>0.99</td><td>0.99</td><td>0.98</td></tr><tr><td>040_large 
Marker</td><td>648</td><td>1.00</td><td>1.00</td><td>0.99</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>051_large_clamp</td><td>712</td><td>0.68</td><td>0.64</td><td>0.25</td><td>0.71</td><td>0.70</td><td>0.33</td><td>0.79</td><td>0.79</td><td>0.79</td></tr><tr><td>052extra_large_clamp</td><td>682</td><td>0.33</td><td>0.27</td><td>0.12</td><td>0.38</td><td>0.34</td><td>0.17</td><td>0.69</td><td>0.70</td><td>0.74</td></tr><tr><td>061 Foambrick</td><td>288</td><td>0.26</td><td>0.24</td><td>0.01</td><td>1.00</td><td>1.00</td><td>0.99</td><td>1.00</td><td>1.00</td><td>1.00</td></tr></table>
|
| 153 |
+
|
| 154 |
+
Table 1: Accuracy results on the real YCB-Video test set, for accuracy thresholds $0.5\mathrm{cm}$ , $1.0\mathrm{cm}$ , and $2.0\mathrm{cm}$ , and per object type. 3DP3 is our full scene graph inference algorithm and 3DP3* is an ablation that does not infer contact relationships. '# of Scenes' = The number of test images in which that object appears, out of the total 2,949 images. 'DF' = DenseFusion [37], a deep learning baseline.
|
| 155 |
+
|
| 156 |
+
shows the quantitative results. For almost all objects, our algorithm (3DP3) is more accurate than an ablation $(3\mathrm{DP3}^{*})$ that fixes the structure so that there are no contact relationships, and the ablation is more accurate than DenseFusion. This suggests that both the rendering-based likelihood and inference of structure contribute to 3DP3's more accurate 6DoF pose estimation. Figure 5 shows examples of corrections that 3DP3 makes to DenseFusion's estimates.
|
| 157 |
+
|
| 158 |
+
# 6.2 Generalization to challenging scenes
|
| 159 |
+
|
| 160 |
+
Next, we evaluated our algorithm's performance on challenging scenes containing novel viewpoints, occlusions, and contact structure. Our synthetic YCB-Challenging dataset consists of 2000 RGB-D images containing objects from the YCB object set [6] in the following 4 categories of challenging scenes: (i) Single object: Single object in contact with table, (ii) Stacked: Stack of two objects on a table, (iii) Partial view: Single object not fully in field-of-view, (iv) Partially Occluded: One object partially occluded by another. For this experiment, the MAP stage of our algorithm uses an alternative initialization (see supplement) that does not use DenseFusion. We evaluate 3DP3 and the 3DP3* ablation alongside DenseFusion [37] and another state-of-the-art baseline, Robust6D [34]. For most scenes and objects, our approach significantly outperforms the baselines (Table 2). In Table 3, we assess 3DP3's robustness by inspecting the error distribution at the 1st, 2nd, and 3rd quartile for each scene type and object type. At Q3, 3DP3 consistently outperforms the baselines and we find that the drop in performance from Q1 and Q3 is less for 3DP3 than the baselines.
|
| 161 |
+
|
| 162 |
+
# 7 Discussion
|
| 163 |
+
|
| 164 |
+
This paper presented 3DP3, a framework for generative modeling, learning, and inference with structured scenes and image data; and showed that it improves the accuracy of 6DoF object pose estimation in cluttered scenes. We used probabilistic programs to conceive of our generative model and represent it concisely; and we used a probabilistic programming system [11] with programmable inference [28] to manage the complexity of our inference and learning algorithm implementations. The current work has several limitations: Our algorithm runs $\approx 20\mathrm{x}$ slower than the DenseFusion baseline. Our shape-learning algorithm requires that the training scenes contain only the single novel object, whose identity is known across training frames. Adding the ability to segment and learn models of novel objects in cluttered scenes and automatically train object detectors and pose estimators for these objects from short RGB-D video sequences, is an ongoing direction of work. The model also does not yet incorporate some important prior knowledge about scenes—interpenetration of objects is permitted, and constraints on physical stability are not incorporated. More experiments are also needed to understand the implications of a Bayesian treatment of 3D scene perception.
|
| 165 |
+
|
| 166 |
+
<table><tr><td colspan="3"></td><td colspan="4">0.5cm Threshold</td><td colspan="4">1.0cm Threshold</td><td colspan="4">2.0cm Threshold</td></tr><tr><td rowspan="2">Scene Type</td><td rowspan="2">Object Type</td><td rowspan="2"># of Scenes</td><td colspan="4">Accuracy</td><td colspan="4">Accuracy</td><td colspan="4">Accuracy</td></tr><tr><td>3DP3</td><td>3DP3*</td><td>DF</td><td>R6D</td><td>3DP3</td><td>3DP3*</td><td>DF</td><td>R6D</td><td>3DP3</td><td>3DP3*</td><td>DF</td><td>R6D</td></tr><tr><td rowspan="5">Single Object</td><td>002/master_chef_can</td><td>94</td><td>0.99</td><td>0.95</td><td>0.45</td><td>0.03</td><td>1.00</td><td>1.00</td><td>0.69</td><td>0.46</td><td>1.00</td><td>1.00</td><td>1.00</td><td>0.98</td></tr><tr><td>003_cracker_box</td><td>92</td><td>0.55</td><td>0.39</td><td>0.16</td><td>0.00</td><td>0.98</td><td>0.98</td><td>0.39</td><td>0.02</td><td>1.00</td><td>1.00</td><td>0.78</td><td>0.42</td></tr><tr><td>004_sugar_box</td><td>109</td><td>0.90</td><td>0.87</td><td>0.17</td><td>0.00</td><td>1.00</td><td>1.00</td><td>0.72</td><td>0.32</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>005_tomato_soup_can</td><td>108</td><td>0.88</td><td>0.81</td><td>0.18</td><td>0.00</td><td>1.00</td><td>1.00</td><td>0.36</td><td>0.07</td><td>1.00</td><td>1.00</td><td>0.86</td><td>0.74</td></tr><tr><td>006_mustard_bottle</td><td>97</td><td>0.86</td><td>0.79</td><td>0.48</td><td>0.01</td><td>1.00</td><td>1.00</td><td>0.57</td><td>0.36</td><td>1.00</td><td>1.00</td><td>0.81</td><td>0.89</td></tr><tr><td rowspan="5">Stacked</td><td>002/master_chef_can</td><td>190</td><td>0.86</td><td>0.79</td><td>0.28</td><td>0.02</td><td>0.94</td><td>0.93</td><td>0.56</td><td>0.39</td><td>0.95</td><td>0.95</td><td>1.00</td><td>0.98</td></tr><tr><td>003_cracker_box</td><td>204</td><td>0.41</td><td>0.24</td><td>0.16</td><td>0.00</td><td>0.85</td><td>0.81</td><td>0.41</td><td>0.04</td><td>0.97</td><td>0.96</td><td>0.76</td><td>0.40</td></tr><tr><td>004_sugar_box</td><td>214</td><td>0.63</td><td>0.61</td><td>0.14</td><td>0.01</td><td>0.92</td><td>0.91</td><td>0.61</td><td>0.33</td><td>0.94</td><td>0.94</td><td>0.99</td><td>0.99</td></tr><tr><td>005_tomato_soup_can</td><td>193</td><td>0.67</td><td>0.52</td><td>0.13</td><td>0.00</td><td>0.89</td><td>0.86</td><td>0.28</td><td>0.06</td><td>0.90</td><td>0.88</td><td>0.75</td><td>0.66</td></tr><tr><td>006_mustard_bottle</td><td>199</td><td>0.73</td><td>0.60</td><td>0.44</td><td>0.03</td><td>0.94</td><td>0.90</td><td>0.54</td><td>0.30</td><td>0.94</td><td>0.94</td><td>0.85</td><td>0.88</td></tr><tr><td rowspan="5">Partial 
View</td><td>002/master_chef_can</td><td>106</td><td>0.81</td><td>0.80</td><td>0.11</td><td>0.00</td><td>1.00</td><td>1.00</td><td>0.30</td><td>0.04</td><td>1.00</td><td>1.00</td><td>0.67</td><td>0.42</td></tr><tr><td>003_cracker_box</td><td>99</td><td>0.18</td><td>0.16</td><td>0.00</td><td>0.00</td><td>0.60</td><td>0.57</td><td>0.01</td><td>0.00</td><td>0.82</td><td>0.80</td><td>0.14</td><td>0.04</td></tr><tr><td>004_sugar_box</td><td>111</td><td>0.63</td><td>0.59</td><td>0.00</td><td>0.00</td><td>0.89</td><td>0.89</td><td>0.08</td><td>0.04</td><td>1.00</td><td>1.00</td><td>0.73</td><td>0.68</td></tr><tr><td>005_tomato_soup_can</td><td>87</td><td>0.34</td><td>0.33</td><td>0.00</td><td>0.00</td><td>0.72</td><td>0.71</td><td>0.13</td><td>0.00</td><td>0.83</td><td>0.82</td><td>0.40</td><td>0.13</td></tr><tr><td>006_mustard_bottle</td><td>97</td><td>0.55</td><td>0.62</td><td>0.08</td><td>0.00</td><td>0.87</td><td>0.86</td><td>0.23</td><td>0.00</td><td>0.96</td><td>0.95</td><td>0.37</td><td>0.26</td></tr><tr><td rowspan="5">Partially Occluded</td><td>002/master_chef_can</td><td>130</td><td>0.71</td><td>0.52</td><td>0.04</td><td>0.00</td><td>0.93</td><td>0.90</td><td>0.13</td><td>0.02</td><td>0.99</td><td>0.99</td><td>0.22</td><td>0.12</td></tr><tr><td>003_cracker_box</td><td>500</td><td>0.37</td><td>0.35</td><td>0.59</td><td>0.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>0.02</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>004_sugar_box</td><td>117</td><td>0.02</td><td>0.01</td><td>0.06</td><td>0.00</td><td>0.30</td><td>0.27</td><td>0.40</td><td>0.12</td><td>0.94</td><td>0.93</td><td>0.84</td><td>0.77</td></tr><tr><td>005_tomato_soup_can</td><td>124</td><td>0.04</td><td>0.00</td><td>0.01</td><td>0.00</td><td>0.31</td><td>0.23</td><td>0.14</td><td>0.06</td><td>0.81</td><td>0.75</td><td>0.50</td><td>0.49</td></tr><tr><td>006_mustard_bottle</td><td>129</td><td>0.70</td><td>0.43</td><td>0.55</td><td>0.03</td><td>0.84</td><td>0.74</td><td>0.95</td><td>0.29</td><td>0.94</td><td>0.90</td><td>1.00</td><td>0.99</td></tr></table>
|
| 167 |
+
|
| 168 |
+
Table 2: Accuracy results on our synthetic YCB-Challenging data set. We report the number of scenes over which this accuracy is computed for each object and scene type. Accuracy is shown for 3DP3 and 3DP3*, which are our full method and an ablation that does not model contact relationships, respectively, and two deep learning baselines (DenseFusion (DF) [37] and Robust6D (R6D) [34]).
|
| 169 |
+
|
| 170 |
+
<table><tr><td colspan="2"></td><td colspan="3">Tomato Soup</td><td colspan="3">Cracker Box</td><td colspan="3">Potted Meat</td><td colspan="3">Sugar Box</td><td colspan="3">Master Chef</td></tr><tr><td rowspan="2">Scene Type</td><td rowspan="2">Method</td><td colspan="3">ADD-S</td><td colspan="3">ADD-S</td><td colspan="3">ADD-S</td><td colspan="3">ADD-S</td><td colspan="3">ADD-S</td></tr><tr><td>Q1</td><td>Q2</td><td>Q3</td><td>Q1</td><td>Q2</td><td>Q3</td><td>Q1</td><td>Q2</td><td>Q3</td><td>Q1</td><td>Q2</td><td>Q3</td><td>Q1</td><td>Q2</td><td>Q3</td></tr><tr><td rowspan="4">Single object</td><td>3DP3 (ours)</td><td>0.35</td><td>0.39</td><td>0.41</td><td>0.43</td><td>0.49</td><td>0.54</td><td>0.36</td><td>0.40</td><td>0.45</td><td>0.38</td><td>0.43</td><td>0.48</td><td>0.37</td><td>0.43</td><td>0.48</td></tr><tr><td>3DP3* (ours)</td><td>0.35</td><td>0.40</td><td>0.43</td><td>0.47</td><td>0.52</td><td>0.62</td><td>0.36</td><td>0.39</td><td>0.44</td><td>0.40</td><td>0.45</td><td>0.49</td><td>0.35</td><td>0.41</td><td>0.49</td></tr><tr><td>DenseFusion</td><td>0.35</td><td>0.55</td><td>1.11</td><td>0.65</td><td>1.35</td><td>1.72</td><td>0.58</td><td>0.88</td><td>1.02</td><td>0.67</td><td>1.25</td><td>1.72</td><td>0.32</td><td>0.61</td><td>1.85</td></tr><tr><td>Robust6D</td><td>0.84</td><td>1.05</td><td>1.29</td><td>1.65</td><td>2.22</td><td>2.90</td><td>0.97</td><td>1.09</td><td>1.21</td><td>1.25</td><td>1.61</td><td>2.02</td><td>0.83</td><td>1.48</td><td>1.89</td></tr><tr><td rowspan="4">Stacked</td><td>3DP3 (ours)</td><td>0.37</td><td>0.42</td><td>0.46</td><td>0.46</td><td>0.52</td><td>0.60</td><td>0.39</td><td>0.45</td><td>0.60</td><td>0.40</td><td>0.46</td><td>0.52</td><td>0.40</td><td>0.45</td><td>0.51</td></tr><tr><td>3DP3* (ours)</td><td>0.38</td><td>0.42</td><td>0.48</td><td>0.50</td><td>0.60</td><td>0.79</td><td>0.40</td><td>0.46</td><td>0.64</td><td>0.43</td><td>0.49</td><td>0.61</td><td>0.41</td><td>0.47</td><td>0.56</td></tr><tr><td>DenseFusion</td><td>0.49</td><td>0.87</td><td>1.21</td><td>0.66</td><td>1.33</td><td>1.97</td><td>0.63</td><td>0.92</td><td>1.16</td><td>0.93</td><td>1.42</td><td>1.96</td><td>0.37</td><td>0.66</td><td>1.83</td></tr><tr><td>Robust6D</td><td>0.84</td><td>1.15</td><td>1.36</td><td>1.68</td><td>2.21</td><td>2.86</td><td>0.92</td><td>1.11</td><td>1.26</td><td>1.37</td><td>1.72</td><td>2.21</td><td>0.94</td><td>1.38</td><td>1.82</td></tr><tr><td rowspan="4">Partial view</td><td>3DP3 (ours)</td><td>0.34</td><td>0.40</td><td>0.47</td><td>0.54</td><td>0.76</td><td>1.56</td><td>0.36</td><td>0.45</td><td>0.59</td><td>0.47</td><td>0.55</td><td>1.80</td><td>0.36</td><td>0.47</td><td>0.57</td></tr><tr><td>3DP3* (ours)</td><td>0.33</td><td>0.40</td><td>0.47</td><td>0.56</td><td>0.90</td><td>1.54</td><td>0.37</td><td>0.45</td><td>0.63</td><td>0.46</td><td>0.59</td><td>1.81</td><td>0.36</td><td>0.46</td><td>0.58</td></tr><tr><td>DenseFusion</td><td>0.79</td><td>1.52</td><td>2.10</td><td>2.33</td><td>2.93</td><td>3.81</td><td>1.26</td><td>1.65</td><td>2.07</td><td>1.52</td><td>2.14</td><td>2.78</td><td>1.05</td><td>2.22</td><td>2.71</td></tr><tr><td>Robust6D</td><td>1.43</td><td>2.25</td><td>2.93</td><td>3.40</td><td>4.03</td><td>4.77</td><td>1.51</td><td>1.83</td><td>2.13</td><td>2.24</td><td>2.99</td><td>4.30</td><td>1.97</td><td>2.50</td><td>3.27</td></tr><tr><td rowspan="4">Partially Occluded</td><td>3DP3 
(ours)</td><td>0.36</td><td>0.42</td><td>0.52</td><td>0.48</td><td>0.52</td><td>0.55</td><td>0.91</td><td>1.25</td><td>1.57</td><td>0.89</td><td>1.69</td><td>1.97</td><td>0.36</td><td>0.42</td><td>0.55</td></tr><tr><td>3DP3* (ours)</td><td>0.39</td><td>0.49</td><td>0.64</td><td>0.48</td><td>0.53</td><td>0.58</td><td>0.97</td><td>1.29</td><td>1.66</td><td>1.03</td><td>1.72</td><td>1.99</td><td>0.43</td><td>0.58</td><td>1.01</td></tr><tr><td>DenseFusion</td><td>-</td><td>-</td><td>-</td><td>0.41</td><td>0.48</td><td>0.58</td><td>0.81</td><td>1.11</td><td>1.53</td><td>1.47</td><td>2.01</td><td>3.18</td><td>0.38</td><td>0.48</td><td>0.64</td></tr><tr><td>Robust6D</td><td>-</td><td>-</td><td>-</td><td>1.30</td><td>1.48</td><td>1.61</td><td>1.15</td><td>1.46</td><td>1.87</td><td>1.48</td><td>2.02</td><td>3.24</td><td>0.94</td><td>1.10</td><td>1.33</td></tr></table>
|
| 171 |
+
|
| 172 |
+
Table 3: Robustness of inference. We quantify the ADD-S error at 1st, 2nd, and 3rd quartiles for each scene type and object type in the synthetic dataset of hard scenes. A value of - indicates the method made no prediction for the object's pose. 3DP3* denotes an ablated version of our method without inference of the scene graph structure and thus object-object contact.
|
| 173 |
+
|
| 174 |
+
# 8 Acknowledgements
|
| 175 |
+
|
| 176 |
+
The authors acknowledge Javier Felip Leon (Intel) for helpful discussions and a prototype depth renderer, and Omesh Tickoo (Intel) for helpful discussions. This work was funded in part by the DARPA Machine Common Sense program (Award ID: 030523-00001); by the Singapore DSTA / MIT SCC collaboration; by Intel's Probabilistic Computing Center; and by philanthropic gifts from the Aphorism Foundation and the Siegel Family Foundation. We thank Alex Lew, Tan Zhi-Xuan, Feras Saad, Cameron Freer, McCoy Becker, Sam Witty, and George Matheos for helpful feedback.
|
| 177 |
+
|
| 178 |
+
# References
|
| 179 |
+
|
| 180 |
+
[1] Sameer Agarwal, Noah Snavely, Steven M Seitz, and Richard Szeliski. Bundle adjustment in the large. In Proceedings of the European Conference on Computer Vision (ECCV), pages 29-42. Springer, 2010.
|
| 181 |
+
[2] Christophe Andrieu, Gareth O Roberts, et al. The pseudo-marginal approach for efficient monte carlo computations. The Annals of Statistics, 37(2):697-725, 2009.
|
| 182 |
+
[3] Iro Armeni, Zhi-Yang He, JunYoung Gwak, Amir R Zamir, Martin Fischer, Jitendra Malik, and Silvio Savarese. 3d scene graph: A structure for unified semantics, 3d space, and camera. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5664-5673, 2019.
|
| 183 |
+
[4] Sid Yingze Bao and Silvio Savarese. Semantic structure from motion. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2025-2032. IEEE, 2011.
|
| 184 |
+
[5] Paul J Besl and Neil D McKay. Method for registration of 3d shapes. In *Sensor Fusion IV: Control Paradigms and Data Structures*, volume 1611, pages 586–606. International Society for Optics and Photonics, 1992.
|
| 185 |
+
[6] Berk Calli, Aaron Walsman, Arjun Singh, Siddhartha Srinivasa, Pieter Abbeel, and Aaron M. Dollar. Benchmarking in manipulation research: Using the yale-cmu-berkeley object and model set. IEEE Robotics Automation Magazine, 22(3):36-52, 2015.
|
| 186 |
+
[7] Xiaotong Chen, Rui Chen, Zhiqiang Sui, Zhefan Ye, Yanqi Liu, R Iris Bahar, and Odest Chadwicke Jenkins. Grip: Generative robust inference and perception for semantic robot manipulation in adversarial environments. In 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 3988-3995. IEEE, 2019.
|
| 187 |
+
[8] Yixin Chen, Siyuan Huang, Tao Yuan, Siyuan Qi, Yixin Zhu, and Song-Chun Zhu. Holistic++ scene understanding: Single-view 3d holistic scene parsing and human pose estimation with human-object interaction and physical commonsense. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8648-8657, 2019.
|
| 188 |
+
[9] James H. Clark. Hierarchical geometric models for visible surface algorithms. Commun. ACM, 19(10):547-554, October 1976.
|
| 189 |
+
[10] Marco Cusumano-Towner, Alexander K Lew, and Vikash K Mansinghka. Automating involutive mcmc using probabilistic and differentiable programming. arXiv preprint arXiv:2007.09871, 2020.
|
| 190 |
+
[11] Marco F Cusumano-Towner, Feras A Saad, Alexander K Lew, and Vikash K Mansinghka. Gen: a general-purpose probabilistic programming system with programmable inference. In Proceedings of the 40th ACM SIGPLAN Conference on Programming Language Design and Implementation, pages 221-236, 2019.
|
| 191 |
+
[12] Fei Deng, Zhuo Zhi, and Sungjin Ahn. Generative hierarchical models for parts, objects, and scenes. arXiv preprint arXiv:1910.09119, 2019.
|
| 192 |
+
[13] Fei Deng, Zhuo Zhi, Donghun Lee, and Sungjin Ahn. Generative scene graph networks. In International Conference on Learning Representations (ICLR), 2021.
|
| 193 |
+
[14] Xinke Deng, Arsalan Mousavian, Yu Xiang, Fei Xia, Timothy Bretl, and Dieter Fox. Poserbpf: A rao-blackwellized particle filter for 6-d object pose tracking. IEEE Transactions on Robotics, 2021.
|
| 194 |
+
[15] Karthik Desingh, Shiyang Lu, Anthony Opipari, and Odest Chadwicke Jenkins. Efficient nonparametric belief propagation for pose estimation and manipulation of articulated objects. Science Robotics, 4(30), 2019.
|
| 195 |
+
[16] Yilun Du, Zhijian Liu, Hector Basevi, Ales Leonardis, Bill Freeman, Josh Tenenbaum, and Jiajun Wu. Learning to exploit stability for 3d scene parsing. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018.
|
| 196 |
+
|
| 197 |
+
[17] SM Eslami, Nicolas Heess, Theophane Weber, Yuval Tassa, David Szepesvari, Geoffrey E Hinton, et al. Attend, infer, repeat: Fast scene understanding with generative models. Advances in Neural Information Processing Systems, 29:3225-3233, 2016.
|
| 198 |
+
[18] Dileep George, Wolfgang Lehrach, Ken Kansky, Miguel Lázaro-Gredilla, Christopher Laan, Bhaskara Marthi, Xinghua Lou, Zhaoshi Meng, Yi Liu, Huayan Wang, Alex Lavin, and D. Scott Phoenix. A generative vision model that trains with high data efficiency and breaks text-based captchas. Science, 358(6368):eaag2612, 2017.
|
| 199 |
+
[19] Jared Glover, Gary Bradski, and Radu Bogdan Rusu. Monte carlo pose estimation with quaternion kernels and the bingham distribution. In Robotics: science and systems, volume 7, page 97, 2012.
|
| 200 |
+
[20] Tomáš Hodař, Frank Michel, Eric Brachmann, Wadim Kehl, Anders Glent Buch, Dirk Kraft, Bertram Drost, Joel Vidal, Stephan Ihrke, Xenophon Zabulis, Caner Sahin, Fabian Manhardt, Federico Tombari, Tae-Kyun Kim, Jiří Matas, and Carsten Rother. BOP: Benchmark for 6D object pose estimation. Proceedings of the European Conference on Computer Vision (ECCV), 2018.
|
| 201 |
+
[21] Siyuan Huang, Siyuan Qi, Yixin Zhu, Yinxue Xiao, Yuanlu Xu, and Song-Chun Zhu. Holistic 3d scene parsing and reconstruction from a single rgb image. In Proceedings of the European Conference on Computer Vision (ECCV), pages 187-203, 2018.
|
| 202 |
+
[22] D Knill D Kersten and A Yuille. Introduction: A bayesian formulation of visual perception. Perception as Bayesian inference, pages 1-21, 1996.
|
| 203 |
+
[23] Daniel Kersten, Pascal Mamassian, and Alan Yuille. Object perception as bayesian inference. Annu. Rev. Psychol., 55:271-304, 2004.
|
| 204 |
+
[24] Nilesh Kulkarni, Ishan Misra, Shubham Tulsiani, and Abhinav Gupta. 3d-relnet: Joint object and relational network for 3d prediction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2212-2221, 2019.
|
| 205 |
+
[25] Tejas D Kulkarni, Pushmeet Kohli, Joshua B Tenenbaum, and Vikash Mansinghka. Picture: A probabilistic programming language for scene perception. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4390-4399, 2015.
|
| 206 |
+
[26] Tai Sing Lee and David Mumford. Hierarchical bayesian inference in the visual cortex. JOSA A, 20(7):1434-1448, 2003.
|
| 207 |
+
[27] Vikash K Mansinghka, Tejas D Kulkarni, Yura N Perov, and Josh Tenenbaum. Approximate bayesian image interpretation using generative probabilistic graphics programs. Advances in Neural Information Processing Systems, 26:1520-1528, 2013.
|
| 208 |
+
[28] Vikash K Mansinghka, Ulrich Schaechtle, Shivam Handa, Alexey Radul, Yutian Chen, and Martin Rinard. Probabilistic programming with programmable inference. In Proceedings of the 39th ACM SIGPLAN Conference on Programming Language Design and Implementation, pages 603-616, 2018.
|
| 209 |
+
[29] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), pages 405-421. Springer, 2020.
|
| 210 |
+
[30] Pol Moreno, Christopher KI Williams, Charlie Nash, and Pushmeet Kohli. Overcoming occlusion with inverse graphics. In Proceedings of the European Conference on Computer Vision (ECCV), pages 170-185. Springer, 2016.
|
| 211 |
+
[31] Lukasz Romaszko, Christopher KI Williams, Pol Moreno, and Pushmeet Kohli. Vision-as-inverse-graphics: Obtaining a rich 3d explanation of a scene from a single image. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 851–859, 2017.
|
| 212 |
+
|
| 213 |
+
[32] Antoni Rosinol, Arjun Gupta, Marcus Abate, Jingnan Shi, and Luca Carlone. 3d dynamic scene graphs: Actionable spatial perception with places, objects, and humans. arXiv preprint arXiv:2002.06289, 2020.
|
| 214 |
+
[33] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4104-4113, 2016.
|
| 215 |
+
[34] Meng Tian, Liang Pan, Marcelo H Ang Jr, and Gim Hee Lee. Robust 6d object pose estimation by learning rgb-d features. In International Conference on Robotics and Automation (ICRA), 2020.
|
| 216 |
+
[35] Jonathan Tremblay, Thang To, Balakumar Sundaralingam, Yu Xiang, Dieter Fox, and Stan Birchfield. Deep object pose estimation for semantic robotic grasping of household objects. In Conference on Robot Learning (CoRL), 2018.
|
| 217 |
+
[36] Zhuowen Tu and Song-Chun Zhu. Image segmentation by data-driven markov chain monte carlo. IEEE Transactions on pattern analysis and machine intelligence, 24(5):657-673, 2002.
|
| 218 |
+
[37] Chen Wang, Danfei Xu, Yuke Zhu, Roberto Martin-Martin, Cewu Lu, Li Fei-Fei, and Silvio Savarese. Densefusion: 6d object pose estimation by iterative dense fusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3343–3352, 2019.
|
| 219 |
+
[38] Yu Xiang, Tanner Schmidt, Venkatraman Narayanan, and Dieter Fox. Poseconn: A convolutional neural network for 6d object pose estimation in cluttered scenes. 2018.
|
| 220 |
+
[39] Alan Yuille and Daniel Kersten. Vision as bayesian inference: Analysis by synthesis? Trends in cognitive sciences, 10(7):301-308, 2006.
|
| 221 |
+
[40] Song Chun Zhu and Alan Yuille. Region competition: Unifying snakes, region growing, and bayes/mdl for multiband image segmentation. IEEE transactions on pattern analysis and machine intelligence, 18(9):884-900, 1996.
|
| 222 |
+
[41] Ben Zinberg, Marco Cusumano-Towner, and Vikash K Mansinghka. Structured differentiable models of 3d scenes via generative scene graphs. In Workshop on Perception as Generative Reasoning, NeurIPS, Submitted September 2019 and presented December 2019.
|
3dp33dsceneperceptionviaprobabilisticprogramming/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:26f8e49b7f7f74e71cac7ca3baad27897ed63cdebc651b003f94162c7dc9e443
|
| 3 |
+
size 674625
|
3dp33dsceneperceptionviaprobabilisticprogramming/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:95a53fe6256e8ff201fa0d9526d675733c9d53bcab552c45a2f284a8607075a9
|
| 3 |
+
size 483850
|
3dposetransferwithcorrespondencelearningandmeshrefinement/b0b60720-8d5e-49a5-8f88-e759ee2519c0_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a2b3aa941bdb9ea7f532c551c6cd0b6005fed891095e03daecb45efd290c56b2
|
| 3 |
+
size 65420
|
3dposetransferwithcorrespondencelearningandmeshrefinement/b0b60720-8d5e-49a5-8f88-e759ee2519c0_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:67d664b8fc37510862d5fcff15facc430bfb4ef895471e8979ca571f0ed5dfc3
|
| 3 |
+
size 81567
|
3dposetransferwithcorrespondencelearningandmeshrefinement/b0b60720-8d5e-49a5-8f88-e759ee2519c0_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:70f0e5b8e95f4a701fd5ed5a64f280d07cdd076c9330ae58b7874510bceeb0de
|
| 3 |
+
size 1845066
|
3dposetransferwithcorrespondencelearningandmeshrefinement/full.md
ADDED
|
@@ -0,0 +1,267 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 3D Pose Transfer with Correspondence Learning and Mesh Refinement
|
| 2 |
+
|
| 3 |
+
Chaoyue Song $^{1}$ , Jiacheng Wei $^{2}$ , Ruibo Li $^{1,3}$ , Fayao Liu $^{4}$ and Guosheng Lin $^{1,3*}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ S-Lab, Nanyang Technological University, Singapore
|
| 6 |
+
|
| 7 |
+
$^{2}$ School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore
|
| 8 |
+
|
| 9 |
+
$^{3}$ School of Computer Science and Engineering, Nanyang Technological University, Singapore
|
| 10 |
+
|
| 11 |
+
$^{4}$ Institute for Inforcomm Research, A*STAR, Singapore
|
| 12 |
+
|
| 13 |
+
{chaoyue.song, gslin}@ntu.edu.sg
|
| 14 |
+
|
| 15 |
+
# Abstract
|
| 16 |
+
|
| 17 |
+
3D pose transfer is one of the most challenging 3D generation tasks. It aims to transfer the pose of a source mesh to a target mesh and keep the identity (e.g., body shape) of the target mesh. Some previous works require key point annotations to build reliable correspondence between the source and target meshes, while other methods do not consider any shape correspondence between sources and targets, which leads to limited generation quality. In this work, we propose a correspondence-refinement network to achieve the 3D pose transfer for both human and animal meshes. The correspondence between source and target meshes is first established by solving an optimal transport problem. Then, we warp the source mesh according to the dense correspondence and obtain a coarse warped mesh. The warped mesh will be better refined with our proposed Elastic Instance Normalization, which is a conditional normalization layer and can help to generate high-quality meshes. Extensive experimental results show that the proposed architecture can effectively transfer the poses from source to target meshes and produce better results with satisfied visual performance than state-of-the-art methods. Our code and data are available at https://github.com/ChaoyueSong/3d-corenet.
|
| 18 |
+
|
| 19 |
+
# 1 Introduction
|
| 20 |
+
|
| 21 |
+
3D pose transfer has been drawing a lot of attention from the vision and graphics community. It has potential applications in 3D animated movies and games by generating new poses for existing shapes and animation sequences. 3D pose transfer is a learning-driven generation task which is similar to style transfer on 2D images. As shown in Figure 1, pose transfer takes two inputs, one is identity mesh that provides mesh identity (e.g., body shape), the other is pose mesh that provides the pose of mesh. It aims at transferring the pose of a source pose mesh to a target identity mesh and keeping the identity of the target identity mesh.
|
| 22 |
+
|
| 23 |
+
A fundamental problem for previous methods is to build reliable correspondence between source and target meshes. It can be very challenging when the source and target meshes have significant differences. Most of the previous methods try to solve it with the help of user effort or other additional inputs, such as key point annotations [3, 34, 41], etc. Unfortunately, it is time-consuming to obtain such additional inputs that will limit the usage in practice. In [37], they proposed to implement pose transfer without correspondence learning. Their method is convenient but the performance will be degraded since they do not consider the correspondence between meshes. In this work, we propose a $C$ o r s p o r d e n c e - R E f i n e m e n t N e t w o r k (3D-C o r e N e t) to solve the pose transfer problem for both the human and animal meshes. Like [37], our method does not need key point annotations or other
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
Figure 1: Pose transfer results generated by our 3D-CoreNet. In the first two rows, the human identity and pose meshes are from SMPL [23], in the last two rows, the animal identity and pose meshes are from SMAL [46].
|
| 27 |
+
|
| 28 |
+
additional inputs. We learn the shape correspondence between identity and pose meshes first, then we warp the pose mesh to a coarse warped output according to the correspondence. Finally, the warped mesh will be refined to have a better visual performance. Our method does not require the two meshes to have the same number or order of vertices.
|
| 29 |
+
|
| 30 |
+
For the correspondence learning module, we treat the shape correspondence learning as an optimal transport problem to learn the correspondence between meshes. Our network takes vertex coordinates of identity and pose meshes as inputs. We extract deep features at each vertex using point cloud convolutions and compute a matching cost between the vertex sets with the extracted features. Our goal is to minimize the matching cost to get an optimal matching matrix. With the optimal matching matrix, we warp the pose mesh and obtain a coarse warped mesh. We then refine the warped output with a set of elastic instance normalization residual blocks, the modulation parameters in the normalization layers are learned with our proposed Elastic Instance Normalization (ElaIN). In order to generate smoother meshes with more details, we introduce a channel-wise weight in ElaIN to adaptively blend feature statistics of original features and the learned parameters from external data, which help to keep the consistency and continuity of the original features.
|
| 31 |
+
|
| 32 |
+
Our contributions can be summarized as follows:
|
| 33 |
+
|
| 34 |
+
- We solve the 3D pose transfer problem with our proposed correspondence-refinement network. To the best of our knowledge, our method is the first to learn the correspondence between different meshes and refine the generated meshes jointly in the 3D pose transfer task.
|
| 35 |
+
- We learn the shape correspondence by solving an optimal transport problem without any key point annotations and generate high-quality final meshes with our proposed elastic instance normalization in the refinement module.
|
| 36 |
+
- Through extensive experiments, we demonstrate that our method outperforms state-of-the-art methods quantitatively and qualitatively on both human and animal meshes.
|
| 37 |
+
|
| 38 |
+
# 2 Related work
|
| 39 |
+
|
| 40 |
+
# 2.1 Deep learning methods on 3D data
|
| 41 |
+
|
| 42 |
+
The representations of 3D data are various, like point clouds, voxels and meshes. 3DShapeNets [40] and VoxNet [25] propose to learn on volumetric grids. Their methods cannot be applied on complex data due to the sparsity of data and computation cost of 3D convolution. PointNet [29] uses a shared MLP on every point followed by a global max-pooling. Following PointNet, some hierarchical architectures have been proposed to aggregate local neighborhood information with MLPs [20, 30]. [13, 35] proposed mesh variational autoencoder to learn mesh features whose methods consume a large amount of computing resources due to their fully-connected networks. Many works use graph convolutions with mesh down- and up-sampling layers [15], like CoMA [31], CAPE [24] based on ChebyNet [10], and [45] based on SpiralNet [21], SpiralNet++ [16], etc. They all need a template to implement their hierarchical structure which is not applicable to real-world problems. In this work, we use mesh as the representation of 3D shape and shared weights convolution layers in the network.
|
| 43 |
+
|
| 44 |
+
# 2.2 3D pose transfer
|
| 45 |
+
|
| 46 |
+
Deformation transfer in graphics aims to apply the deformation exhibited by a source mesh onto a different target mesh [34]. 3D pose transfer aims to generate a new mesh based on the knowledge of a pair of source and target meshes. In [3, 34, 41, 42], the methods all require to label the corresponding landmarks first to deal with the differences between meshes. Baran et al. [2] proposed a method that infers a semantic correspondence between different poses of two characters with the guidance of example mesh pairs. Chu et al. [8] proposed to use a few examples to generate results which will make it difficult to automatically transfer pose. For this problem, Gao et al. [14] proposed to use cycle consistency to achieve the pose transfer. However, their method cannot deal with new identities due to the limitations of the visual similarity metric. In [37], they solved the pose transfer via the latest technique for image style transfer. Their work does not need other guidance, but the performance is also restrained since they do not learn any correspondence. To solve the problems, our network learns the correspondence and refines the generated meshes jointly.
|
| 47 |
+
|
| 48 |
+
# 2.3 Correspondence learning
|
| 49 |
+
|
| 50 |
+
In CoCosNet [44], they introduced a correspondence network based on the correlation matrix between images without any constraints. To learn a better matching, we proposed to use optimal transport to learn the correspondence between meshes. Recently, optimal transport has received great attention in various computer vision tasks. Courty et al. [9] perform the alignment of the representations in the source and target domains by learning a transportation plan. Su et al. [33] compute the optimal transport map to deal with the surface registration and shape space problem. Other applications include generative model [1, 6, 11, 39], scene flow [28], semantic correspondence [22] and etc.
|
| 51 |
+
|
| 52 |
+
# 2.4 Conditional normalization layers
|
| 53 |
+
|
| 54 |
+
After normalizing the activation value, conditional normalization uses the modulation parameters calculated from the external data to denormalize it. Adaptive Instance Normalization (AdaIN) [18] aligns the mean and variance between content and style image which achieves arbitrary style transfer. Soft-AdaIN [7] introduces a channel-wise weight to blend feature statistics of content and style image to preserve more details for the results. Spatially-Adaptive Normalization (SPADE) [27] can better preserve semantic information by not washing away it when applied to segmentation masks. SPAdaIN [37] changes batch normalization [19] in SPADE to instance normalization [36] for 3D pose transfer. However, it will break the consistency and continuity of the feature map when doing the denormalization, which has a bad influence on the mesh smoothness and detail preservation. To address this problem, our ElaIN introduces an adaptive weight to implement the denormalization.
|
| 55 |
+
|
| 56 |
+
# 3 Method
|
| 57 |
+
|
| 58 |
+
Given a source pose mesh and a target identity mesh, our goal is to transfer the pose of source mesh to the target mesh and keep the identity of the target mesh. In this section, we will introduce our end-to-end Correspondence-Refinement Network (3D-CoreNet) for 3D pose transfer.
|
| 59 |
+
|
| 60 |
+

|
| 61 |
+
Figure 2: The architecture of 3D-CoreNet. With the extracted features, the shape correspondence between identity and pose meshes is first established by solving an optimal transport problem. Then, we warp the pose mesh according to the optimal matching matrix $\mathbf{T}_m$ and obtain a coarse warped mesh. The warped mesh will be better refined with our proposed ElaIN in the refinement module.
|
| 62 |
+
|
| 63 |
+
A 3D mesh can be represented as $M(I, P, O)$ , where $I$ denotes the mesh identity, $P$ represents the pose of the mesh, $O$ is the vertex order of the mesh. Given two meshes $M_{id} = M(I_1, P_1, O_1)$ and $M_{pose} = M(I_2, P_2, O_2)$ , we aim to transfer the pose of $M_{pose}$ to $M_{id}$ and generate the output mesh $M_{output} = M'(I_1, P_2, O_1)$ . In our 3D-CoreNet, we take $V_{id} \in \mathbb{R}^{N_{id} \times 3}$ and $V_{pose} \in \mathbb{R}^{N_{pose} \times 3}$ as inputs, which are the $(x, y, z)$ coordinates of the mesh vertices. $N_{id}$ and $N_{pose}$ denote the number of vertices of identity mesh and pose mesh respectively.
|
| 64 |
+
|
| 65 |
+
As shown in Figure 2, the vertices of the meshes are first fed into the network to extract multi-scale deep features. We calculate the optimal matching matrix with the vertex feature maps by solving an optimal transport problem, then we warp the pose mesh according to the matrix and obtain the warped mesh. Finally, the warped mesh is refined to the final output mesh with our proposed elastic instance normalization (ElaIN). The output mesh combines the pose from the source mesh and the identity from the target. And it inherits the vertex order from the identity mesh.
|
| 66 |
+
|
| 67 |
+
# 3.1 Correspondence learning
|
| 68 |
+
|
| 69 |
+
Given an identity mesh and a pose mesh, our correspondence learning module calculates an optimal matching matrix, each element of the matching matrix represents the similarities between two vertices in the two meshes. The first step in our shape correspondence learning is to compute a correlation matrix with the extracted features, which is based on cosine similarity and denotes the matching similarities between any two positions from different meshes. However, the matching scores in the correlation matrix are calculated without any additional constraints. To learn a better matching, we solve this problem from a global perspective by modeling it as an optimal transport problem.
|
| 70 |
+
|
| 71 |
+
Correlation matrix We first introduce our feature extractor which aims to extract features for the unordered input vertices. Close to [37], our feature extractor consists of 3 stacked $1 \times 1$ convolution and Instance Normalization layers, the activation functions applied are all LeakyReLU. Given the extracted vertex feature maps $f_{id} \in \mathbb{R}^{D \times N_{id}}$ , $f_{pose} \in \mathbb{R}^{D \times N_{pose}}$ of identity and pose meshes ( $D$ is the channel-wise dimension), a popular method to compute correlation matrix is using the cosine similarity [43, 44]. Concretely, we compute the correlation matrix $\mathbf{C} \in \mathbb{R}^{N_{id} \times N_{pose}}$ as:
|
| 72 |
+
|
| 73 |
+
$$
|
| 74 |
+
\mathbf {C} (i, j) = \frac {f _ {i d} (i) ^ {\top} f _ {p o s e} (j)}{\| f _ {i d} (i) \| \| f _ {p o s e} (j) \|} \tag {1}
|
| 75 |
+
$$
|
| 76 |
+
|
| 77 |
+
where $\mathbf{C}(i,j)$ denotes the individual matching score between $f_{id}(i)$ and $f_{\text{pose}}(j) \in \mathbb{R}^{\mathbb{D}}$ , $f_{id}(i)$ and $f_{\text{pose}}(j)$ represent the channel-wise feature of $f_{id}$ at position i and $f_{\text{pose}}$ at j.
|
| 78 |
+
|
| 79 |
+
Optimal transport problem To learn a better matching with additional constraints in this work, we model our shape correspondence learning as an optimal transport problem. We first define a matching matrix $\mathbf{T} \in \mathbb{R}_+^{N_{id} \times N_{pose}}$ between identity and pose meshes. Then we can get the total correlation as $\sum_{ij} \mathbf{C}(i,j) \mathbf{T}(i,j)$ . The aim will be maximizing the total correlation score to get an optimal matching matrix $\mathbf{T}_m$ .
|
| 80 |
+
|
| 81 |
+
We treat the correspondence learning between identity and pose meshes as the transport of mass. A mass which is equal to $N_{id}^{-1}$ will be assigned to each vertex in the identity mesh, and each vertex in pose mesh will receive the mass $N_{pose}^{-1}$ from identity mesh through the built correspondence between vertices. Then if we define $\mathbf{Z} = 1 - \mathbf{C}$ as the cost matrix, our goal can be formulated as a standard optimal transport problem by minimizing the total matching cost,
|
| 82 |
+
|
| 83 |
+
$$
|
| 84 |
+
\mathbf {T} _ {m} = \underset {\mathbf {T} \in \mathbb {R} _ {+} ^ {N _ {i d} \times N _ {p o s e}}} {\arg \min } \sum_ {i j} \mathbf {Z} (i, j) \mathbf {T} (i, j) \quad s. t. \quad \mathbf {T 1} _ {N _ {p o s e}} = \mathbf {1} _ {N _ {i d}} N _ {i d} ^ {- 1}, \quad \mathbf {T} ^ {\top} \mathbf {1} _ {N _ {i d}} = \mathbf {1} _ {N _ {p o s e}} N _ {p o s e} ^ {- 1}. \tag {2}
|
| 85 |
+
$$
|
| 86 |
+
|
| 87 |
+
where $\mathbf{1}_{N_{id}}\in \mathbb{R}^{N_{id}}$ and $\mathbf{1}_{N_{pose}}\in \mathbb{R}^{N_{pose}}$ are vectors whose elements are all 1. The first constraint in Eq. 2 means that the mass of each vertex in $M_{id}$ will be entirely transported to some of the vertices in $M_{pose}$ . And each vertex in $M_{pose}$ will receive a mass $N_{pose}^{-1}$ from some of the vertices in $M_{id}$ with the second constraint. This problem can be solved by the Sinkhorn-Knopp algorithm [32]. The details of the solved process will be given in the supplementary material.
|
| 88 |
+
|
| 89 |
+
With the matching matrix, we can warp the pose mesh and obtain the vertex coordinates $V_{\text{warp}} \in \mathbb{R}^{N_{id} \times 3}$ of the warped mesh,
|
| 90 |
+
|
| 91 |
+
$$
|
| 92 |
+
V _ {w a r p} (i) = \sum_ {j} \mathbf {T} _ {m} (i, j) V _ {p o s e} (j) \tag {3}
|
| 93 |
+
$$
|
| 94 |
+
|
| 95 |
+
the warped mesh $M_{\text{warp}}$ inherits the number and order of vertex from identity mesh and can be reconstructed with the face information of identity mesh as shown in Figure 2.
|
| 96 |
+
|
| 97 |
+
# 3.2 Mesh refinement
|
| 98 |
+
|
| 99 |
+
In this section, we introduce our mesh refinement module which refines the warped mesh to the desired output progressively.
|
| 100 |
+
|
| 101 |
+
Elastic instance normalization Previous conditional normalization layers [18, 27, 37] used in different tasks always calculated their denormalization parameters only with the external data. We argue that it may break the consistency and continuity of the original features. Inspired by [7], we propose Elastic Instance Normalization (ElaIN) which blends the statistics of original features and the learned parameters from external data adaptively and elastically.
|
| 102 |
+
|
| 103 |
+
As shown in Figure 2, the warped mesh is flatter than we desired and is kind of out of shape, but it inherits the pose from the source mesh successfully. Therefore, we refine the warped mesh with the identity feature maps to get a better final output. Here, we let $h^i \in \mathbb{R}^{S^i \times D^i \times N^i}$ as the activation value before the $i$ -th normalization layer, where $S^i$ is the batch size, $D^i$ is the dimension of feature channel and $N^i$ is the number of vertex. At first, we normalize the feature maps of the warped mesh with instance normalization and the mean and standard deviation are calculated across spatial dimension $(n \in N^i)$ for each sample $s \in S^i$ and each channel $d \in D^i$ ,
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
\mu^ {i} = \frac {1}{N ^ {i}} \sum_ {n} h _ {w a r p} ^ {i} \tag {4}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\sigma^ {i} = \sqrt {\frac {1}{N ^ {i}} \sum_ {n} \left(h _ {w a r p} ^ {i} - \mu^ {i}\right) ^ {2} + \epsilon} \tag {5}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
then the feature maps of the identity mesh are fed into a $1 \times 1$ convolution layer to get $h_{id}^{i}$ , which shares the same size with $h_{warp}^{i}$ . We calculate the mean of $h_{warp}^{i}$ , $h_{id}^{i}$ to make them $S^{i} \times D^{i} \times 1$ tensors. The tensors are then concatenated in channel dimension to get a $S^{i} \times (2D^{i}) \times 1$ tensor.
|
| 114 |
+
|
| 115 |
+
A fully-connected layer is employed to compute an adaptive weight $w(h_{\text{warp}}^i, h_{id}^i) \in \mathbb{R}^{S^i \times D^i \times 1}$ with the concatenated tensor. With $w(h_{\text{warp}}^i, h_{id}^i)$ , we can define the modulation parameters of our normalization layer.
|
| 116 |
+
|
| 117 |
+
$$
|
| 118 |
+
\gamma^ {\prime} \left(h _ {w a r p} ^ {i}, h _ {i d} ^ {i}\right) = w \left(h _ {w a r p} ^ {i}, h _ {i d} ^ {i}\right) \gamma^ {i} + \left(1 - w \left(h _ {w a r p} ^ {i}, h _ {i d} ^ {i}\right)\right) \sigma^ {i},
|
| 119 |
+
$$
|
| 120 |
+
|
| 121 |
+
$$
|
| 122 |
+
\beta^ {\prime} \left(h _ {w a r p} ^ {i}, h _ {i d} ^ {i}\right) = w \left(h _ {w a r p} ^ {i}, h _ {i d} ^ {i}\right) \beta^ {i} + \left(1 - w \left(h _ {w a r p} ^ {i}, h _ {i d} ^ {i}\right)\right) \mu^ {i}. \tag {6}
|
| 123 |
+
$$
|
| 124 |
+
|
| 125 |
+
where $\gamma^i$ and $\beta^i$ are learned from the identity feature $h_{id}^i$ with two convolution layers. Finally, we can scale the normalized $h_{warp}^i$ with $\gamma^\prime$ and shift it with $\beta^\prime$ ,
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
\operatorname {E l a I N} \left(h _ {\text {w a r p}} ^ {i}, h _ {i d} ^ {i}\right) = \gamma^ {\prime} \left(h _ {\text {w a r p}} ^ {i}, h _ {i d} ^ {i}\right) \left(\frac {h _ {\text {w a r p}} ^ {i} - \mu^ {i}}{\sigma^ {i}}\right) + \beta^ {\prime} \left(h _ {\text {w a r p}} ^ {i}, h _ {i d} ^ {i}\right) \tag {7}
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
Mesh refinement module Our mesh refinement module is designed to refine the warped mesh progressively. Following [27, 37, 44], We design the ElaIN residual block with our ElaIN in the form of ResNet blocks [17]. As shown in Figure 2, our architecture contains $l$ ElaIN residual blocks. Each of them consists of our proposed ElaIN followed by a simple convolution layer and LeakyReLU. With the ElaIN residual blocks, the warped mesh is refined to our desired high-quality output. Please refer to the supplementary material for the detailed architecture of ElaIN.
|
| 132 |
+
|
| 133 |
+
# 3.3 Loss function
|
| 134 |
+
|
| 135 |
+
We jointly train the correspondence learning module along with mesh refinement module by minimizing the following loss functions,
|
| 136 |
+
|
| 137 |
+
Reconstruction loss Following [37], we train our network with the supervision of the ground truth mesh $M_{gt} = M(I_1, P_2, O_1)$ . We first process the ground truth mesh to have the same vertex order as the identity mesh. Then we define the reconstruction loss by calculating the point-wise $L2$ distance between the vertices of $M_{output}$ and $M_{gt}$ ,
|
| 138 |
+
|
| 139 |
+
$$
|
| 140 |
+
\mathcal {L} _ {\text {r e c}} = \left\| V _ {\text {o u t p u t}} - V _ {\text {g t}} \right\| _ {2} ^ {2} \tag {8}
|
| 141 |
+
$$
|
| 142 |
+
|
| 143 |
+
where $V_{output}$ and $V_{gt} \in \mathbb{R}^{N_{id} \times 3}$ are the vertices of $M_{output}$ and $M_{gt}$ respectively. Notice that they all share the same size and order with the vertices of the identity mesh. With the reconstruction loss, the mesh predicted by our model will be closer to the ground truth.
|
| 144 |
+
|
| 145 |
+
Edge loss In this work, we also introduce edge loss which is often used in 3D mesh generation tasks [26, 37, 38]. Since the reconstruction loss does not consider the connectivity of mesh vertices, the generated mesh may suffer from flying vertices and overlong edges. Edge loss can help penalize flying vertices and generate smoother surfaces. For every $v \in V_{output}$ , let $\mathcal{N}(v)$ be the neighbor of $v$ , the edge loss can be defined as,
|
| 146 |
+
|
| 147 |
+
$$
|
| 148 |
+
\mathcal {L} _ {e d g} = \sum_ {v} \sum_ {p \in \mathcal {N} (v)} \| v - p \| _ {2} ^ {2} \tag {9}
|
| 149 |
+
$$
|
| 150 |
+
|
| 151 |
+
then we can train our network with the combined loss function $\mathcal{L}$
|
| 152 |
+
|
| 153 |
+
$$
|
| 154 |
+
\mathcal {L} = \lambda_ {r e c} \mathcal {L} _ {r e c} + \mathcal {L} _ {e d g} \tag {10}
|
| 155 |
+
$$
|
| 156 |
+
|
| 157 |
+
where $\lambda_{rec}$ denotes the weight of reconstruction loss.
|
| 158 |
+
|
| 159 |
+
# 4 Experiment
|
| 160 |
+
|
| 161 |
+
Datasets. For the human mesh dataset, we use the same dataset generated by SMPL [23] as [37]. This dataset consists of 30 identities with 800 poses. Each mesh has 6890 vertices. For the training data, we randomly choose 4000 pairs (identity and pose meshes) from 16 identities with 400 poses and shuffle them every epoch. The ground truth meshes will be determined according to the identity and pose parameters from the pairs. Before feeding into our network, every mesh will be shuffled randomly to be close to the real-world problem. Notice that the ground truth mesh will share the
|
| 162 |
+
|
| 163 |
+
Table 1: Quantitative comparison with other methods. We compare our method with DT (needs key point annotations) and NPT using PMD, CD, EMD as our evaluation metrics on both human and animal data. For them, the lower is better. The PMD and CD are in units of $10^{-3}$ and the EMD is in units of $10^{-2}$ .
|
| 164 |
+
|
| 165 |
+
<table><tr><td></td><td>Annotation</td><td>Dataset</td><td>PMD</td><td>CD</td><td>EMD</td></tr><tr><td>DT [34]</td><td>Key points</td><td></td><td>0.15</td><td>0.35</td><td>2.21</td></tr><tr><td>NPT [37]</td><td>-</td><td>SMPL [23]</td><td>0.66</td><td>1.42</td><td>4.22</td></tr><tr><td>Ours</td><td>-</td><td></td><td>0.08</td><td>0.22</td><td>1.89</td></tr><tr><td>DT [34]</td><td>Key points</td><td></td><td>13.37</td><td>35.77</td><td>15.90</td></tr><tr><td>NPT [37]</td><td>-</td><td>SMAL [46]</td><td>6.75</td><td>14.52</td><td>11.65</td></tr><tr><td>Ours</td><td>-</td><td></td><td>2.26</td><td>4.05</td><td>7.28</td></tr></table>
|
| 166 |
+
|
| 167 |
+
same vertex order with identity mesh for the convenience of supervised training and evaluation. For all input meshes, we shift them to the center according to their bounding boxes. When doing the test, we evaluate our model with 14 new identities with 200 unseen poses. We randomly choose 400 pairs for testing. They will be pre-processed in the same manner as the training data. To further evaluate the generalization of our model, we also test our model with FAUST [5] and MG-dataset [4] in the experiment.
|
| 168 |
+
|
| 169 |
+
For the animal mesh dataset, we generate animal training and test data using SMAL model [46]. This dataset has 41 identities with 600 poses. The 41 identities are 21 felidae animals (1 cat, 5 cheetahs, 8 lions, 7 tigers), 5 canidae animals (2 dogs, 1 fox, 1 wolf, 1 hyena), 8 equidae animals (1 deer, 1 horse, 6 zebras), 4 bovidae animals (4 cows), 3 hippopotamidae animals (3 hippos). Every mesh has 3889 vertices. For the training data, we randomly choose 11600 pairs from 29 identities (16 felidae animals, 3 canidae animals, 6 equidae animals, 2 bovidae animals, 2 hippopotamidae animals) with 400 poses. For the test data, we randomly choose 400 pairs from other 12 identities (5 felidae animals, 2 canidae animals, 2 equidae animals, 2 bovidae animals, 1 hippopotamidae animal) with 200 poses. All the inputs are pre-processed in the same manner as we do in the human mesh.
|
| 170 |
+
|
| 171 |
+
Evaluation metrics. Following [37], we use Point-wise Mesh Euclidean Distance (PMD) as one of our evaluation metrics. PMD is the $L2$ distance between the vertices of the output mesh and the ground truth mesh. We also evaluate our model with Chamfer Distance (CD) and Earth Mover's Distance (EMD) proposed in [12]. For PMD, CD and EMD, the lower is better.
|
| 172 |
+
|
| 173 |
+
Implementation details. $\lambda_{rec}$ in the loss function is set as 2000. We implement our model with Pytorch and use Adam optimizer. Please refer to the supplementary material for the details of the network. Our model is trained for 200 epochs on one RTX 3090 GPU, the learning rate is fixed at $1e - 4$ in the first 100 epochs and decays $1e - 6$ each epoch after 100 epochs. The batch size is 8.
|
| 174 |
+
|
| 175 |
+
# 4.1 Comparison with the state-of-the-arts
|
| 176 |
+
|
| 177 |
+
In this section, we compare our method with Deformation Transfer (DT) [34] and Neural Pose Transfer (NPT) [37]. DT needs to rely on the corresponding points labeled by user and a reference mesh, as the additional inputs. Therefore, we test DT with the reference mesh and 11, 19 labeling points on animal data and human data respectively. For [37], their method does not consider any correspondence. We train their model using the implementations provided by the authors. The qualitative results tested on SMPL [23] and SMAL [46] are shown in Figure 3 and Figure 4. As we can see, when tested on human data, DT and our method can produce better results that are close to the ground truth. However, it is very time-consuming for DT when dealing with a new identity and pose mesh pair. The results generated by NPT always do not learn the right pose and are not very smooth on the arms or legs since they do not consider any correspondence. When tested on animal data, DT fails to transfer the pose even if we add more labeling points. Their method does not work when the identity of the mesh pairs are very different. NPT can produce very flat legs and wrong direction faces. In comparison, our method still produces satisfactory results efficiently.
|
| 178 |
+
|
| 179 |
+
We adopt Point-wise Mesh Euclidean Distance (PMD), Chamfer Distance (CD) and Earth Mover's Distance (EMD) to evaluate the generated results of different methods. All metrics are calculated
|
| 180 |
+
|
| 181 |
+

|
| 182 |
+
Figure 3: Qualitative comparison of different methods on human data. The identity and pose meshes are from SMPL [23]. Our method and DT (needs key point annotations) can generate better results than NPT when doing pose transfer on human meshes. The results generated by NPT are always not smooth on the arms or legs. Since DT needs user to label the key point annotations, our method is more efficient and practical than DT.
|
| 183 |
+
|
| 184 |
+

|
| 185 |
+
Figure 4: Qualitative comparison of different methods on animal data. The identity and pose meshes are from SMAL [46]. Our method produces more successful results when doing pose transfer on different animal meshes. Although DT has key point annotations, it still fails to transfer the pose when the identity of the mesh pairs are very different. NPT produces very flat legs and wrong direction faces.
|
| 186 |
+
|
| 187 |
+
between the ground truth and the predicted results. The quantitative results are shown in Table 1. Our 3D-CoreNet outperforms other methods in all metrics over two datasets. When doing pose transfer on animal data that contains more different identities, our method has more advantages.
|
| 188 |
+
|
| 189 |
+
Table 2: Ablation study. We use all 3 measurements here. For them, the lower is better. The PMD and CD are in units of $10^{-3}$ and the EMD is in units of $10^{-2}$ . w/o means without this component. In the third and fourth column, we only use our correspondence learning module without refinement. Corr (C) uses the correlation matrix and Corr $(\mathbf{T}_m)$ uses the optimal matching matrix to learn the correspondence. In w/o ElaIN, we replace our ElaIN with SPAdaIN in [37] to compare them.
|
| 190 |
+
|
| 191 |
+
<table><tr><td>Dataset</td><td></td><td>Corr (C)</td><td>Corr (Tm)</td><td>w/o ElaIN</td><td>w/o Ledges</td><td>Full model</td></tr><tr><td rowspan="3">SMPL [23]</td><td>PMD</td><td>0.46</td><td>0.44</td><td>0.15</td><td>0.14</td><td>0.08</td></tr><tr><td>CD</td><td>1.39</td><td>1.28</td><td>0.37</td><td>0.34</td><td>0.22</td></tr><tr><td>EMD</td><td>3.49</td><td>3.42</td><td>2.57</td><td>2.28</td><td>1.89</td></tr></table>
|
| 192 |
+
|
| 193 |
+

|
| 194 |
+
Figure 5: Ablation study results. We test 5 variants on SMPL [23]. The first two are tested without refinement, Corr (C) uses the correlation matrix and Corr $(\mathbf{T}_m)$ uses the optimal matching matrix to learn the correspondence. The model does not perform well without refinement. Using the optimal matching matrix has a better performance than using correlation matrix. In the third column, the surface of the mesh has clear artifacts and is not smooth when we replace ElaIN with SPAdaIN.
|
| 195 |
+
|
| 196 |
+

|
| 197 |
+
Figure 6: Pose transfer results on human data from different datasets. We test our model on FAUST [5] and MG-dataset [4] which contain different human meshes with SMPL [23]. Our method still has a good performance. Please refer to the supplementary material for more generated results.
|
| 198 |
+
|
| 199 |
+
# 4.2 Ablation study
|
| 200 |
+
|
| 201 |
+
In this section, we study the effectiveness of several components in our 3D-CoreNet on human data. At first, we test our model without the refinement module. We only use our correspondence module with the correlation matrix $\mathbf{C}$ or the optimal matching matrix $\mathbf{T}_m$ respectively. Here, the warped mesh will be viewed as the final output and the reconstruction loss will be calculated between the warped mesh and the ground truth. Then we will compare our ElaIN with SPAdaIN in [37] to verify the effectiveness of ElaIN. And we also test the importance of edge loss $\mathcal{L}_{edg}$ .
|
| 202 |
+
|
| 203 |
+
The results are shown in Table 2 and Figure 5. We evaluate the variants with PMD, CD and EMD. As we can see, when we do not add our refinement module, the model does not perform well both qualitatively and quantitatively. And using the optimal matching matrix has a better performance than using correlation matrix. When we replace our ElaIN with SPAdaIN, the surface of the mesh has clear artifacts and is not smooth. The metrics are also worse than the full model. We can know that ElaIN is very helpful in generating high quality results. We also evaluate the importance of $\mathcal{L}_{edg}$ . The connection between vertices will be better and smoother with the edge loss.
|
| 204 |
+
|
| 205 |
+
# 4.3 Generalization capability
|
| 206 |
+
|
| 207 |
+
To evaluate the generalization capability of our method, we evaluate it on FAUST [5] and MG-dataset [4] in this section. Human meshes in FAUST have the same number of vertices as SMPL [23] and have more unseen identities. In MG-dataset, the human meshes are all dressed which have 27554 vertices each and have more realistic details. As shown in Figure 6, our method can also have a good performance on FAUST and MG-dataset. In the first group, we transfer the pose from FAUST to the identity in SMPL. In the second group, we transfer the pose from SMPL to the identity in MG-dataset. Both of them transfer the pose and keep the identity successfully.
|
| 208 |
+
|
| 209 |
+
# 5 Conclusion
|
| 210 |
+
|
| 211 |
+
In this paper, we propose a correspondence-refinement network (3D-CoreNet) to transfer the pose of source mesh to the target mesh while retaining the identity of the target mesh. 3D-CoreNet learns the correspondence between different meshes and refine the generated meshes jointly. Our method generates high-quality meshes with the proposed ElaIN for refinement. Compared to other methods, our model learns the correspondence without key point labeling and achieves better performance when working on both human and animal meshes. In the future, we will try to achieve the 3D pose transfer in an unsupervised manner.
|
| 212 |
+
|
| 213 |
+
# Acknowledgements
|
| 214 |
+
|
| 215 |
+
This study is supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from the industry partner. This work is supported by A*STAR through the Industry Alignment Fund - Industry Collaboration Projects Grant. This work is also supported by the National Research Foundation, Singapore under its AI Singapore Programme (AISG Award No: AISG-RP-2018-003), and the MOE Tier-1 research grants: RG28/18 (S) and RG95/20.
|
| 216 |
+
|
| 217 |
+
# References
|
| 218 |
+
|
| 219 |
+
[1] Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In International conference on machine learning, pages 214-223. PMLR, 2017.
|
| 220 |
+
[2] Ilya Baran, Daniel Vlasic, Eitan Grinspun, and Jovan Popovic. Semantic deformation transfer. In ACM SIGGRAPH 2009 papers, pages 1-6. 2009.
|
| 221 |
+
[3] Mirela Ben-Chen, Ofir Weber, and Craig Gotsman. Spatial deformation transfer. In Proceedings of the 2009 ACM SIGGRAPH/Eurographics Symposium on Computer Animation, pages 67-74, 2009.
|
| 222 |
+
[4] Bharat Lal Bhatnagar, Garvita Tiwari, Christian Theobalt, and Gerard Pons-Moll. Multi-garment net: Learning to dress 3d people from images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5420-5430, 2019.
|
| 223 |
+
[5] Federica Bogo, Javier Romero, Matthew Loper, and Michael J Black. Faust: Dataset and evaluation for 3d mesh registration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3794-3801, 2014.
|
| 224 |
+
[6] Charlotte Bunne, David Alvarez-Melis, Andreas Krause, and Stefanie Jegelka. Learning generative models across incomparable spaces. In International Conference on Machine Learning, pages 851-861. PMLR, 2019.
|
| 225 |
+
[7] Yugang Chen, Muchun Chen, Chaoyue Song, and Bingbing Ni. Cartoonrenderer: An instance-based multi-style cartoon image translator. In International Conference on Multimedia Modeling, pages 176-187. Springer, 2020.
|
| 226 |
+
[8] Hung-Kuo Chu and Chao-Hung Lin. Example-based deformation transfer for 3d polygon models. J. Inf. Sci. Eng., 26(2):379-391, 2010.
|
| 227 |
+
[9] Nicolas Courty, Rémi Flamary, Devis Tuia, and Alain Rakotomamonjy. Optimal transport for domain adaptation. IEEE transactions on pattern analysis and machine intelligence, 39(9):1853-1865, 2016.
|
| 228 |
+
[10] Michael Defferrard, Xavier Bresson, and Pierre Vandergheynst. Convolutional neural networks on graphs with fast localized spectral filtering. In Proceedings of the 30th International Conference on Neural Information Processing Systems, pages 3844-3852, 2016.
|
| 229 |
+
[11] Ishan Deshpande, Yuan-Ting Hu, Ruoyu Sun, Ayis Pyrros, Nasir Siddiqui, Sanmi Koyejo, Zhizhen Zhao, David Forsyth, and Alexander G Schwing. Max-sliced Wasserstein distance and its use for gans. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10648-10656, 2019.
|
| 230 |
+
|
| 231 |
+
[12] Haoqiang Fan, Hao Su, and Leonidas J Guibas. A point set generation network for 3d object reconstruction from a single image. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 605-613, 2017.
|
| 232 |
+
[13] Yutong Feng, Yifan Feng, Haoxuan You, Xibin Zhao, and Yue Gao. Meshnet: Mesh neural network for 3d shape representation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 8279-8286, 2019.
|
| 233 |
+
[14] Lin Gao, Jie Yang, Yi-Ling Qiao, Yu-Kun Lai, Paul L Rosin, Weiwei Xu, and Shihong Xia. Automatic unpaired shape deformation transfer. ACM Transactions on Graphics (TOG), 37(6):1-15, 2018.
|
| 234 |
+
[15] Michael Garland and Paul S Heckbert. Surface simplification using quadric error metrics. In Proceedings of the 24th annual conference on Computer graphics and interactive techniques, pages 209-216, 1997.
|
| 235 |
+
[16] Shunwang Gong, Lei Chen, Michael Bronstein, and Stefanos Zafeiriou. SpiralNet++: A fast and highly efficient mesh convolution operator. In Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops, 2019.
[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Identity mappings in deep residual networks. In European Conference on Computer Vision, pages 630–645. Springer, 2016.
[18] Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In Proceedings of the IEEE International Conference on Computer Vision, pages 1501–1510, 2017.
[19] Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International Conference on Machine Learning, pages 448–456. PMLR, 2015.
[20] Jiaxin Li, Ben M Chen, and Gim Hee Lee. SO-Net: Self-organizing network for point cloud analysis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 9397–9406, 2018.
[21] Isaak Lim, Alexander Dielen, Marcel Campen, and Leif Kobbelt. A simple approach to intrinsic correspondence learning on unstructured 3D meshes. In Proceedings of the European Conference on Computer Vision (ECCV) Workshops, 2018.
[22] Yanbin Liu, Linchao Zhu, Makoto Yamada, and Yi Yang. Semantic correspondence as an optimal transport problem. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4463–4472, 2020.
[23] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. SMPL: A skinned multi-person linear model. ACM Transactions on Graphics (TOG), 34(6):1–16, 2015.
[24] Qianli Ma, Jinlong Yang, Anurag Ranjan, Sergi Pujades, Gerard Pons-Moll, Siyu Tang, and Michael J Black. Learning to dress 3D people in generative clothing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6469–6478, 2020.
[25] Daniel Maturana and Sebastian Scherer. VoxNet: A 3D convolutional neural network for real-time object recognition. In 2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 922–928. IEEE, 2015.
[26] Junyi Pan, Xiaoguang Han, Weikai Chen, Jiapeng Tang, and Kui Jia. Deep mesh reconstruction from single RGB images via topology modification networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9964–9973, 2019.
[27] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2337–2346, 2019.
[28] Gilles Puy, Alexandre Boulch, and Renaud Marlet. FLOT: Scene flow on point clouds guided by optimal transport. arXiv preprint arXiv:2007.11142, 2020.
[29] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. PointNet: Deep learning on point sets for 3D classification and segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 652–660, 2017.
[30] Charles R Qi, Li Yi, Hao Su, and Leonidas J Guibas. PointNet++: Deep hierarchical feature learning on point sets in a metric space. In Proceedings of the 31st International Conference on Neural Information Processing Systems, pages 5105–5114, 2017.
[31] Anurag Ranjan, Timo Bolkart, Soubhik Sanyal, and Michael J Black. Generating 3D faces using convolutional mesh autoencoders. In Proceedings of the European Conference on Computer Vision (ECCV), pages 704–720, 2018.
[32] Richard Sinkhorn. Diagonal equivalence to matrices with prescribed row and column sums. The American Mathematical Monthly, 74(4):402–405, 1967.
[33] Zhengyu Su, Yalin Wang, Rui Shi, Wei Zeng, Jian Sun, Feng Luo, and Xianfeng Gu. Optimal mass transport for shape matching and comparison. IEEE Transactions on Pattern Analysis and Machine Intelligence, 37(11):2246–2259, 2015.
[34] Robert W Sumner and Jovan Popović. Deformation transfer for triangle meshes. ACM Transactions on Graphics (TOG), 23(3):399–405, 2004.
[35] Qingyang Tan, Lin Gao, Yu-Kun Lai, and Shihong Xia. Variational autoencoders for deforming 3D mesh models. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5841–5850, 2018.
[36] Dmitry Ulyanov, Andrea Vedaldi, and Victor Lempitsky. Instance normalization: The missing ingredient for fast stylization. arXiv preprint arXiv:1607.08022, 2016.
[37] Jiashun Wang, Chao Wen, Yanwei Fu, Haitao Lin, Tianyun Zou, Xiangyang Xue, and Yinda Zhang. Neural pose transfer by spatially adaptive instance normalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5831–5839, 2020.
[38] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2Mesh: Generating 3D mesh models from single RGB images. In Proceedings of the European Conference on Computer Vision (ECCV), pages 52–67, 2018.
[39] Jiqing Wu, Zhiwu Huang, Dinesh Acharya, Wen Li, Janine Thoma, Danda Pani Paudel, and Luc Van Gool. Sliced Wasserstein generative models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3713–3722, 2019.
[40] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaoou Tang, and Jianxiong Xiao. 3D ShapeNets: A deep representation for volumetric shapes. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1912–1920, 2015.
[41] Jie Yang, Lin Gao, Yu-Kun Lai, Paul L Rosin, and Shihong Xia. Biharmonic deformation transfer with automatic key point selection. Graphical Models, 98:1–13, 2018.
[42] Wang Yifan, Noam Aigerman, Vladimir G Kim, Siddhartha Chaudhuri, and Olga Sorkine-Hornung. Neural cages for detail-preserving 3D deformations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 75–83, 2020.
[43] Bo Zhang, Mingming He, Jing Liao, Pedro V Sander, Lu Yuan, Amine Bermak, and Dong Chen. Deep exemplar-based video colorization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8052–8061, 2019.
[44] Pan Zhang, Bo Zhang, Dong Chen, Lu Yuan, and Fang Wen. Cross-domain correspondence learning for exemplar-based image translation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5143–5153, 2020.
[45] Keyang Zhou, Bharat Lal Bhatnagar, and Gerard Pons-Moll. Unsupervised shape and pose disentanglement for 3D meshes. In European Conference on Computer Vision, pages 341–357. Springer, 2020.
[46] Silvia Zuffi, Angjoo Kanazawa, David W Jacobs, and Michael J Black. 3D Menagerie: Modeling the 3D shape and pose of animals. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6365–6373, 2017.
3dposetransferwithcorrespondencelearningandmeshrefinement/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8db524e851fd921470cc71e1002484f9b446eced3c697cb75f6be5880d6b74bd
+size 390136
3dposetransferwithcorrespondencelearningandmeshrefinement/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c24426a4c3888686381e7290378a2d7202485063993946328422f32f460d6efa
+size 341917
3dsiamesevoxeltobevtrackerforsparsepointclouds/185adf91-1e9f-4eff-8042-c86b993ddd65_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b2ee847ac72fe3b7c3d24e6a7d1b59694f2c6ddd18da2fcf5b09b14d4fb7159
+size 82108
3dsiamesevoxeltobevtrackerforsparsepointclouds/185adf91-1e9f-4eff-8042-c86b993ddd65_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56a0d843b1f2ef0cff109b312fae9f82f3f021ac8805241ef491591af6c7c35a
+size 106126
3dsiamesevoxeltobevtrackerforsparsepointclouds/185adf91-1e9f-4eff-8042-c86b993ddd65_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fa615e1e6412351c5feeddeac25b7b1cbfa10245c343d6284175715d74d0f62
+size 702769
3dsiamesevoxeltobevtrackerforsparsepointclouds/full.md
ADDED
@@ -0,0 +1,313 @@
# 3D Siamese Voxel-to-BEV Tracker for Sparse Point Clouds

Le Hui†, Lingpeng Wang†, Mingmei Cheng, Jin Xie*, Jian Yang*
PCA Lab, Nanjing University of Science and Technology, China
{le.hui, cslpwang, chengmm, csjxie, csjyang}@njust.edu.cn

# Abstract

3D object tracking in point clouds is still a challenging problem due to the sparsity of LiDAR points in dynamic environments. In this work, we propose a Siamese voxel-to-BEV tracker, which can significantly improve the tracking performance in sparse 3D point clouds. Specifically, it consists of a Siamese shape-aware feature learning network and a voxel-to-BEV target localization network. The Siamese shape-aware feature learning network captures 3D shape information of the object to learn discriminative features, so that the potential target can be distinguished from the background in sparse point clouds. To this end, we first perform template feature embedding to embed the template's feature into the potential target and then generate a dense 3D shape to characterize the shape information of the potential target. For localizing the tracked target, the voxel-to-BEV target localization network regresses the target's 2D center and the $z$-axis center from the dense bird's eye view (BEV) feature map in an anchor-free manner. Concretely, we compress the voxelized point cloud along the $z$-axis through max pooling to obtain a dense BEV feature map, where the regression of the 2D center and the $z$-axis center can be performed more effectively. Extensive evaluation on the KITTI and nuScenes datasets shows that our method outperforms the current state-of-the-art methods by a large margin. Code is available at https://github.com/fpthink/V2B.
# 1 Introduction

Object tracking is an essential task in computer vision and has been widely used in various applications, such as autonomous vehicles, mobile robotics, and augmented reality. In the past few years, many efforts [33, 2, 12, 64] have been made on 2D object tracking from RGB data. Recently, with the development of 3D sensors such as LiDAR and Kinect, 3D object tracking [37, 72, 53, 28, 42] has attracted more attention. Lately, some pioneering works [22, 18, 52] have focused on point cloud based 3D object tracking. However, due to the sparsity of 3D point clouds, 3D object tracking on point clouds is still a challenging task.

Few works are dedicated to 3D single object tracking (SOT) with only point clouds. As a pioneer, SC3D [21] is the first 3D Siamese tracker; it performs matching between the template and candidate 3D target proposals generated by Kalman filtering [22]. Furthermore, a shape completion network is used to enhance the shape information of candidate proposals in sparse point clouds, thereby improving the accuracy of matching. However, SC3D cannot be trained end to end, and it consumes much time when matching exhaustive candidate proposals. To address these concerns, Qi et al. [52] proposed an end-to-end framework termed P2B, which first localizes potential target centers in the search area via Hough voting [48], and then aggregates vote clusters to generate target proposals. Nonetheless, when facing sparse scenes, P2B may not track the object accurately, or may even lose the tracked object. On the one hand, it adopts random sampling to generate initial seed points, which further exacerbates the sparsity of point clouds. On the other hand, it is difficult to generate high-quality target proposals on sparse 3D point clouds. Although SC3D enhances the shape information of candidate proposals, the low-quality candidate proposals obtained from sparse point clouds still degrade tracking performance.

As shown in Fig. 1, we count the number of points on KITTI's cars. It can be found that $51\%$ of cars have fewer than 100 points, and only $7\%$ of cars have more than 2500 points. Due to this sparsity, it is difficult to distinguish the target from the background, so how to improve tracking performance in sparse scenes must be considered. Our intuition is twofold. First, enhancing the shape information of the target provides discriminative information to distinguish the target from the background, especially in sparse point clouds. Second, due to the sparsity of the point cloud, it is difficult to regress the target center in 3D space.

We hence consider compressing the sparse 3D space into a dense 2D space, and performing center regression in the dense 2D space to improve tracking performance.

![](images/f1bd2a99a5e09eba4c07f364b3df52a4b6e896ec6054e0e68141dd3105b59560.jpg)
Figure 1: Statistics of the number of points on KITTI's cars. Cars are colored in red.
In this paper, we propose a novel Siamese voxel-to-BEV (V2B) tracker, which aims to improve the performance of 3D single object tracking, especially in sparse point clouds. We illustrate our framework in Fig. 2. We first feed the template and search area into the Siamese network to extract point features. Then, we employ global and local template feature embedding to strengthen the correlation between the template and search area, so that the potential target in the search area can be effectively localized. After that, we introduce a shape-aware feature learning module to learn dense geometric features of the potential target, where complete and dense point clouds of the target are generated. Thus, the geometric structures of the potential target can be captured better, and the potential target can be effectively distinguished from the background in the search area. Finally, we develop a voxel-to-BEV target localization network to localize the target in the search area. To avoid using low-quality proposals on sparse point clouds for target center prediction, we directly regress the 3D center of the target with the highest response in the dense bird's eye view (BEV) feature map, where the dense BEV feature map is generated by voxelizing the learned dense geometric features and performing max pooling along the $z$-axis. Thus, with the constructed dense BEV feature map, our method can more accurately localize the target center in sparse point clouds without any proposal.

In summary, we propose a novel Siamese voxel-to-BEV tracker that significantly improves tracking performance, especially in sparse point clouds. We develop a Siamese shape-aware feature learning network that introduces shape information to enhance the discrimination of the potential target in the search area. We develop a voxel-to-BEV target localization network, which detects the 3D target's center more accurately in the dense BEV space than in the sparse 3D space. Extensive results show that our method achieves new state-of-the-art results on the KITTI dataset [20] and generalizes well to the nuScenes dataset [6].

# 2 Related Work
2D object tracking. Numerous schemes [5, 22, 34, 74, 1] have been presented and have achieved impressive results in 2D object tracking. Early works are mainly based on correlation filtering. As a pioneer, MOSSE [4] presents stable correlation filters for visual tracking. After that, correlation-based methods use circulant matrices [25], kernelized correlation filters [26], continuous convolution filters [13], and factorized convolution operators [12] to improve tracking performance. In recent years, Siamese-based methods [16, 24] have become more popular in the tracking field. In [2], Bertinetto et al. proposed SiamFC, a pioneering work that combines naive feature correlation with a fully-convolutional Siamese network for object tracking. Subsequently, various improvements [85, 68, 75, 82, 76] have been made to Siamese trackers, such as combining them with a region proposal network [17, 39, 80, 65] or an anchor-free FCOS detector [11], using a deeper architecture [38] or a two-branch structure [23], exploiting attention [67, 84] or self-attention [7], and applying a triplet loss [14]. However, these methods are specially designed for 2D object tracking, so they cannot be directly applied to 3D point clouds.
3D single object tracking. Early 3D single object tracking (SOT) methods focus on RGB-D information. As a pioneer, Song et al. [59] first proposed a unified RGB-D tracking dataset of 100 videos, which opened up a new research direction for RGB-D tracking [3, 43, 29]. Based on RGB-D information, 3D SOT methods [60, 45, 53] usually combine techniques from 2D tracking with additional depth information. However, RGB-D tracking relies on RGB information, and it may fail when the RGB information is degraded. Recent efforts [46, 69] begin to use LiDAR point clouds for 3D single object tracking. Among them, SC3D [21] is the first 3D Siamese tracker, but it is not an end-to-end framework. Following it, Re-Track [18] is a two-stage framework that re-tracks, in the fine stage, the objects lost in the coarse stage. Lately, Qi et al. [52] proposed P2B, which resolves SC3D's inability to train end to end and its high time consumption. P2B uses VoteNet [48] to generate proposals and selects the proposal with the highest score as the target. Building on P2B, to handle sparse and incomplete target shapes, BAT [83] introduces a box-aware feature module to enhance the correlation learning between the template and search area. Nonetheless, in very sparse scenarios, the VoteNet used in P2B and BAT may have difficulty generating high-quality proposals, resulting in performance degradation.

3D multi-object tracking. Most 3D multi-object tracking (MOT) systems follow the same schemes as 2D multi-object tracking systems; the only difference is that 2D detection methods are replaced by 3D detection methods. Most 3D MOT methods [73, 55, 30] adopt tracking-by-detection schemes. Specifically, they first use a 3D object detector [57, 58, 56] to detect numerous objects in each frame, and then exploit data association between the detection results of two frames to match the corresponding objects. For data association, early works [54] use handcrafted features such as spatial distance. Instead, modern 3D trackers use motion information obtained by 3D Kalman filters [47, 10, 71] and learned deep features [78, 81].

Deep learning on point clouds. With the introduction of PointNet [49], 3D deep learning on point clouds has stimulated the interest of researchers. Existing methods can be mainly divided into point-based [51, 40, 63, 9], volumetric-based [50, 44], graph-based [70, 36, 35, 66, 8, 27], and view-based [61, 62, 77] methods. However, volumetric-based and view-based methods lose fine-grained geometric information due to voxelization and projection, while graph-based methods are not suitable for sparse point clouds, since few points cannot provide sufficient local geometric information for constructing a graph. Thus, existing 3D tracking networks [21, 18, 52, 83] are point-based methods.
# 3 Method

Our work is specifically designed for 3D single object tracking in sparse point clouds. An overview of our framework is depicted in Fig. 2. We first present the Siamese shape-aware feature learning network, which enhances the discrimination of the potential target in the search area (Sec. 3.1). We then localize the target with the voxel-to-BEV target localization network (Sec. 3.2).

# 3.1 Siamese Shape-Aware Feature Learning Network

# 3.1.1 Template Feature Embedding

Suppose the size of the template is $N$ and the size of the search area is $M$ (generally, $M > N$). Before template feature embedding, we first use the Siamese network to extract point features of the template and search area, denoted by $P = \{p_i\}_{i=1}^N$ and $Q = \{q_j\}_{j=1}^M$. The Siamese network consists of a template branch and a detection branch. To reduce the inference time of the network, we simply use PointNet++ [51] as the backbone and share parameters; it can be replaced with a more powerful network such as KPConv [63]. We then employ template feature embedding to encode the search area by learning the similarity of the global shape and local geometric structures between the template and search area. Template feature embedding is illustrated in the left half of Fig. 3.

![](images/4e4e2b8a14791296040347d7a2b068476bcb0a78dec9a3243a0eca1452a6716d.jpg)
Figure 2: The architecture of V2B. Given a template and search area, we first use the Siamese network to obtain template and search area features. We then perform template feature embedding and shape-aware feature learning to enhance the ability to distinguish the target from the background. Finally, we perform voxel-to-BEV target localization to detect the 3D object center from the BEV.

![](images/5790b6183797ad5d2cb15c851b0d93daa95e01573e076ac0e9e0c422d2d6d013.jpg)
Template global feature embedding. We use a multi-layer perceptron (MLP) to adaptively learn the correlation between the template and search area. The similarity between the template and search area is formulated as:

$$
\boldsymbol{w}_{ij} = f_{\mathrm{corr}}(\boldsymbol{p}_i, \boldsymbol{q}_j) = \operatorname{MLP}(\boldsymbol{p}_i - \boldsymbol{q}_j), \quad \forall \boldsymbol{p}_i \in P, \boldsymbol{q}_j \in Q \tag{1}
$$

where $\boldsymbol{p}_i - \boldsymbol{q}_j$ characterizes the difference between the two feature vectors and $\boldsymbol{w}_{ij} \in \mathbb{R}^C$ is the correlation weight between the two points. The global shape information of the template is given by:

$$
\boldsymbol{q}_j' = f_{\mathrm{emb}}(\boldsymbol{q}_j, \boldsymbol{p}_1, \boldsymbol{p}_2, \dots, \boldsymbol{p}_N) = \operatorname{MLP}\left(\max_{i=1,2,\dots,N} \{\boldsymbol{p}_i \cdot \boldsymbol{w}_{ij}\}\right), \quad \forall \boldsymbol{q}_j \in Q \tag{2}
$$

where $\max$ denotes the max pooling function and $\boldsymbol{w}_{ij}$ is the correlation weight. The obtained $\boldsymbol{q}_j' \in \mathbb{R}^C$ accounts for the similarity between the template and search area, and characterizes the global shape information of the target through the max pooling function.
Template local feature embedding. To characterize the local similarity between the template and search area, we first obtain the similarity map by computing the cosine distance between them. The similarity function $f_{\mathrm{sim}}$ is written as:

$$
s_{ij} = f_{\mathrm{sim}}(\boldsymbol{p}_i, \boldsymbol{q}_j) = \frac{\boldsymbol{p}_i^\top \boldsymbol{q}_j}{\|\boldsymbol{p}_i\|_2 \cdot \|\boldsymbol{q}_j\|_2}, \quad \forall \boldsymbol{p}_i \in P, \boldsymbol{q}_j \in Q \tag{3}
$$

where $s_{ij}$ indicates the similarity between points $i$ and $j$. We then assign each point in the search area its most similar point in the template:

$$
\boldsymbol{q}_j'' = \operatorname{MLP}\left(\left[\boldsymbol{q}_j, s_{kj}, \boldsymbol{p}_k, \boldsymbol{x}_k\right]\right), \quad k = \operatorname*{argmax}_{i=1,2,\dots,N} \{f_{\mathrm{sim}}(\boldsymbol{p}_i, \boldsymbol{q}_j)\}, \quad \forall \boldsymbol{q}_j \in Q \tag{4}
$$

where $k$ indicates the index of the maximum similarity value, and $s_{kj}$ and $\boldsymbol{x}_k$ are the corresponding maximum value and 3D coordinate, respectively. $[\cdot, \cdot, \cdot, \cdot]$ represents the concatenation operator. We hence obtain the embedded feature $\boldsymbol{q}_j'' \in \mathbb{R}^C$ of the $j$-th point in the search area after applying the MLP. Finally, we concatenate the obtained global and local feature maps to obtain an enhanced feature map $F = \{\boldsymbol{f}_j\}_{j=1}^M$, where $\boldsymbol{f}_j = \operatorname{MLP}([\boldsymbol{q}_j', \boldsymbol{q}_j''])$.
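To make Eqs. (1)–(4) concrete, the following is a minimal PyTorch sketch of the two embeddings. It is our own illustration, not the authors' released code; the module name, single-layer MLPs, and layer widths are assumptions.

```python
import torch
import torch.nn as nn


class TemplateFeatureEmbedding(nn.Module):
    """Sketch of global (Eqs. 1-2) and local (Eqs. 3-4) template feature embedding."""

    def __init__(self, c: int):
        super().__init__()
        self.corr_mlp = nn.Sequential(nn.Linear(c, c), nn.ReLU(), nn.Linear(c, c))
        self.glob_mlp = nn.Sequential(nn.Linear(c, c), nn.ReLU())
        self.loc_mlp = nn.Sequential(nn.Linear(2 * c + 4, c), nn.ReLU())
        self.fuse_mlp = nn.Sequential(nn.Linear(2 * c, c), nn.ReLU())

    def forward(self, p: torch.Tensor, q: torch.Tensor, xyz_t: torch.Tensor):
        # p: (N, C) template features, q: (M, C) search features, xyz_t: (N, 3).
        w = self.corr_mlp(p.unsqueeze(1) - q.unsqueeze(0))            # Eq. (1): (N, M, C)
        # Eq. (2): weight the template features, then max-pool over template points.
        q_glob = self.glob_mlp((p.unsqueeze(1) * w).max(dim=0).values)  # (M, C)
        # Eq. (3): cosine similarity between every template/search pair.
        s = nn.functional.normalize(p, dim=1) @ nn.functional.normalize(q, dim=1).t()
        s_max, k = s.max(dim=0)                   # most similar template point per q_j
        # Eq. (4): concatenate q_j, s_kj, p_k and the matched coordinate x_k.
        q_loc = self.loc_mlp(torch.cat([q, s_max.unsqueeze(1), p[k], xyz_t[k]], dim=1))
        return self.fuse_mlp(torch.cat([q_glob, q_loc], dim=1))       # (M, C)


emb = TemplateFeatureEmbedding(c=128)
out = emb(torch.randn(512, 128), torch.randn(1024, 128), torch.randn(512, 3))
```

Note that the pairwise $(N, M, C)$ tensor in Eq. (1) makes this memory-hungry for large point sets; the paper's $N = 512$ and $M = 1024$ keep it tractable.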
# 3.1.2 Shape-Aware Feature Learning

Because the point clouds of the potential target in the search area are sparse and incomplete, we employ shape-aware feature learning to learn dense geometric features of the target, from which dense and complete point clouds of the target can be obtained. It is expected that the features learned from the generated dense point clouds characterize the geometric structures of the target better.

Dense ground truth processing. To obtain the dense 3D point cloud ground truth, we first crop and center the points lying inside the target's ground truth bounding box in all frames. We then concatenate all cropped and centered points to generate a dense aligned 3D point cloud, denoted by $X = \{x_i\}_{i=1}^{2048}$, where $x_i$ is a 3D position; we fix the number of points to 2048 by randomly discarding or duplicating points, as sketched below.
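A small helper showing one common way to fix the point count by random discarding or duplication, as described above (a sketch; the function name is ours):

```python
import numpy as np

def resample_points(pts: np.ndarray, n: int = 2048) -> np.ndarray:
    # Sample without replacement to discard points if there are too many,
    # or with replacement to duplicate points if there are too few.
    idx = np.random.choice(len(pts), size=n, replace=len(pts) < n)
    return pts[idx]
```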
![](images/38893ec3d7b0f6f6eb02cebbbcd64f067b1bb7e1a2e74c187c798e19f9ba4f50.jpg)
Figure 3: The architecture of template feature embedding and shape-aware feature learning.

![](images/5c1fc32a4ee977c8a08b84fcb5fdbaa76d56de9e68cab77b97206176f1e3df8f.jpg)

Shape information encoding. We depict the network structure in the right half of Fig. 3. Suppose the input point feature $\boldsymbol{F} \in \mathbb{R}^{M \times C}$ has been embedded with the template information. Before generating a dense and complete point cloud of the target, we first use a gate mechanism to enhance the feature of the potential target and suppress the background in the search area:

$$
\boldsymbol{F}' = \sigma(\boldsymbol{F}\boldsymbol{W}^\top + b) \circ \boldsymbol{F} \tag{5}
$$

where $\boldsymbol{F}' \in \mathbb{R}^{M \times C}$ is the enhanced feature map, $\sigma$ is the sigmoid function, and $\circ$ is the element-wise product. $\boldsymbol{W} \in \mathbb{R}^{1 \times C}$ is a learned weight. It is expected that the potential target provides more information for generating a dense and complete point cloud of the target. Once we obtain the enhanced feature map, we execute the feature expansion operation [79] to enlarge the feature map from $M \times C$ to $2048 \times C$, i.e., the number of points increases from $M$ to 2048. Then, we capture the global shape information and local geometric structure of the potential target to generate the complete and dense 3D shape. On the one hand, we exploit max pooling combined with fully connected layers to capture global shape information. On the other hand, we adopt EdgeConv [70] to capture local geometric information of the target. After that, we augment the local feature of each point with the global shape information, yielding a new feature map of size $2048 \times 2C$. Finally, we adopt an MLP to generate 3D coordinates, denoted by $\hat{X} = \{\hat{x}_i\}_{i=1}^{2048}$. To train the shape generation network, we follow [21, 15] and use the Chamfer distance (CD) loss to enforce the network to generate a realistic 3D point cloud. The Chamfer distance measures the similarity between the generated point cloud and the dense ground truth:

$$
\mathcal{L}_{\mathrm{shape}} = \sum_{\boldsymbol{x}_i \in X} \min_{\hat{\boldsymbol{x}}_j \in \hat{X}} \|\boldsymbol{x}_i - \hat{\boldsymbol{x}}_j\|_2^2 + \sum_{\hat{\boldsymbol{x}}_j \in \hat{X}} \min_{\boldsymbol{x}_i \in X} \|\boldsymbol{x}_i - \hat{\boldsymbol{x}}_j\|_2^2 \tag{6}
$$

By minimizing the CD loss, we learn dense geometric features of the potential target in the search area by generating a dense and complete point cloud of the target. Note that shape information encoding is only performed during training and is discarded during testing; it therefore does not increase the inference time of object tracking.
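As a sketch of Eqs. (5) and (6) in PyTorch (our own illustration; variable names and sizes are assumptions):

```python
import torch
import torch.nn as nn

M, C = 1024, 128                        # assumed search-area size and channel width
feats = torch.randn(M, C)               # point features F after template embedding

# Eq. (5): per-point soft gating; nn.Linear(C, 1) holds W (1 x C) and the bias b.
gate = nn.Linear(C, 1)
f_gated = torch.sigmoid(gate(feats)) * feats        # F' = sigma(F W^T + b) o F

# Eq. (6): symmetric Chamfer distance between the generated cloud and the
# dense ground truth, both of shape (num_points, 3).
def chamfer_distance(x: torch.Tensor, x_hat: torch.Tensor) -> torch.Tensor:
    d = torch.cdist(x, x_hat) ** 2                  # squared pairwise distances
    return d.min(dim=1).values.sum() + d.min(dim=0).values.sum()

loss_shape = chamfer_distance(torch.randn(2048, 3), torch.randn(2048, 3))
```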
Although SC3D [21] also uses shape completion to encode shape information of the target, the template completion model in SC3D cannot recover complex geometric structures of the potential target well, due to the limited templates and the large variations of the potential target in the search area. In contrast, our method learns the target completion model from samples of search areas, with the gate mechanism enhancing the feature of the potential target and suppressing the background. In addition, the template completion model in SC3D only employs PointNet [49] to extract point features of sparse point clouds, while our target completion model constructs a global-local branch to extract both global shape features and local geometric features of sparse point clouds.

# 3.2 Voxel-to-BEV Target Localization Network

To avoid using low-quality proposals on sparse point clouds for target center prediction, we develop a simple yet effective target center localization network without any proposals, improving the localization precision in sparse point clouds.
# 3.2.1 Dense BEV Feature Map Generation

To improve the localization precision in sparse point clouds, we utilize voxelization and max pooling to convert the learned discriminative features of sparse 3D points into a dense bird's eye view (BEV) feature map for target localization, as shown in the right half of Fig. 2. We first convert the point features of the search area into a volumetric representation by averaging the 3D coordinates and features of the points in the same voxel bin. Then, we apply a stack of 3D convolutions on the voxelized feature map to aggregate the features of the potential target in the search area, so that the voxels lying on the target are encoded with rich target information. However, in the sparse volume space, due to the large number of empty voxels, the differences between the responses in the voxelized feature map may not be pronounced. The highest response in the feature map is then difficult to distinguish from the low responses, leading to inaccurate regression of the 3D center of the target, including the $z$-axis center. By performing max pooling on the voxelized feature map along the $z$-axis, we obtain the dense BEV feature map, where the low responses in the voxelized feature map are suppressed. Thus, compared to the voxelized feature map, we can more accurately localize the 2D center of the target with the highest response in the dense BEV feature map. The response of the 2D center (i.e., the max-pooled feature along the $z$-axis) in the BEV feature map contains the geometric structure information of the potential target, while the responses of other points in the BEV feature map do not. In addition, we apply a stack of 2D convolutions on the dense BEV feature map to aggregate features, so that the potential target obtains sufficient local information in the BEV feature map. Thus, with the constructed dense BEV feature map, our method can more accurately localize the target center in sparse point clouds without any proposal.
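A sketch of the voxelize-then-max-pool step (our illustration; the 3D/2D convolution stacks are omitted, and the grid bounds and helper name are assumptions):

```python
import torch

def points_to_bev(xyz: torch.Tensor, feats: torch.Tensor, v: float,
                  mins: tuple, dims: tuple) -> torch.Tensor:
    """xyz: (M, 3) points, feats: (M, C). Returns a dense (C, H, W) BEV map."""
    H, W, D = dims
    idx = ((xyz - torch.tensor(mins)) / v).long()
    idx[:, 0].clamp_(0, H - 1); idx[:, 1].clamp_(0, W - 1); idx[:, 2].clamp_(0, D - 1)
    flat = (idx[:, 0] * W + idx[:, 1]) * D + idx[:, 2]   # flattened voxel index
    # Average the features of points falling into the same voxel bin.
    vol = feats.new_zeros(H * W * D, feats.shape[1])
    cnt = feats.new_zeros(H * W * D, 1)
    vol.index_add_(0, flat, feats)
    cnt.index_add_(0, flat, torch.ones(len(feats), 1))
    vol = (vol / cnt.clamp(min=1)).view(H, W, D, -1)
    # Max-pool along z to suppress empty/low responses and densify the map.
    return vol.max(dim=2).values.permute(2, 0, 1)        # (C, H, W)

bev = points_to_bev(torch.rand(1024, 3) * 9.6 - 4.8,     # assumed search-area extent
                    torch.randn(1024, 128), v=0.3,
                    mins=(-4.8, -4.8, -2.4), dims=(33, 33, 17))
```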
# 3.2.2 Target Localization in BEV

Inspired by [19], we develop a simple yet powerful network to detect the 2D center and the $z$-axis center from the obtained dense BEV feature map. As shown in the right half of Fig. 2, it consists of three heads: a 2D-center head, an offset & rotation head, and a $z$-axis head. The 2D-center head localizes the 2D center of the target on the $x$-$y$ plane, and the $z$-axis head regresses the target center along the $z$-axis. Since the 2D center on the 2D grid is discrete, we also regress the offset between it and the continuous center; thus, the offset & rotation head regresses the offset plus an additional rotation.

Target center parameterization. Given the voxel size $v$ and the range of the search area $[(x_{min}, x_{max}), (y_{min}, y_{max})]$ in the $x$-$y$ plane, the resolution of the BEV feature map is $H = \left\lfloor \frac{x_{max} - x_{min}}{v} \right\rfloor + 1$ and $W = \left\lfloor \frac{y_{max} - y_{min}}{v} \right\rfloor + 1$, where $\lfloor \cdot \rfloor$ is the floor operation. Given the 3D center $(x, y, z)$ of the target ground truth, we compute the 2D target center $c = (c_x, c_y)$ in the $x$-$y$ plane by $c_x = \frac{x - x_{min}}{v}$ and $c_y = \frac{y - y_{min}}{v}$. The discrete 2D center $\tilde{c} = (\tilde{c}_x, \tilde{c}_y)$ is defined by $\tilde{c}_x = \lfloor c_x \rfloor$ and $\tilde{c}_y = \lfloor c_y \rfloor$.
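For instance, under an assumed search-area extent, the parameterization works out as follows (illustrative numbers only):

```python
import math

# Assumed numbers: search area x, y in [-4.8, 4.8] m, voxel size v = 0.3 m.
v, x_min, x_max, y_min, y_max = 0.3, -4.8, 4.8, -4.8, 4.8
H = math.floor((x_max - x_min) / v) + 1      # 33
W = math.floor((y_max - y_min) / v) + 1      # 33

x, y = 1.25, -0.70                           # hypothetical ground-truth center
cx, cy = (x - x_min) / v, (y - y_min) / v    # continuous center: ~(20.17, 13.67)
cx_d, cy_d = math.floor(cx), math.floor(cy)  # discrete center: (20, 13)
offset = (cx - cx_d, cy - cy_d)              # residual handled by the offset head
```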
2D-center head. Following [19], we obtain the target center's ground truth map $\mathcal{H} \in \mathbb{R}^{H \times W \times 1}$. For a pixel $(i, j)$ in the 2D bounding box, if $i = \tilde{c}_x$ and $j = \tilde{c}_y$, then $\mathcal{H}_{ij} = 1$; otherwise $\mathcal{H}_{ij} = \frac{1}{d+1}$, where $d$ is the Euclidean distance between the pixel $(i, j)$ and the target center $(\tilde{c}_x, \tilde{c}_y)$. For any pixel outside the 2D bounding box, $\mathcal{H}_{ij}$ is set to 0. In the training phase, we enforce the predicted map $\hat{\mathcal{H}} \in \mathbb{R}^{H \times W \times 1}$ to approach the ground truth $\mathcal{H}$ using the focal loss [41]. The modified focal loss is formulated as:

$$
\mathcal{L}_{\mathrm{center}} = -\sum_{i,j} \Big( \mathbb{I}[\mathcal{H}_{ij} = 1] \cdot (1 - \hat{\mathcal{H}}_{ij})^{\alpha} \log(\hat{\mathcal{H}}_{ij}) + \mathbb{I}[\mathcal{H}_{ij} \neq 1] \cdot (1 - \mathcal{H}_{ij})^{\beta} (\hat{\mathcal{H}}_{ij})^{\alpha} \log(1 - \hat{\mathcal{H}}_{ij}) \Big) \tag{7}
$$

where $\mathbb{I}(\text{cond.})$ is the indicator function: $\mathbb{I}(\text{cond.}) = 1$ if cond. is true, and 0 otherwise. We empirically set $\alpha = 2$ and $\beta = 4$ in all experiments.
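A direct PyTorch transcription of Eq. (7) might look like the following (a sketch; the epsilon guard and tensor shapes are our assumptions):

```python
import torch

def center_focal_loss(h_hat: torch.Tensor, h: torch.Tensor,
                      alpha: float = 2.0, beta: float = 4.0) -> torch.Tensor:
    """Modified focal loss of Eq. (7); h_hat and h are (H, W) maps in [0, 1]."""
    pos = h.eq(1.0)                              # pixels at the discrete center
    eps = 1e-6                                   # numerical guard (our addition)
    pos_loss = (1 - h_hat[pos]) ** alpha * torch.log(h_hat[pos] + eps)
    neg_loss = ((1 - h[~pos]) ** beta * h_hat[~pos] ** alpha
                * torch.log(1 - h_hat[~pos] + eps))
    return -(pos_loss.sum() + neg_loss.sum())

h = torch.rand(33, 33).clamp(max=0.99); h[20, 13] = 1.0
loss_center = center_focal_loss(torch.rand(33, 33).clamp(0.01, 0.99), h)
```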
Offset & rotation head. Since the continuous 2D object center is converted into a discrete one by the floor operation, we regress the offset to the continuous ground truth center. To improve the accuracy of the regression, we consider a square area with radius $r$ around the object center, and we also add rotation regression. Given a predicted map $\hat{\mathcal{O}} \in \mathbb{R}^{H \times W \times 3}$, where the three channels are the 2D coordinate offset plus the rotation, the offset and rotation error is expressed as:

$$
\mathcal{L}_{\mathrm{off}} = \sum_{\Delta x = -r}^{r} \sum_{\Delta y = -r}^{r} \left| \hat{\mathcal{O}}_{\tilde{c} + (\Delta x, \Delta y)} - \left[ c - \tilde{c} + (\Delta x, \Delta y), \theta \right] \right| \tag{8}
$$

where $\tilde{c}$ and $c$ are the discrete and continuous positions of the ground truth center, respectively, $\theta$ is the ground truth rotation angle, and $[\cdot, \cdot]$ is the concatenation operation.

$z$-axis head. We directly regress the $z$-axis location of the target center from the BEV feature map. Given a predicted map $\hat{\mathcal{Z}} \in \mathbb{R}^{H \times W \times 1}$, we use the $L_1$ loss to compute the $z$-axis center error:

$$
\mathcal{L}_{z} = \left| \hat{\mathcal{Z}}_{\tilde{c}} - z \right| \tag{9}
$$

where $\tilde{c}$ is the discrete object center and $z$ is the ground truth $z$-axis center.

The final loss of our network is $\mathcal{L}_{\mathrm{total}} = \lambda_1 \mathcal{L}_{\mathrm{shape}} + \lambda_2 (\mathcal{L}_{\mathrm{center}} + \mathcal{L}_{\mathrm{off}}) + \lambda_3 \mathcal{L}_z$, where $\lambda_1$, $\lambda_2$, and $\lambda_3$ are the hyperparameters for shape generation, 2D center and offset regression, and $z$-axis position regression, respectively. In the experiments, we set $\lambda_1 = 10^{-6}$, $\lambda_2 = 1.0$, and $\lambda_3 = 2.0$.
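Putting the pieces together with the weights quoted above (a sketch; the placeholder loss values stand in for the terms computed from Eqs. (6)-(9)):

```python
import torch

loss_shape  = torch.tensor(1.3e4)   # placeholder values for illustration only
loss_center = torch.tensor(0.8)
loss_off    = torch.tensor(0.2)
loss_z      = torch.tensor(0.1)

# lambda_1 = 1e-6, lambda_2 = 1.0, lambda_3 = 2.0, as set in the paper.
loss_total = 1e-6 * loss_shape + 1.0 * (loss_center + loss_off) + 2.0 * loss_z
```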
# 4 Experiments

# 4.1 Experimental Settings

Datasets. For 3D single object tracking, we use the KITTI [20] and nuScenes [6] datasets for training and evaluation. Since the ground truth of the KITTI test set is not available, we follow [21, 52] and use the training set to train and evaluate our method. It contains 21 video sequences and 8 types of objects. We use scenes 0-16 for training, scenes 17-18 for validation, and scenes 19-20 for testing. For the nuScenes dataset, we use its validation set to evaluate the generalization ability of our method. Note that the nuScenes dataset only labels key frames, so we report the performance evaluated on the key frames.

Evaluation metrics. For 3D single object tracking, we use the Success and Precision defined in the one pass evaluation (OPE) [32] to evaluate the tracking performance of different methods. Success measures the IoU between the predicted and ground truth bounding boxes, while Precision measures the AUC of the error distance between the centers of the two bounding boxes.

Implementation details. Following [52], we set the numbers of points to $N = 512$ and $M = 1024$ for the template and search area by randomly discarding and duplicating points. For the backbone network, we use a slightly modified PointNet++ [51], which consists of three set-abstraction (SA) layers (with query radii of 0.3, 0.5, and 0.7) and three feature propagation (FP) layers. At each SA layer, the points are randomly downsampled by half. The shape generation network generates 2048 points; its global branch is max pooling combined with two fully connected layers, while the local branch uses a single EdgeConv layer, and a two-layer MLP generates the 3D coordinates. For 3D center detection, the voxel size is set to 0.3 meters in volumetric space. We stack four 3D convolutions (with strides of 2, 1, 2, 1 along the $z$-axis) and four 2D convolutions (with strides of 2, 1, 1, 2), combined with skip connections, for feature aggregation. For all experiments, we use the Adam [31] optimizer with learning rate 0.001, decayed by 0.2 every 6 epochs. It takes about 20 epochs to train our model to convergence.

Training and testing. For training, we combine the points inside the first ground truth bounding box (GTBB) with the points inside the previous GTBB plus a random offset as the template of the current frame. To generate the search area, we enlarge the current GTBB by 2 meters and add a random offset. For testing, we fuse the points inside the first GTBB and the point cloud of the previous result (if it exists) as the template. Besides, we first enlarge the previous result by 2 meters in the current frame, and then collect the points lying in it to generate the search area.
# 4.2 Results

Quantitative results. We compare our method with the current state-of-the-art methods, including SC3D [21], P2B [52], and BAT [83]. The quantitative results are listed in Tab. 1. For the KITTI [20] dataset, we follow [21, 52] and report the performance on four categories (car, pedestrian, van, and cyclist) and their average. As can be seen from the table, our method is significantly better than the other methods on the mean results of the four categories. For the car category, our method improves the Success from $60.5\%$ (BAT) to $70.5\%$ (V2B). For the nuScenes [6] dataset, we directly apply the models trained on the corresponding categories of the KITTI dataset to evaluate performance. The corresponding categories between the KITTI and nuScenes datasets are Car $\rightarrow$ Car, Pedestrian $\rightarrow$ Pedestrian, Van $\rightarrow$ Truck, and Cyclist $\rightarrow$ Bicycle, respectively. Our V2B still achieves the best performance on the mean results of all four categories, which further demonstrates that V2B generalizes well to different datasets.

Quantitative results on sparse scenes. To verify the effectiveness of our method for object tracking in sparse scenes, we evaluate SC3D, P2B, BAT, and our V2B on sparse scenes of the KITTI dataset.

Table 1: The Success/Precision of different methods on the KITTI and nuScenes datasets. The number of frames per category is given in parentheses; "Mean" indicates the average result over the four categories.

<table><tr><td>Dataset</td><td>Method</td><td>Car (6424)</td><td>Pedestrian (6088)</td><td>Van (1248)</td><td>Cyclist (308)</td><td>Mean (14068)</td></tr><tr><td rowspan="4">KITTI</td><td>SC3D [21]</td><td>41.3 / 57.9</td><td>18.2 / 37.8</td><td>40.4 / 47.0</td><td>41.5 / 70.4</td><td>31.2 / 48.5</td></tr><tr><td>P2B [52]</td><td>56.2 / 72.8</td><td>28.7 / 49.6</td><td>40.8 / 48.4</td><td>32.1 / 44.7</td><td>42.4 / 60.0</td></tr><tr><td>BAT [83]</td><td>60.5 / 77.7</td><td>42.1 / 70.1</td><td>52.4 / 67.0</td><td>33.7 / 45.4</td><td>51.2 / 72.8</td></tr><tr><td>V2B (ours)</td><td>70.5 / 81.3</td><td>48.3 / 73.5</td><td>50.1 / 58.0</td><td>40.8 / 49.7</td><td>58.4 / 75.2</td></tr><tr><td>Dataset</td><td>Method</td><td>Car (15578)</td><td>Pedestrian (8019)</td><td>Truck (3710)</td><td>Bicycle (501)</td><td>Mean (27808)</td></tr><tr><td rowspan="4">nuScenes</td><td>SC3D [21]</td><td>25.0 / 27.1</td><td>14.2 / 16.2</td><td>25.7 / 21.9</td><td>17.0 / 18.2</td><td>21.8 / 23.1</td></tr><tr><td>P2B [52]</td><td>27.0 / 29.2</td><td>15.9 / 22.0</td><td>21.5 / 16.2</td><td>20.0 / 26.4</td><td>22.9 / 25.3</td></tr><tr><td>BAT [83]</td><td>22.5 / 24.1</td><td>17.3 / 24.5</td><td>19.3 / 15.8</td><td>17.0 / 18.8</td><td>20.5 / 23.0</td></tr><tr><td>V2B (ours)</td><td>31.3 / 35.1</td><td>17.3 / 23.4</td><td>21.7 / 16.7</td><td>22.2 / 19.1</td><td>25.8 / 29.0</td></tr></table>
Specifically, we filter out sparse scenes for evaluation according to the number of points lying in the target bounding boxes in the test set: at most 150 points (car), 100 (pedestrian), 150 (van), and 100 (cyclist), respectively. For the four categories, the numbers of selected frames are 3293 (car), 1654 (pedestrian), 734 (van), and 59 (cyclist), respectively. In Tab. 2, we report the Success and Precision results. Our V2B achieves the best performance on the mean results of all four categories. Note that when switching from sparse frames (Tab. 2) to all types of frames (Tab. 1), SC3D and P2B suffer a performance drop on the mean results of the four categories. Their worse tracking performance on the many sparse frames leads to inaccurate template updates on the consecutive dense frames, so SC3D and P2B cannot obtain better tracking performance on the dense frames either. Although SC3D uses template shape completion, due to the limited template samples and the large variations of the potential target in the search area, it cannot accurately recover the complex geometric structures of the target in the sparse frames, which makes it hard to localize the potential target with sparse points. On the contrary, our V2B employs the proposed shape-aware feature learning module to generate dense and complete point clouds of the potential target for target shape completion, leading to more accurate localization of the target in the sparse frames. Compared with BAT, our V2B achieves a performance gain of $2\%$ on the mean results of all four categories from sparse frames to all types of frames. These comparisons demonstrate that our V2B effectively improves the performance of single object tracking in sparse point clouds.

Table 2: Comparison of Success/Precision of different methods in the sparse scenarios.

<table><tr><td>Method</td><td>Car</td><td>Pedestrian</td><td>Van</td><td>Cyclist</td><td>Mean</td></tr><tr><td>Frame Number</td><td>3293</td><td>1654</td><td>734</td><td>59</td><td>5740</td></tr><tr><td>SC3D [21]</td><td>37.9 / 53.0</td><td>20.1 / 42.0</td><td>36.2 / 48.7</td><td>50.2 / 69.2</td><td>32.7 / 49.4</td></tr><tr><td>P2B [52]</td><td>56.0 / 70.6</td><td>33.1 / 58.2</td><td>41.1 / 46.3</td><td>24.1 / 28.3</td><td>47.2 / 63.5</td></tr><tr><td>BAT [83]</td><td>60.7 / 75.5</td><td>48.3 / 77.1</td><td>41.5 / 47.4</td><td>25.3 / 30.5</td><td>54.3 / 71.9</td></tr><tr><td>V2B (ours)</td><td>64.7 / 77.4</td><td>50.8 / 74.2</td><td>46.8 / 55.1</td><td>30.4 / 37.2</td><td>58.0 / 73.2</td></tr></table>

Visualization results. As shown in Fig. 4, we plot the visualization results of P2B and our V2B on a couple of sparse and dense scenarios from the car category of the KITTI dataset. It can be clearly seen that, compared with P2B, our V2B tracks the targets more accurately in both sparse and dense scenes, and especially in sparse scenes. The visualization results demonstrate the effectiveness of our V2B for sparse point clouds.
# 4.3 Ablation Study

![](images/d1a00cf20583cfc0a9ba035a7729a04da2b1e9ad6b21dca4fc7c6cfaa58c0925.jpg)
Figure 4: Visualization results of sparse (the first-row sequence) and dense (the second-row sequence) scenes on the car category. The green boxes are ground truth bounding boxes. The red boxes are the objects tracked by our V2B, while the blue boxes are the objects tracked by P2B. In addition, we mark the points of cars in red for better identification.

Template feature embedding. We study the impact of template feature embedding on tracking performance. As shown in Tab. 3, we report the results on the car category of the KITTI dataset. Without template feature embedding (dubbed "w/o template feature"), the performance drops from 70.5 / 81.3 to 63.9 / 73.9, a large margin. In addition, using only the local branch or only the global branch does not achieve the best performance. Since template feature embedding builds the relationship between the template and search area, it helps identify the potential target from the background in the search area. Therefore, when template feature embedding is absent, the performance is greatly reduced, which further demonstrates its effectiveness for improving tracking performance.

Shape-aware feature learning. For sparse point clouds, we further introduce shape-aware feature learning to enhance the ability to distinguish the potential target from the background in the search area. As shown in Tab. 3, we conduct experiments to demonstrate the effectiveness of the shape information. Without the shape-aware feature learning module (dubbed "w/o shape information"), the performance drops from 70.5 / 81.3 to 67.6 / 78.2. In addition, using only the local geometric branch or only the global shape branch does not achieve the best performance. The shape generation network captures the 3D shape information of the object to learn discriminative features of the potential target, so that it can be identified in the search area.

Voxel-to-BEV target localization. Different from SC3D [21] and P2B [52], our V2B adopts another route to localize the potential target. SC3D performs matching between the template and exhaustive candidate 3D proposals, and selects the most similar proposal as the target. P2B and BAT use VoteNet [48] to generate 3D target proposals, and select the proposal with the highest score as the target. However, on sparse point clouds it is hard to generate high-quality proposals, so these methods may not track the object effectively. Our V2B is an anchor-free method that does not require generating numerous 3D proposals, and can therefore overcome this concern. To verify this, we replace voxel-to-BEV target localization with VoteNet and conduct experiments on the KITTI dataset. In Tab. 4, we report the results of the two detection schemes in the sparse scenarios. Likewise, we filter out sparse scenes in the test set for evaluation according to the number of points (following the setting of Tab. 2). The results of VoteNet are lower than those of voxel-to-BEV target localization, which further demonstrates the effectiveness of our method on sparse point clouds.

Table 4: Comparison of different detection schemes in the sparse scenarios.

<table><tr><td>Module</td><td>VoteNet</td><td>Voxel-to-BEV</td></tr><tr><td>Car</td><td>56.9 / 72.0</td><td>64.7 / 77.4</td></tr><tr><td>Pedestrian</td><td>35.3 / 62.1</td><td>50.8 / 74.2</td></tr><tr><td>Van</td><td>30.7 / 39.0</td><td>46.8 / 55.1</td></tr><tr><td>Cyclist</td><td>23.9 / 30.0</td><td>30.4 / 37.2</td></tr></table>
Different voxel sizes. We compress the voxelized point cloud into a BEV feature map for subsequent target center detection. Since object tracking covers a large area, the voxel size affects the size of the BEV feature map and thereby the tracking performance. We hence study the impact of four voxel sizes: 0.1, 0.2, 0.3, and 0.4 meters. The corresponding Success/Precision results are 52.5 / 62.2 (0.1 m), 68.1 / 79.4 (0.2 m), 70.5 / 81.3 (0.3 m), and 69.1 / 80.0 (0.4 m). We achieve the best performance with a voxel size of 0.3 meters. A larger voxel size increases sparsity and loses the target's details, while a smaller voxel size enlarges the BEV feature map and thereby makes center detection harder.

Template generation scheme. Following [21, 52, 83], we study the impact of different template generation schemes on tracking performance. As shown in Tab. 5, we report the results of four schemes on the car category of the KITTI dataset. Our V2B outperforms SC3D, P2B, and BAT under all schemes by a large margin. Compared with these methods, our V2B yields stable results across the four template generation schemes, which further demonstrates that our method consistently generates accurate tracking results in all types of frames.

Table 5: The results of different template generation schemes of different methods on the car category.

<table><tr><td>Scheme</td><td>SC3D [21]</td><td>P2B [52]</td><td>BAT [83]</td><td>V2B (ours)</td></tr><tr><td>The First GT</td><td>31.6 / 44.4</td><td>46.7 / 59.7</td><td>51.8 / 65.5</td><td>67.8 / 79.3</td></tr><tr><td>Previous result</td><td>25.7 / 35.1</td><td>53.1 / 68.9</td><td>59.2 / 75.6</td><td>70.0 / 81.3</td></tr><tr><td>The First GT &amp; Previous result</td><td>34.9 / 49.8</td><td>56.2 / 72.8</td><td>60.5 / 77.7</td><td>70.5 / 81.3</td></tr><tr><td>All previous results</td><td>41.3 / 57.9</td><td>51.4 / 66.8</td><td>55.8 / 71.4</td><td>69.8 / 81.2</td></tr></table>
# 5 Conclusion

In this paper, we proposed a Siamese voxel-to-BEV (V2B) tracker for 3D single object tracking on sparse point clouds. To learn dense geometric features of the potential target in the search area, we developed a Siamese shape-aware feature learning network that utilizes a target completion model to generate dense and complete targets. To avoid using low-quality proposals on sparse point clouds for target center prediction, we developed a simple yet effective voxel-to-BEV target localization network that directly regresses the center of the potential target from the dense BEV feature map without any proposal. Extensive experiments on the KITTI and nuScenes datasets have demonstrated the effectiveness of our method on sparse point clouds.

# Acknowledgments

This work was supported by the National Science Fund of China (Grant Nos. U1713208, 61876084).

# References
[1] Luca Bertinetto, Jack Valmadre, Stuart Golodetz, Ondrej Miksik, and Philip HS Torr. Staple: Complementary learners for real-time tracking. In CVPR, 2016.
[2] Luca Bertinetto, Jack Valmadre, Joao F Henriques, Andrea Vedaldi, and Philip HS Torr. Fully-convolutional Siamese networks for object tracking. In ECCV, 2016.
[3] Adel Bibi, Tianzhu Zhang, and Bernard Ghanem. 3D part-based sparse tracker with automatic synchronization and registration. In CVPR, 2016.
[4] David S Bolme, J Ross Beveridge, Bruce A Draper, and Yui Man Lui. Visual object tracking using adaptive correlation filters. In CVPR, 2010.
[5] Jane Bromley, Isabelle Guyon, Yann LeCun, Eduard Säckinger, and Roopak Shah. Signature verification using a "Siamese" time delay neural network. In NeurIPS, 1993.
[6] Holger Caesar, Varun Bankiti, Alex H. Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuScenes: A multimodal dataset for autonomous driving. arXiv preprint arXiv:1903.11027, 2019.
[7] Xin Chen, Bin Yan, Jiawen Zhu, Dong Wang, Xiaoyun Yang, and Huchuan Lu. Transformer tracking. In CVPR, 2021.
[8] Mingmei Cheng, Le Hui, Jin Xie, and Jian Yang. SSPC-Net: Semi-supervised semantic 3D point cloud segmentation network. In AAAI, 2021.
[9] Mingmei Cheng, Le Hui, Jin Xie, Jian Yang, and Hui Kong. Cascaded non-local neural network for point cloud semantic segmentation. In IROS, 2020.
[10] Hsu-kuang Chiu, Antonio Prioletti, Jie Li, and Jeannette Bohg. Probabilistic 3D multi-object tracking for autonomous driving. arXiv preprint arXiv:2001.05673, 2020.
[11] Janghoon Choi, Junseok Kwon, and Kyoung Mu Lee. Visual tracking by TridentAlign and context embedding. In ACCV, 2020.
[12] Martin Danelljan, Goutam Bhat, Fahad Shahbaz Khan, and Michael Felsberg. ECO: Efficient convolution operators for tracking. In CVPR, 2017.
[13] Martin Danelljan, Andreas Robinson, Fahad Shahbaz Khan, and Michael Felsberg. Beyond correlation filters: Learning continuous convolution operators for visual tracking. In ECCV, 2016.
[14] Xingping Dong and Jianbing Shen. Triplet loss in Siamese network for object tracking. In ECCV, 2018.
[15] Haoqiang Fan, Hao Su, and Leonidas J Guibas. A point set generation network for 3D object reconstruction from a single image. In CVPR, 2017.
[16] Heng Fan and Haibin Ling. Siamese cascaded region proposal networks for real-time visual tracking. In CVPR, 2019.
[17] Christoph Feichtenhofer, A. Pinz, and Andrew Zisserman. Detect to track and track to detect. In ICCV, 2017.
[18] Tuo Feng, Licheng Jiao, Hao Zhu, and Long Sun. A novel object re-track framework for 3D point clouds. In ACM MM, 2020.
[19] Runzhou Ge, Zhuangzhuang Ding, Y. Hu, Yu Wang, Sijia Chen, L. Huang, and Y. Li. AFDet: Anchor free one stage 3D object detection. arXiv preprint arXiv:2006.12671, 2020.
[20] Andreas Geiger, Philip Lenz, and R. Urtasun. Are we ready for autonomous driving? The KITTI vision benchmark suite. In CVPR, 2012.
[21] Silvio Giancola, Jesus Zarzar, and Bernard Ghanem. Leveraging shape completion for 3D Siamese tracking. In CVPR, 2019.
[22] Neil Gordon, B. Ristic, and S. Arulampalam. Beyond the Kalman Filter: Particle Filters for Tracking Applications. Artech House, 2004.
[23] Anfeng He, Chong Luo, Xinmei Tian, and Wenjun Zeng. A twofold Siamese network for real-time object tracking. In CVPR, 2018.
[24] David Held, Sebastian Thrun, and Silvio Savarese. Learning to track at 100 FPS with deep regression networks. In ECCV, 2016.
[25] Joao F Henriques, Rui Caseiro, Pedro Martins, and Jorge Batista. Exploiting the circulant structure of tracking-by-detection with kernels. In ECCV, 2012.
[26] João F Henriques, Rui Caseiro, Pedro Martins, and Jorge Batista. High-speed tracking with kernelized correlation filters. IEEE Transactions on Pattern Analysis and Machine Intelligence, 37(3):583–596, 2014.
[27] Le Hui, Jia Yuan, Mingmei Cheng, Jin Xie, Xiaoya Zhang, and Jian Yang. Superpoint network for point cloud oversegmentation. In ICCV, 2021.
[28] Ugur Kart, J. Kämäräinen, and Jiri Matas. How to make an RGBD tracker? In ECCV Workshops, 2018.
[29] Ugur Kart, A. Lukezic, M. Kristan, J. Kämäräinen, and Jiri Matas. Object tracking by reconstruction with view-specific discriminative correlation filters. In CVPR, 2019.
[30] Aleksandr Kim, Aljosa Osep, and Laura Leal-Taixe. EagerMOT: 3D multi-object tracking via sensor fusion. arXiv preprint arXiv:2104.14682, 2021.
[31] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
[32] Matej Kristan, Jiri Matas, Aleš Leonardis, Tomáš Vojíř, Roman Pflugfelder, Gustavo Fernández, Georg Nebehay, Fatih Porikli, and Luka Čehovin. A novel performance evaluation methodology for single-target trackers. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(11):2137–2155, 2016.
[33] Matej Kristan, Jiri Matas, Ales Leonardis, Michael Felsberg, Luka Cehovin, Gustavo Fernandez, Tomas Vojir, Gustav Hager, Georg Nebehay, and Roman Pflugfelder. The visual object tracking VOT2015 challenge results. In ICCV Workshops, 2015.
[34] Matej Kristan, Jiri Matas, Aleš Leonardis, Tomáš Vojíř, Roman Pflugfelder, Gustavo Fernández, Georg Nebehay, Fatih Porikli, and Luka Čehovin. A novel performance evaluation methodology for single-target trackers. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(11):2137–2155, 2016.
[35] Loic Landrieu and Mohamed Boussaha. Point cloud oversegmentation with graph-structured deep metric learning. In CVPR, 2019.
[36] Loic Landrieu and Martin Simonovsky. Large-scale point cloud semantic segmentation with superpoint graphs. In CVPR, 2018.
[37] K. Lebeda, S. Hadfield, and R. Bowden. 2D or not 2D: Bridging the gap between tracking and structure from motion. In ACCV, 2014.
[38] Bo Li, Wei Wu, Qiang Wang, Fangyi Zhang, Junliang Xing, and Junjie Yan. SiamRPN++: Evolution of Siamese visual tracking with very deep networks. In CVPR, 2019.
[39] Bo Li, Junjie Yan, Wei Wu, Zheng Zhu, and Xiaolin Hu. High performance visual tracking with Siamese region proposal network. In CVPR, 2018.
[40] Yangyan Li, Rui Bu, Mingchao Sun, Wei Wu, Xinhan Di, and Baoquan Chen. PointCNN: Convolution on X-transformed points. In NeurIPS, 2018.
[41] Tsung-Yi Lin, Priya Goyal, Ross B. Girshick, Kaiming He, and Piotr Dollár. Focal loss for dense object detection. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42:318–327, 2020.
[42] Ye Liu, Xiao-Yuan Jing, Jianhui Nie, Hao Gao, Jun Liu, and Guo-Ping Jiang. Context-aware three-dimensional mean-shift with occlusion handling for robust object tracking in RGB-D videos. IEEE Transactions on Multimedia, 21(3):664–677, 2019.
[43] Ye Liu, Xiao-Yuan Jing, Jianhui Nie, Hao Gao, Jun Liu, and Guo-Ping Jiang. Context-aware three-dimensional mean-shift with occlusion handling for robust object tracking in RGB-D videos. IEEE Transactions on Multimedia, 21(3):664–677, 2018.
[44] Zhijian Liu, Haotian Tang, Yujun Lin, and Song Han. Point-Voxel CNN for efficient 3D deep learning. In NeurIPS, 2019.
[45] Matthias Luber, Luciano Spinello, and Kai O Arras. People tracking in RGB-D data with on-line boosted target models. In IROS, 2011.
[46] Wenjie Luo, Bin Yang, and Raquel Urtasun. Fast and Furious: Real time end-to-end 3D detection, tracking and motion forecasting with a single convolutional net. In CVPR, 2018.
[47] Abhishek Patil, Srikanth Malla, Haiming Gang, and Yi-Ting Chen. The H3D dataset for full-surround 3D multi-object detection and tracking in crowded urban scenes. In ICRA, 2019.
[48] Charles R Qi, Or Litany, Kaiming He, and Leonidas J Guibas. Deep Hough voting for 3D object detection in point clouds. In ICCV, 2019.
|
| 275 |
+
[49] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. PointNet: Deep learning on point sets for 3D classification and segmentation. In CVPR, 2017.
|
| 276 |
+
[50] Charles R Qi, Hao Su, Matthias Niebner, Angela Dai, Mengyuan Yan, and Leonidas J Guibas. Volumetric and multi-view cnns for object classification on 3D data. In CVPR, 2016.
|
| 277 |
+
[51] Charles R Qi, Li Yi, Hao Su, and Leonidas J Guibas. PointNet++: Deep hierarchical feature learning on point sets in a metric space. In NeurIPS, 2017.
|
| 278 |
+
|
| 279 |
+
[52] Haozhe Qi, C. Feng, Zhiguo Cao, F. Zhao, and Yang Xiao. P2B: Point-to-box network for 3D object tracking in point clouds. In CVPR, 2020.
|
| 280 |
+
[53] Martin Rünz and L. Agapito. Co-fusion: Real-time segmentation, tracking and fusion of multiple objects. In ICRA, 2017.
|
| 281 |
+
[54] Samuel Scheidegger, Joachim Benjaminsson, Emil Rosenberg, Amrit Krishnan, and Karl Granström. Mono-camera 3D multi-object tracking using deep learning detections and PBM filtering. In IV, 2018.
|
| 282 |
+
[55] Abhijeet Shenoi, Mihir Patel, JunYoung Gwak, Patrick Goebel, Amir Sadeghian, Hamid Rezatofighi, Roberto Martin-Martin, and Silvio Savarese. JRMOT: A real-time 3D multi-object tracker and a new large-scale dataset. In IROS, 2020.
|
| 283 |
+
[56] Shaoshuai Shi, Chaoxu Guo, Li Jiang, Zhe Wang, Jianping Shi, Xiaogang Wang, and Hongsheng Li. PV-RCNN: Point-voxel feature set abstraction for 3D object detection. In CVPR, 2020.
|
| 284 |
+
[57] Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. PointRCNN: 3D object proposal generation and detection from point cloud. In CVPR, 2019.
|
| 285 |
+
[58] Shaoshuai Shi, Zhe Wang, Jianping Shi, Xiaogang Wang, and Hongsheng Li. From points to parts: 3D object detection from point cloud with part-aware and part-aggregation network. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020.
|
| 286 |
+
[59] Shuran Song and Jianxiong Xiao. Tracking revisited using RGBD camera: Unified benchmark and baselines. In ICCV, 2013.
|
| 287 |
+
[60] Luciano Spinello, Kai Arras, Rudolph Triebel, and Roland Siegwart. A layered approach to people detection in 3D range data. In AAAI, 2010.
|
| 288 |
+
[61] Hang Su, Subhransu Maji, Evangelos Kalogerakis, and Erik Learned-Miller. Multi-view convolutional neural networks for 3D shape recognition. In ICCV, 2015.
|
| 289 |
+
[62] Hao Su, Fan Wang, Eric Yi, and Leonidas J Guibas. 3D-assisted feature synthesis for novel views of an object. In ICCV, 2015.
|
| 290 |
+
[63] Hugues Thomas, Charles R Qi, Jean-Emmanuel Deschaud, Beatriz Marcotegui, François Goulette, and Leonidas J Guibas. KPCov: Flexible and deformable convolution for point clouds. In ICCV, 2019.
|
| 291 |
+
[64] Jack Valmadre, Luca Bertinetto, Joao Henriques, Andrea Vedaldi, and Philip HS Torr. End-to-end representation learning for correlation filter based tracking. In CVPR, 2017.
|
| 292 |
+
[65] P. Voigtlaender, Jonathon Luiten, P. Torr, and B. Leibe. Siam R-CNN: Visual tracking by re-detection. In CVPR, 2020.
|
| 293 |
+
[66] Lei Wang, Yuchun Huang, Yaolin Hou, Shenman Zhang, and Jie Shan. Graph attention convolution for point cloud semantic segmentation. In CVPR, 2019.
|
| 294 |
+
[67] Qiang Wang, Zhu Teng, Junliang Xing, Jin Gao, Weiming Hu, and Stephen Maybank. Learning attentions: residual attentional Siamese network for high performance online visual tracking. In CVPR, 2018.
|
| 295 |
+
[68] Qiang Wang, Li Zhang, Luca Bertinetto, Weiming Hu, and Philip HS Torr. Fast online object tracking and segmentation: A unifying approach. In CVPR, 2019.
|
| 296 |
+
[69] Sukai Wang, Yuxiang Sun, Chengju Liu, and Ming Liu. PointTrackNet: An end-to-end network for 3D object detection and tracking from point clouds. IEEE Robotics and Automation Letters, 5(2):3206-3212, 2020.
|
| 297 |
+
[70] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E Sarma, Michael M Bronstein, and Justin M Solomon. Dynamic graph cnn for learning on point clouds. arXiv preprint arXiv:1801.07829, 2018.
|
| 298 |
+
[71] Xinshuo Weng, Jianren Wang, David Held, and Kris Kitani. 3D multi-object tracking: A baseline and new evaluation metrics. In IROS, 2020.
|
| 299 |
+
[72] T. Whelan, Renato F. Salas-Moreno, B. Glocker, A. Davison, and Stefan Leutenegger. Elasticfusion: Real-time dense slam and light source estimation. The International Journal of Robotics Research, 35:1697-1716, 2016.
|
| 300 |
+
[73] Hai Wu, Wenkai Han, Chenglu Wen, Xin Li, and Cheng Wang. 3D multi-object tracking in point clouds based on prediction confidence-guided data association. IEEE Transactions on Intelligent Transportation Systems, 2021.
|
| 301 |
+
|
| 302 |
+
[74] Yi Wu, Jongwoo Lim, and Ming-Hsuan Yang. Online object tracking: A benchmark. In CVPR, 2013.
|
| 303 |
+
[75] Yinda Xu, Zeyu Wang, Zuoxin Li, Ye Yuan, and Gang Yu. SiamFC++: Towards robust and accurate visual tracking with target estimation guidelines. In AAAI, 2020.
|
| 304 |
+
[76] Bin Yan, Xinyu Zhang, Dong Wang, Huchuan Lu, and Xiaoyun Yang. Alpha-refine: Boosting tracking performance by precise bounding box estimation. In CVPR, 2021.
|
| 305 |
+
[77] Ze Yang and Liwei Wang. Learning relationships for multi-view 3D object recognition. In ICCV, 2019.
|
| 306 |
+
[78] Tianwei Yin, Xingyi Zhou, and Philipp Krahenbuhl. Center-based 3D object detection and tracking. In CVPR, 2021.
|
| 307 |
+
[79] Lequan Yu, Xianzhi Li, Chi-Wing Fu, Daniel Cohen-Or, and Pheng-Ann Heng. PU-Net: Point cloud upsampling network. In CVPR, 2018.
|
| 308 |
+
[80] Yuechen Yu, Yilei Xiong, Weilin Huang, and M. Scott. Deformable Siamese attention networks for visual object tracking. In CVPR, 2020.
|
| 309 |
+
[81] Wenwei Zhang, Hui Zhou, Shuyang Sun, Zhe Wang, Jianping Shi, and Chen Change Loy. Robust multi-modality multi-object tracking. In CVPR, 2019.
|
| 310 |
+
[82] Zhipeng Zhang, Houwen Peng, Jianlong Fu, Bing Li, and Weiming Hu. Ocean: Object-aware anchor-free tracking. arXiv preprint arXiv:2006.10721, 2020.
|
| 311 |
+
[83] Chaoda Zheng, Xu Yan, Jiantao Gao, Weibing Zhao, Wei Zhang, Zhen Li, and Shuguang Cui. Box-aware feature enhancement for single object tracking on point clouds. In ICCV, 2021.
|
| 312 |
+
[84] Wenzhang Zhou, Longyin Wen, L. Zhang, Dawei Du, T. Luo, and Y. Wu. SiamMan: Siamese motion-aware network for visual tracking. arXiv preprint arXiv:1912.05515, 2019.
|
| 313 |
+
[85] Zheng Zhu, Qiang Wang, Bo Li, Wei Wu, Junjie Yan, and Weiming Hu. Distractor-aware Siamese networks for visual object tracking. In ECCV, 2018.
|
3dsiamesevoxeltobevtrackerforsparsepointclouds/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9be6e112b2937f76386ce5501005978e11e3f4770ec2f630b2668f329e9e4f0
+size 444245

3dsiamesevoxeltobevtrackerforsparsepointclouds/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c13fe15bb7df0fb05c794e3ba836f29c1facdf092b649abded0a63a023f503b
+size 425989

a3dgenerativemodelforstructurebaseddrugdesign/c62c4fbc-171b-4007-927c-6f337110ede9_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e2126db78acb8097e1e666199872cdcb68c7875ffdff5b0ec3dfe657de867cd
+size 61568

a3dgenerativemodelforstructurebaseddrugdesign/c62c4fbc-171b-4007-927c-6f337110ede9_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e28732b12ffaa3b0c1013e3bde0b94cdbed7cc00cf2e5b58cd52595113201123
+size 74541

a3dgenerativemodelforstructurebaseddrugdesign/c62c4fbc-171b-4007-927c-6f337110ede9_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87a6b65167a02b3eedc731693d6cb19ecd84270fe1c95586fda879bb76a3dd49
+size 3189248

a3dgenerativemodelforstructurebaseddrugdesign/full.md
ADDED
@@ -0,0 +1,252 @@

# A 3D Generative Model for Structure-Based Drug Design

Shitong Luo
HeliXon Research
luost@helixon.com, luost26@gmail.com

Jianzhu Ma
Peking University
majianzhu@pku.edu.cn

Jiaqi Guan
University of Illinois Urbana-Champaign
jiaqi@illinois.edu

Jian Peng
University of Illinois Urbana-Champaign
jianpeng@illinois.edu

# Abstract

We study a fundamental problem in structure-based drug design — generating molecules that bind to specific protein binding sites. While we have witnessed the great success of deep generative models in drug design, the existing methods are mostly string-based or graph-based. They are limited by the lack of spatial information and thus cannot be applied to structure-based design tasks. In particular, such models have no or little knowledge of how molecules interact with their target proteins exactly in 3D space. In this paper, we propose a 3D generative model that generates molecules given a designated 3D protein binding site. Specifically, given a binding site as the 3D context, our model estimates the probability density of atoms' occurrences in 3D space — positions that are more likely to have atoms are assigned higher probability. To generate 3D molecules, we propose an auto-regressive sampling scheme — atoms are sampled sequentially from the learned distribution until there is no room for new atoms. Combined with this sampling scheme, our model can generate valid and diverse molecules, and it is applicable to various structure-based molecular design tasks such as molecule sampling and linker design. Experimental results demonstrate that molecules sampled from our model exhibit high binding affinity to specific targets and good drug properties such as drug-likeness, even though the model is not explicitly optimized for them.

# 1 Introduction

Designing molecules that bind to a specific protein binding site, also known as structure-based drug design, is one of the most challenging tasks in drug discovery [2]. Searching for suitable molecule candidates in silico usually involves massive computational effort because of the enormous space of synthetically feasible chemicals [22] and the conformational degrees of freedom of both compound and protein structures [11].

In recent years, we have witnessed the success of machine learning approaches to problems in drug design, especially molecule generation. Most of these approaches use deep generative models to propose drug candidates by learning the underlying distribution of desirable molecules. However, most such methods are SMILES/string-based [10, 17] or graph-based [18, 19, 13, 14]. They are limited by the lack of spatial information and unable to perceive how molecules interact with proteins in 3D space. Hence, these methods are not applicable to generating molecules that fit a specific protein structure, also known as the drug target. Another line of work studies generating molecules directly in 3D space [8, 28, 29, 20, 30, 15]. Most of them [8, 28, 29] can only handle very small organic molecules and are not sufficient to generate drug-scale molecules, which usually contain dozens of heavy atoms. [20] proposes to generate voxelized molecular images and use a post-processing algorithm to reconstruct molecular structures. Though this method can produce drug-scale molecules for specific protein pockets, the quality of the samples is heavily limited by voxelization. Therefore, generating high-quality drug molecules for specific 3D protein binding sites remains challenging.

In this work, we propose a 3D generative model to approach this task. Specifically, we aim at modeling the distribution of atom occurrence in the 3D space of the binding site. Formally, given a binding site $\mathcal{C}$ as input, we model the distribution $p(e,\boldsymbol{r}|\mathcal{C})$, where $\boldsymbol{r} \in \mathbb{R}^3$ is an arbitrary 3D coordinate and $e$ is the atom type. To realize this distribution, we design a neural network architecture which takes as input a query 3D coordinate $\boldsymbol{r}$, conditional on the 3D context $\mathcal{C}$, and outputs the probability of $\boldsymbol{r}$ being occupied by an atom of a particular chemical element. In order to ensure the distribution is equivariant to $\mathcal{C}$'s rotation and translation, we utilize rotationally invariant graph neural networks to perceive the context of each query coordinate.

Despite having a neural network to model the distribution of atom occurrence $p(e, \boldsymbol{r} | \mathcal{C})$, generating valid and diverse molecules remains technically challenging, mainly for the following two reasons. First, simply drawing i.i.d. samples from the distribution $p(e, \boldsymbol{r} | \mathcal{C})$ does not yield valid molecules, because atoms within a molecule are not independent of each other. Second, a desirable sampling algorithm should capture the multi-modality of the feasible chemical space, i.e., it should be able to generate a diverse set of desired molecules given a specific binding context. To tackle these challenges, we propose an auto-regressive sampling algorithm. Specifically, we start with a context consisting of only protein atoms. Then, we iteratively sample one atom from the distribution at each step and add it to the context to be used in the next step, until there is no room for new atoms. Compared to other recent methods [20, 23], our auto-regressive algorithm is simpler and more advantageous. It does not rely on post-processing algorithms to infer atom placements from density. More importantly, it is capable of multi-modal sampling by the auto-regressive nature of the sampling process, avoiding additional latent variables via VAEs [16] or GANs [9], which would bring about extra architectural complexity and training difficulty.

We conduct extensive experiments to evaluate our approach. Quantitative and qualitative results show that: (1) our method is able to generate diverse drug-like molecules that have high binding affinity to specific targets based on 3D structures of protein binding sites; (2) our method is able to generate molecules with fairly high drug-likeness scores (QED) [4] and synthetic accessibility scores (SA) [6], even though the model is not specifically optimized for them; (3) in addition to molecule generation, the proposed method is also applicable to other relevant tasks such as linker design.

# 2 Related Work

SMILES-Based and Graph-Based Molecule Generation Deep generative models have been prevalent in molecule design. The overall idea is to use deep generative models to propose molecule candidates by learning the underlying distribution of desirable molecules. Existing works can be roughly divided into two classes — string-based and graph-based. String-based methods represent molecules as linear strings, e.g., SMILES strings [34], making a wide range of language modeling tools readily applicable. For example, [5, 10, 26] utilize recurrent neural networks to learn a language model of SMILES strings. However, string-based representations fail to capture molecular similarities, making them a sub-optimal representation for molecules [13]. In contrast, graph representations are more natural, and graph-based approaches have drawn great attention. The majority of graph-based models generate molecules in an auto-regressive fashion, i.e., adding atoms or fragments sequentially, which can be implemented based upon VAEs [13], normalizing flows [27], reinforcement learning [35, 14], etc. Despite the progress made in string-based and graph-based approaches, they are limited by the lack of spatial information and thus cannot be directly applied to structure-based drug design tasks [2]. Specifically, as 1D/2D-based methods, they are unable to perceive how molecules interact with their target proteins exactly in 3D space.

Molecule Generation in 3D Space There has been another line of methods that generate molecules directly in 3D space. [8] proposes an auto-regressive model which takes a partially generated molecule as input, outputs the next atom's chemical element and its distances to previous atoms, and places the atom in 3D space according to the distance constraints. [28, 29] approach this task via reinforcement learning by generating 3D molecules in a sequential way. Different from the previous method [8], they mainly rely on a reward function derived from the potential energy function of atomic systems. These works can generate realistic 3D molecules. However, they can only handle small organic molecules and are not sufficient to generate drug-scale molecules, which usually contain dozens of heavy atoms.

[20, 23] propose a non-autoregressive approach to 3D molecular generation that is able to generate drug-scale molecules. It represents molecules as 3D images by voxelizing them onto 3D meshgrids. In this way, the molecular generation problem is transformed into an image generation problem, making it possible to leverage sophisticated image generation techniques. Specifically, it employs convolutional neural network-based VAEs [16] or GANs [9] to generate such molecular images. It also attempts to fuse the binding site structures into the generative network, enabling the model to generate molecules for designated binding targets. In order to reconstruct the molecular structures from images, it leverages a post-processing algorithm to search for atom placements that best fit the image. In comparison to previous methods which can only generate small 3D molecules, this method can generate drug-scale 3D molecules. However, the quality of its generated molecules is not satisfying because of the following major limitations. First, it is hardly scalable to large binding pockets, as the number of voxels grows cubically with the size of the binding site. Second, the resolution of the 3D molecular images is another bottleneck that significantly limits precision, due to the same scalability issue. Last, conventional CNNs are not rotation-equivariant, which is crucial for modeling molecular systems [25].

# 3 Method

Our goal is to generate a set of atoms that is able to form a valid drug-like molecule fitting a specific binding site. To this end, we first present a 3D generative model in Section 3.1 that predicts the probability of atom occurrence in the 3D space of the binding site. Second, we present in Section 3.2 the auto-regressive sampling algorithm for generating valid and multi-modal molecules from the model. Finally, in Section 3.3, we derive the training objective, by which the model learns to predict where atoms should be placed and what type of atom should be placed at each position.

# 3.1 3D Generative Model Design

A binding site can be defined as a set of atoms $\mathcal{C} = \{(a_i, r_i)\}_{i=1}^{N_b}$, where $N_b$ is the number of atoms in the binding site, $a_i$ is the $i$-th atom's attributes such as chemical element, belonging amino acid, etc., and $r_i$ is its 3D coordinate. To generate atoms in the binding site, we consider modeling the probability of an atom occurring at some position $r$ in the site. Formally, this is to model the density $p(e|r,\mathcal{C})$, where $r \in \mathbb{R}^3$ is an arbitrary 3D coordinate and $e \in \mathcal{E} = \{\mathrm{H},\mathrm{C},\mathrm{O},\ldots\}$ is the chemical element. Intuitively, this density can be interpreted as a classifier that takes as input a 3D coordinate $r$ conditional on $\mathcal{C}$ and predicts the probability of $r$ being occupied by an atom of type $e$.

To model $p(e|r,\mathcal{C})$, we devise a model consisting of two parts: the Context Encoder learns the representation of each atom in the context $\mathcal{C}$ via graph neural networks, and the Spatial Classifier takes as input a query position $\boldsymbol{r}$, aggregates the representations of contextual atoms near it, and finally predicts $p(e|r,\mathcal{C})$. The implementation of these two parts is detailed as follows.

Context Encoder The purpose of the context encoder is to extract information-rich representations for each atom in $\mathcal{C}$. We assume a desirable representation should satisfy two properties: (1) context-awareness: the representation of an atom should not only encode the properties of the atom itself, but also encode its context. (2) rotational and translational invariance: since the physical and biological properties of the system do not change under rigid transforms, the representations that reflect these properties should be invariant to rigid transforms as well. To this end, we employ rotationally and translationally invariant graph neural networks [25] as the backbone of the context encoder, described as follows.

First of all, since there is generally no natural topology in $\mathcal{C}$, we construct a $k$-nearest-neighbor graph based on inter-atomic distances, denoted as $\mathcal{G} = \langle \mathcal{C}, \mathbf{A} \rangle$, where $\mathbf{A}$ is the adjacency matrix. We also denote the $k$-NN neighborhood of atom $i$ as $N_{k}(\boldsymbol{r}_{i})$ for convenience. The context encoder takes $\mathcal{G}$ as input and outputs structure-aware node embeddings. The sketch below illustrates this construction.

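For concreteness, the following is a minimal PyTorch sketch of the $k$-NN graph construction; the function name and the default value of `k` are illustrative assumptions, not details specified by the paper.

```python
import torch

def knn_graph(coords: torch.Tensor, k: int = 32) -> torch.Tensor:
    """Build a k-nearest-neighbor neighborhood from atom coordinates.

    coords: (N, 3) tensor of 3D atom positions.
    Returns: (N, k) indices of each atom's k nearest neighbors
    (excluding the atom itself).
    """
    dist = torch.cdist(coords, coords)                 # (N, N) pairwise distances
    dist.fill_diagonal_(float("inf"))                  # exclude self-loops
    _, nbr_idx = dist.topk(k, dim=-1, largest=False)   # k smallest distances per atom
    return nbr_idx
```
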

Figure 1: An illustration of the sampling process. Atoms are sampled sequentially. The probability density changes as we place new atoms. The sampling process naturally diverges, leading to different samples.

The first layer of the encoder is a linear layer. It maps atomic attributes $\{a_i\}$ to initial embeddings $\{h_i^{(0)}\}$. Then, these embeddings, along with the graph structure $\mathbf{A}$, are fed into $L$ message passing layers. Specifically, the message passing takes the form:

$$
\boldsymbol{h}_i^{(\ell+1)} = \sigma\left( \boldsymbol{W}_0^{\ell} \boldsymbol{h}_i^{(\ell)} + \sum_{j \in N_k(\boldsymbol{r}_i)} \boldsymbol{W}_1^{\ell} \boldsymbol{w}(d_{ij}) \odot \boldsymbol{W}_2^{\ell} \boldsymbol{h}_j^{(\ell)} \right), \tag{1}
$$

where $\boldsymbol{w}(\cdot)$ is a weight network and $d_{ij}$ denotes the distance between atom $i$ and atom $j$. The formula is similar to continuous-filter convolution [25]. Note that the weight of the message from $j$ to $i$ depends only on $d_{ij}$, ensuring its invariance to rotation and translation. Finally, we obtain $\{\boldsymbol{h}_i^{(L)}\}$, a set of embeddings for each atom in $\mathcal{C}$.

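A minimal sketch of the Eq. 1 update in PyTorch follows. The weight-net architecture and the use of `sigmoid` for $\sigma$ are our assumptions (the paper leaves the nonlinearity unspecified), so this should be read as an illustration rather than the authors' exact implementation.

```python
import torch
import torch.nn as nn

class InvariantMPLayer(nn.Module):
    """One message-passing layer in the spirit of Eq. 1: each message is
    weighted by a learned function of the inter-atomic distance only, so
    the update is invariant to rotations and translations."""

    def __init__(self, hidden_dim: int):
        super().__init__()
        self.w0 = nn.Linear(hidden_dim, hidden_dim)   # W0: self term
        self.w1 = nn.Linear(hidden_dim, hidden_dim)   # W1: applied to w(d_ij)
        self.w2 = nn.Linear(hidden_dim, hidden_dim)   # W2: applied to h_j
        # w(d_ij): maps a scalar distance to a per-channel weight vector
        self.weight_net = nn.Sequential(
            nn.Linear(1, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, hidden_dim)
        )

    def forward(self, h, coords, nbr_idx):
        # h: (N, D) embeddings, coords: (N, 3), nbr_idx: (N, k) neighbor indices
        rel = coords.unsqueeze(1) - coords[nbr_idx]        # (N, k, 3)
        d = rel.norm(dim=-1, keepdim=True)                 # (N, k, 1) distances d_ij
        w = self.w1(self.weight_net(d))                    # (N, k, D)  W1 w(d_ij)
        msg = w * self.w2(h)[nbr_idx]                      # (N, k, D)  ⊙ W2 h_j
        return torch.sigmoid(self.w0(h) + msg.sum(dim=1))  # Eq. 1 update
```
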
Spatial Classifier The spatial classifier takes as input a query position $\boldsymbol{r} \in \mathbb{R}^3$ and predicts the type of atom occupying $\boldsymbol{r}$. In order to make successful predictions, the model should be able to perceive the context around $\boldsymbol{r}$. Therefore, the first step of this part is to aggregate atom embeddings from the context encoder:

$$
\boldsymbol{v} = \sum_{j \in N_k(\boldsymbol{r})} \boldsymbol{W}_0 \, \boldsymbol{w}_{\text{aggr}}(\|\boldsymbol{r} - \boldsymbol{r}_j\|) \odot \boldsymbol{W}_1 \boldsymbol{h}_j^{(L)}, \tag{2}
$$

where $N_{k}(\boldsymbol{r})$ is the $k$-nearest neighborhood of $\boldsymbol{r}$. Note that we weight the different embeddings using the weight network $\boldsymbol{w}_{\mathrm{aggr}}(\cdot)$ according to distances, because it is necessary to distinguish the contributions of different atoms in the context. Finally, in order to predict $p(e|\boldsymbol{r},\mathcal{C})$, the aggregated feature $\boldsymbol{v}$ is passed to a classical multi-layer perceptron classifier:

$$
\boldsymbol{c} = \operatorname{MLP}(\boldsymbol{v}), \tag{3}
$$

where $\boldsymbol{c}$ is the non-normalized probability of chemical elements. The estimated probability of position $\boldsymbol{r}$ being occupied by an atom of type $e$ is:

$$
p(e|\boldsymbol{r},\mathcal{C}) = \frac{\exp(\boldsymbol{c}[e])}{1 + \sum_{e' \in \mathcal{E}} \exp(\boldsymbol{c}[e'])}, \tag{4}
$$

where $\mathcal{E}$ is the set of possible chemical elements. Unlike typical classifiers that apply softmax to $\boldsymbol{c}$, we make use of the extra degree of freedom by adding 1 to the denominator, so that the probability of "nothing" can be expressed as:

$$
p(\text{Nothing}|\boldsymbol{r},\mathcal{C}) = \frac{1}{1 + \sum_{e' \in \mathcal{E}} \exp(\boldsymbol{c}[e'])}. \tag{5}
$$

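To make the extra "nothing" class concrete, here is a minimal sketch of Eqs. 4–5; the function name is our own, and a numerically stable implementation would use log-sum-exp rather than raw exponentials.

```python
import torch

def element_probabilities(c: torch.Tensor):
    """Turn non-normalized logits c (Eq. 3) into the probabilities of
    Eqs. 4-5. The constant 1 in the denominator reserves one unit of
    unnormalized mass for the 'nothing' outcome.

    c: (..., |E|) logits over chemical elements.
    Returns (p_elem, p_nothing) with p_elem.sum(-1) + p_nothing == 1.
    """
    exp_c = torch.exp(c)                       # (for stability, prefer log-sum-exp)
    denom = 1.0 + exp_c.sum(dim=-1, keepdim=True)
    p_elem = exp_c / denom                     # Eq. 4
    p_nothing = 1.0 / denom.squeeze(-1)        # Eq. 5
    return p_elem, p_nothing
```
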
# 3.2 Sampling

Sampling a molecule amounts to generating a set of atoms $\{(e_i, r_i)\}_{i=1}^{N_a}$. However, formulating an effective sampling algorithm is non-trivial because of the following three challenges. First, we have to define the joint distribution of $e$ and $r$, i.e., $p(e, r|\mathcal{C})$, from which we can jointly sample an atom's chemical element and its position. Second, notice that simply drawing i.i.d. samples from $p(e, \boldsymbol{r}|\mathcal{C})$ doesn't make sense because atoms are clearly not independent of each other. Thus, the sampling algorithm should be able to attend to the dependencies between atoms. Third, the sampling algorithm should produce multi-modal samples. This is important because in reality there is usually more than one molecule that can bind to a specific target.

In the following, we first define the joint distribution $p(e,r|\mathcal{C})$. Then, we present an auto-regressive sampling algorithm to tackle the second and the third challenges.

Joint Distribution We define the joint distribution of the coordinate $\boldsymbol{r}$ and the atom type $e$ using Eq. 4:

$$
p(e, \boldsymbol{r}|\mathcal{C}) = \frac{\exp(\boldsymbol{c}[e])}{Z}, \tag{6}
$$

where $Z$ is an unknown normalizing constant and $\boldsymbol{c}$ is a function of $\boldsymbol{r}$ and $\mathcal{C}$ as defined in Eq. 3. Though $p(e, \boldsymbol{r}|\mathcal{C})$ is a non-normalized distribution, drawing samples from it is efficient because the dimension of $\boldsymbol{r}$ is only 3. Viable sampling methods include Markov chain Monte Carlo (MCMC) and discretization.

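As one concrete instance of the discretization option, the sketch below evaluates the logits on a regular 3D grid and samples one (position, element) cell; `model(query_coords, context)` is a hypothetical callable returning the Eq. 3 logits, and the grid step is an illustrative choice.

```python
import torch

def sample_atom_by_discretization(model, context, box_min, box_max, step=0.5):
    """Draw (e, r) from the unnormalized density of Eq. 6 by discretizing
    space: softmax over all flattened (r, e) cells implements exp(c[e])/Z."""
    axes = [torch.arange(lo, hi, step) for lo, hi in zip(box_min, box_max)]
    grid = torch.cartesian_prod(*axes)            # (M, 3) candidate coordinates
    c = model(grid, context)                      # (M, |E|) logits (hypothetical API)
    probs = torch.softmax(c.flatten(), dim=0)     # normalize jointly over (r, e)
    idx = torch.multinomial(probs, 1).item()
    r = grid[idx // c.shape[1]]                   # recover the grid position
    e = idx % c.shape[1]                          # recover the element index
    return e, r
```
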
Auto-Regressive Sampling We sample a molecule by progressively sampling one atom at each step. Specifically, at step $t$, the context $\mathcal{C}_t$ contains not only protein atoms but also the $t$ atoms sampled beforehand. Sampled atoms in $\mathcal{C}_t$ are treated in the model in the same way as protein atoms, but they carry different attributes in order to differentiate themselves from protein atoms. Then, the $(t+1)$-th atom is sampled from $p(e, \boldsymbol{r} | \mathcal{C}_t)$ and added to $\mathcal{C}_t$, leading to the context for the next step, $\mathcal{C}_{t+1}$. The sampling process is illustrated in Figure 1. Formally, we have:

$$
(e_{t+1}, \boldsymbol{r}_{t+1}) \sim p(e, \boldsymbol{r}|\mathcal{C}_t),
$$

$$
\mathcal{C}_{t+1} \leftarrow \mathcal{C}_t \cup \{(e_{t+1}, \boldsymbol{r}_{t+1})\}. \tag{7}
$$

To determine when the auto-regressive sampling should stop, we employ an auxiliary network. The network takes as input the embeddings of previously sampled atoms and classifies them into two categories: frontier and non-frontier. If all the existing atoms are non-frontier, which means there is no room for more atoms, the sampling is terminated. Finally, we use OpenBabel [21, 20] to obtain the bonds of the generated structures.

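Putting Eq. 7 and the termination rule together, a sketch of the sampling loop might look as follows. It reuses the hypothetical `sample_atom_by_discretization` helper above; `frontier_net` is assumed to wrap the auxiliary classifier ($\sigma(F(\boldsymbol{h}_i)) > 0.5$) and return a boolean per placed atom, and `max_atoms` is a safety cap of our own.

```python
def sample_molecule(protein_context, model, frontier_net,
                    box_min, box_max, max_atoms=60):
    """Auto-regressive sampling sketch: draw one atom per step from
    p(e, r | C_t), grow the context, and stop once no frontier atoms remain."""
    context = list(protein_context)   # C_0: protein atoms only
    placed = []
    for _ in range(max_atoms):
        e, r = sample_atom_by_discretization(model, context, box_min, box_max)
        placed.append((e, r))
        context.append((e, r))        # C_{t+1} = C_t ∪ {(e_{t+1}, r_{t+1})}
        if not any(frontier_net(atom) for atom in placed):
            break                     # all atoms non-frontier: no room for more
    return placed                     # bonds are then inferred with OpenBabel
```
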
In summary, the proposed auto-regressive algorithm successfully settles the two remaining challenges. First, the model is aware of the other atoms when placing new atoms, and is thus able to account for the dependencies between them. Second, auto-regressive sampling is a stochastic process: its sampling paths naturally diverge, leading to diverse samples.

# 3.3 Training

As we adopt an auto-regressive sampling strategy, we propose a cloze-filling training scheme — at training time, a random portion of the target molecule is masked, and the network learns to predict the masked part from the observable part and the binding site. This emulates the sampling process, where the model can only observe partial molecules. The training loss consists of three terms, described below.

First, to make sure the model is able to predict positions that actually have atoms (positive positions), we include a binary cross entropy loss to contrast positive positions against negative positions:

$$
L_{\mathrm{BCE}} = -\mathbb{E}_{\boldsymbol{r} \sim p_{+}}\left[\log\left(1 - p(\text{Nothing}|\boldsymbol{r},\mathcal{C})\right)\right] - \mathbb{E}_{\boldsymbol{r} \sim p_{-}}\left[\log p(\text{Nothing}|\boldsymbol{r},\mathcal{C})\right]. \tag{8}
$$

Here, $p_{+}$ is a positive sampler that yields coordinates of masked atoms, and $p_{-}$ is a negative sampler that yields random coordinates in the ambient space. $p_{-}$ is empirically defined as a Gaussian mixture model containing $|\mathcal{C}|$ components centered at each atom in $\mathcal{C}$. The standard deviation of each component is set to $2\AA$ in order to cover the ambient space. Intuitively, the first term in Eq. 8 increases the likelihood of atom placement for positions that should contain an atom, and the second term decreases the likelihood for other positions.

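The negative sampler $p_-$ admits a very short sketch; drawing from a uniform-weight Gaussian mixture amounts to picking a random context atom and adding isotropic noise. The function name and the number of negatives per step are illustrative assumptions.

```python
import torch

def sample_negative_positions(context_coords: torch.Tensor, n: int, std: float = 2.0):
    """Draw n negatives from p_-: a Gaussian mixture with one component of
    standard deviation 2 Angstroms centered at every context atom."""
    comp = torch.randint(len(context_coords), (n,))      # pick mixture components
    centers = context_coords[comp]                       # (n, 3)
    return centers + std * torch.randn_like(centers)     # add isotropic noise
```
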
Second, our model should be able to predict the chemical element of atoms. Hence, we further include a standard categorical cross entropy loss:

$$
L_{\mathrm{CAT}} = -\mathbb{E}_{(e, \boldsymbol{r}) \sim p_{+}}[\log p(e|\boldsymbol{r},\mathcal{C})]. \tag{9}
$$


Figure 2: (a) A portion of the molecule is masked. (b) Positive coordinates are drawn from the masked atoms' positions and negative coordinates are drawn from the ambient space. (c) Both positive and negative coordinates are fed into the model. The model predicts the probability of atom occurrence at the coordinates. (d) Training losses are computed based on the discrepancy between predicted probabilities and ground truth.

Third, as introduced in Section 3.2, the sampling algorithm requires a frontier network to tell whether the sampling should be terminated. This leads to the last term — a standard binary cross entropy loss for training the frontier network:

$$
L_{\mathrm{F}} = \sum_{i \in \mathcal{F}} \log \sigma(F(\boldsymbol{h}_i)) + \sum_{i \in \mathcal{C} \setminus \mathcal{F}} \log(1 - \sigma(F(\boldsymbol{h}_i))), \tag{10}
$$

where $\mathcal{F} \subseteq \mathcal{C}$ is the set of frontier atoms in $\mathcal{C}$, $\sigma$ is the sigmoid function, and $F(\cdot)$ is the frontier network that takes an atom embedding as input and predicts the logit probability of the atom being a frontier. During training, an atom is regarded as a frontier if and only if (1) the atom is a part of the target molecule, and (2) at least one of its bonded atoms is masked.

Finally, by summing up $L_{\mathrm{BCE}}$, $L_{\mathrm{CAT}}$, and $L_{\mathrm{F}}$, we obtain the full training loss $L = L_{\mathrm{BCE}} + L_{\mathrm{CAT}} + L_{\mathrm{F}}$. The full training process is illustrated in Figure 2.

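The three terms combine into a short loss function; the sketch below assumes the probabilities and frontier logits have already been computed with the model pieces above, and is an illustration rather than the authors' exact training code.

```python
import torch
import torch.nn.functional as F

def training_loss(p_nothing_pos, p_nothing_neg, p_elem_pos, elem_labels,
                  frontier_logits, frontier_labels, eps=1e-8):
    """L = L_BCE + L_CAT + L_F (Eqs. 8-10).

    p_nothing_pos / p_nothing_neg: p(Nothing | r, C) at positive / negative coords.
    p_elem_pos: Eq. 4 probabilities at positive coords, shape (N, |E|).
    elem_labels: ground-truth element indices of the masked atoms.
    frontier_logits / frontier_labels: F(h_i) and 0/1 frontier indicators.
    """
    l_bce = -(torch.log(1 - p_nothing_pos + eps).mean()
              + torch.log(p_nothing_neg + eps).mean())            # Eq. 8
    l_cat = F.nll_loss(torch.log(p_elem_pos + eps), elem_labels)  # Eq. 9
    l_f = F.binary_cross_entropy_with_logits(
        frontier_logits, frontier_labels.float())                 # Eq. 10
    return l_bce + l_cat + l_f
```
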
# 4 Experiments

We evaluate the proposed method on two relevant structure-based drug design tasks: (1) Molecule Design, generating molecules for given binding sites (Section 4.1), and (2) Linker Prediction, generating substructures to link two given fragments in the binding site (Section 4.2). Below, we describe common setups shared across tasks. Detailed task-specific setups are provided in each subsection.

Data We use the CrossDocked dataset [7] following [20]. The dataset originally contains 22.5 million docked protein-ligand pairs at different levels of quality. We filter out data points whose binding pose RMSD is greater than $1\AA$, leading to a refined subset consisting of 184,057 data points. We use mmseqs2 [31] to cluster the data at $30\%$ sequence identity, and randomly draw 100,000 protein-ligand pairs for training and 100 proteins from the remaining clusters for testing.

Model We trained a universal model for all the tasks. The number of message passing layers in the context encoder is $L = 6$, and the hidden dimension is 256. We train the model using the Adam optimizer at a learning rate of 0.0001. Other details about the model architecture and training parameters are provided in the supplementary material and the open source repository: https://github.com/luost26/3D-Generative-SBDD.

<table><tr><td colspan="2">Metric</td><td>liGAN</td><td>Ours</td><td>Ref</td></tr><tr><td rowspan="2">Vina Score (kcal/mol, ↓)</td><td>Avg.</td><td>-6.144</td><td>-6.344</td><td>-7.158</td></tr><tr><td>Med.</td><td>-6.100</td><td>-6.200</td><td>-6.950</td></tr><tr><td rowspan="2">QED (↑)</td><td>Avg.</td><td>0.371</td><td>0.525</td><td>0.484</td></tr><tr><td>Med.</td><td>0.369</td><td>0.519</td><td>0.469</td></tr><tr><td rowspan="2">SA (↑)</td><td>Avg.</td><td>0.591</td><td>0.657</td><td>0.733</td></tr><tr><td>Med.</td><td>0.570</td><td>0.650</td><td>0.745</td></tr><tr><td rowspan="2">High Affinity (%, ↑)</td><td>Avg.</td><td>23.77</td><td>29.09</td><td>-</td></tr><tr><td>Med.</td><td>11.00</td><td>18.50</td><td>-</td></tr><tr><td rowspan="2">Diversity (↑)</td><td>Avg.</td><td>0.655</td><td>0.720</td><td>-</td></tr><tr><td>Med.</td><td>0.676</td><td>0.736</td><td>-</td></tr></table>

Table 1: Mean and median values of the four metrics on generation quality. $(\uparrow)$ indicates higher is better; $(\downarrow)$ indicates lower is better.


Figure 3: Distributions of Vina, QED, and SA scores over all the generated molecules.

# 4.1 Molecule Design

In this task, we generate molecules for specific binding sites with our model and the baselines. The inputs to the models are binding sites extracted from the proteins in the testing set. We sample 100 unique molecules for each target.

Baselines We compare our approach with the state-of-the-art baseline liGAN [20]. liGAN is based on conventional 3D convolutional neural networks. It generates voxelized molecular images and relies on a post-processing algorithm to reconstruct the molecule from the generated image.

Metrics We evaluate the quality of generated molecules from three main aspects: (1) Binding Affinity measures how well the generated molecules fit the binding site. We use Vina [33, 1] to compute the binding affinity (Vina Score). Before feeding the molecules to Vina, we employ the Universal Force Field (UFF) [24] to refine the generated structures, following [20]. (2) Drug Likeness reflects how much a molecule is like a drug. We use the QED score [4] as the metric for drug-likeness. (3) Synthesizability assesses the ease of synthesis of generated molecules. We use the normalized SA score [6, 35] to measure molecules' synthesizability.

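A minimal sketch of the RDKit portion of this pipeline is shown below. Vina scoring and the normalized SA score require external tooling (QuickVina 2 and the RDKit contrib `sascorer` module) and are omitted; the function name is our own.

```python
from rdkit import Chem
from rdkit.Chem import AllChem, QED

def refine_and_score(mol: Chem.Mol) -> float:
    """Refine a generated 3D structure with UFF, then compute QED.

    Assumes `mol` already carries a 3D conformer produced by the sampler.
    """
    mol = Chem.AddHs(mol, addCoords=True)   # hydrogens are needed for UFF
    AllChem.UFFOptimizeMolecule(mol)        # force-field refinement before docking
    return QED.qed(mol)                     # drug-likeness score in [0, 1]
```
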
In order to evaluate the generation quality and diversity for each binding site, we define two additional metrics: (1) Percentage of Samples with High Affinity, which measures the percentage of a binding site's generated molecules whose binding affinity is higher than or equal to that of the reference ligand. (2) Diversity [14], which measures the diversity of the generated molecules for a binding site. It is calculated by averaging pairwise Tanimoto similarities [3, 32] over Morgan fingerprints among the generated molecules of a target.

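The pairwise-similarity computation underlying the Diversity metric is straightforward with RDKit; the fingerprint radius and bit size below are conventional choices assumed here, and diversity is commonly reported as 1 minus this average similarity.

```python
from rdkit import DataStructs
from rdkit.Chem import AllChem

def average_pairwise_tanimoto(mols) -> float:
    """Average pairwise Tanimoto similarity over Morgan fingerprints
    for one binding site's generated molecules."""
    fps = [AllChem.GetMorganFingerprintAsBitVect(m, 2, nBits=2048) for m in mols]
    sims = [DataStructs.TanimotoSimilarity(fps[i], fps[j])
            for i in range(len(fps)) for j in range(i + 1, len(fps))]
    return sum(sims) / max(len(sims), 1)
```
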
Results We first calculate the Vina Score, QED, and SA for each of the generated molecules. Figure 3 presents the histograms of these three metrics, and Table 1 shows their mean and median values over all generated molecules. For each binding site, we further calculate the Percentage of Samples with High Affinity and the Diversity, and report their mean and median values in the bottom half of Table 1. From the quantitative results, we find that, in general, our model is able to discover diverse molecules that have high binding affinity to specific targets. Besides, the generated molecules from our model also exhibit other desirable properties, including fairly high drug-likeness and synthesizability. When compared to the CNN baseline liGAN [20], our method achieves clearly better performance on all metrics, especially on the drug-likeness score QED, which indicates that our model produces more realistic drug-like molecules.

To better understand the results, we select two binding sites in the testing set and visualize their top affinity samples for closer inspection. The top row of Figure 4 is the first example (PDB ID: 2hcj). The average QED and SA scores of the generated molecules for this target are 0.483 and 0.663


Ours (2hcj)

Figure 4: Generated molecules with top binding affinity and the reference molecule for two representative binding sites. Lower Vina score indicates higher binding affinity.

respectively, around the median of these two scores. $8\%$ of the generated molecules have higher binding affinity than the reference molecule, below the median of $18.5\%$. The second example (PDB ID: 4r1u) is shown in the bottom row. The average QED and SA scores are 0.728 and 0.785, and $18\%$ of the sampled molecules achieve higher binding affinity. From these two examples in Figure 4, we can see that the generated molecules have overall structures similar to the reference molecule and share some important common substructures, which indicates that the generated molecules fit into the binding site as well as the reference one. Besides, the top affinity molecules generally achieve QED and SA scores comparable to or even higher than those of the reference molecule, which reflects that the top affinity molecules not only fit well into the binding site but also exhibit desirable quality. In conclusion, these two representative cases provide evidence of the model's ability to generate drug-like, high binding affinity molecules for designated targets.

# 4.2 Linker Prediction

Linker prediction is to build a molecule that incorporates two given disconnected fragments in the context of a binding site [12]. Our model is capable of linker design without any task-specific adaptation or re-training. Specifically, given a binding site and some fragments as input, we compose the initial context $\mathcal{C}_0$ containing both the binding site and the fragments. Then, we run the auto-regressive sampling algorithm to sequentially add atoms until the molecule is complete, as sketched below.

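Reusing the hypothetical `sample_molecule` sketch from Section 3.2, linker design only changes how the initial context is seeded; this is an illustration of the idea rather than the authors' exact code.

```python
def design_linker(protein_context, fragment_atoms, model, frontier_net,
                  box_min, box_max):
    """Linker design sketch: C_0 contains the fragment atoms in addition
    to the protein atoms; the sampler itself is unchanged."""
    seeded_context = list(protein_context) + list(fragment_atoms)
    return sample_molecule(seeded_context, model, frontier_net, box_min, box_max)
```
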
Table 2: Performance of linker prediction.

<table><tr><td colspan="2">Metric</td><td>DeLinker</td><td>Ours</td></tr><tr><td rowspan="2">Similarity (↑)</td><td>Avg.</td><td>0.612</td><td>0.701</td></tr><tr><td>Med.</td><td>0.600</td><td>0.722</td></tr><tr><td colspan="2">Recovered (%, ↑)</td><td>40.00</td><td>48.33</td></tr><tr><td rowspan="2">Vina Score (kcal/mol, ↓)</td><td>Avg.</td><td>-8.512</td><td>-8.603</td></tr><tr><td>Med.</td><td>-8.576</td><td>-8.575</td></tr></table>

Data Preparation Following [12], we construct fragments of molecules in the testing set by enumerating possible double-cuts of acyclic single bonds. The pre-processing results in 120 data points in total. Each of them consists of two disconnected molecule fragments.

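The double-cut enumeration can be sketched with RDKit as follows; this omits any additional validity filters the benchmark of [12] may apply, so it should be read as an approximation of the construction.

```python
from itertools import combinations
from rdkit import Chem

def enumerate_double_cuts(mol: Chem.Mol):
    """Yield fragmented molecules obtained by cutting two acyclic single bonds."""
    cuttable = [b.GetIdx() for b in mol.GetBonds()
                if b.GetBondType() == Chem.BondType.SINGLE and not b.IsInRing()]
    for i, j in combinations(cuttable, 2):
        # Break both bonds at once; dummy atoms mark the attachment points.
        yield Chem.FragmentOnBonds(mol, [i, j], addDummies=True)
```
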
Baselines We compare our model with DeLinker [12]. Although DeLinker incorporates some 3D information, it is still a graph-based generative model. In contrast, our method operates fully in 3D space and is thus able to fully utilize the 3D context.


Figure 5: Two examples of linker prediction. Atoms highlighted in red are predicted linkers.

Metrics We assess the molecules generated from fragments with three main metrics: (1) Similarity: We use the Tanimoto similarity [32, 3] over Morgan fingerprints [14] to measure the similarity between the molecular graphs of a generated molecule and the reference molecule. (2) Percentage of Recovered Molecules: We say a test molecule is recovered if the model is able to generate a molecule that perfectly matches it (Similarity = 1.0). We calculate the percentage of test molecules that are recovered by the model. (3) Binding Affinity: We use Vina [1, 33] to compute the generated molecules' binding affinity to the target.

Results For each data point, we use our model and DeLinker to generate 100 molecules. We first calculate the average similarity for each data point and report the overall mean and median values. Then, we calculate the percentage of test molecules that are successfully recovered by the model. Finally, we use Vina to evaluate the generated molecules' binding affinity. These results are summarized in Table 2. As shown in the table, when measured by Vina score, our proposed method's performance is on par with the graph-based baseline DeLinker. However, our method clearly outperforms DeLinker on Similarity and Percentage of Recovered Molecules, suggesting that our method is able to link fragments in a more realistic way. In addition, we present two examples, along with 5 generated molecules at different similarities, in Figure 5. The examples demonstrate the model's ability to generate suitable linkers.

# 5 Conclusions and Discussions

In this paper, we propose a new approach to structure-based drug design. Specifically, we design a 3D generative model that estimates the probability density of atoms' occurrences in 3D space, and we formulate an auto-regressive sampling algorithm. Combined with the sampling algorithm, the model is able to generate drug-like molecules for specific binding sites. By conducting extensive experiments, we demonstrate our model's effectiveness in designing molecules for specific targets. Though our proposed method achieves reasonable performance in structure-based molecule design, there is no guarantee that the model always generates valid molecules. To build a more robust and useful model, future work could incorporate graph representations into the construction of 3D molecules, so that we can leverage sophisticated techniques for generating valid molecular graphs, such as valency checks [35] and property optimization [14].

# References

[1] Amr Alhossary, Stephanus Daniel Handoko, Yuguang Mu, and Chee-Keong Kwoh. Fast, accurate, and reliable molecular docking with QuickVina 2. Bioinformatics, 31(13):2214-2216, 2015.
[2] Amy C. Anderson. The process of structure-based drug design. Chemistry & Biology, 10(9):787-797, 2003. ISSN 1074-5521. doi: https://doi.org/10.1016/j.chembiol.2003.09.002. URL https://www.sciencedirect.com/science/article/pii/S1074552103001947.
[3] Dávid Bajusz, Anita Rácz, and Károly Héberger. Why is Tanimoto index an appropriate choice for fingerprint-based similarity calculations? Journal of Cheminformatics, 7(1):1-13, 2015.
[4] G Richard Bickerton, Gaia V Paolini, Jérémy Besnard, Sorel Muresan, and Andrew L Hopkins. Quantifying the chemical beauty of drugs. Nature Chemistry, 4(2):90-98, 2012.
[5] Esben Jannik Bjerrum and Richard Threlfall. Molecular generation with recurrent neural networks (RNNs). arXiv preprint arXiv:1705.04612, 2017.
[6] Peter Ertl and Ansgar Schuffenhauer. Estimation of synthetic accessibility score of drug-like molecules based on molecular complexity and fragment contributions. Journal of Cheminformatics, 1(1):1-11, 2009.
[7] Paul G Francoeur, Tomohide Masuda, Jocelyn Sunseri, Andrew Jia, Richard B Iovanisci, Ian Snyder, and David R Koes. Three-dimensional convolutional neural networks and a cross-docked data set for structure-based drug design. Journal of Chemical Information and Modeling, 60(9):4200-4215, 2020.
[8] Niklas WA Gebauer, Michael Gastegger, and Kristof T Schütt. Symmetry-adapted generation of 3D point sets for the targeted discovery of molecules. arXiv preprint arXiv:1906.00957, 2019.
[9] Ian J Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. arXiv preprint arXiv:1406.2661, 2014.
[10] Rafael Gómez-Bombarelli, Jennifer N Wei, David Duvenaud, José Miguel Hernández-Lobato, Benjamín Sánchez-Lengeling, Dennis Sheberla, Jorge Aguilera-Iparraguirre, Timothy D Hirzel, Ryan P Adams, and Alán Aspuru-Guzik. Automatic chemical design using a data-driven continuous representation of molecules. ACS Central Science, 4(2):268-276, 2018. ISSN 2374-7943. doi: 10.1021/acscentsci.7b00572.
[11] Paul CD Hawkins. Conformation generation: the state of the art. Journal of Chemical Information and Modeling, 57(8):1747-1756, 2017.
[12] Fergus Imrie, Anthony R Bradley, Mihaela van der Schaar, and Charlotte M Deane. Deep generative models for 3D linker design. Journal of Chemical Information and Modeling, 60(4):1983-1995, 2020.
[13] Wengong Jin, Regina Barzilay, and Tommi Jaakkola. Junction tree variational autoencoder for molecular graph generation. In International Conference on Machine Learning, pages 2323-2332. PMLR, 2018.
[14] Wengong Jin, Regina Barzilay, and Tommi Jaakkola. Composing molecules with multiple property constraints. arXiv preprint arXiv:2002.03244, 2020.
[15] Wengong Jin, Jeremy Wohlwend, Regina Barzilay, and Tommi Jaakkola. Iterative refinement graph neural network for antibody sequence-structure co-design. arXiv preprint arXiv:2110.04624, 2021.
[16] Diederik P Kingma and Max Welling. Auto-encoding variational Bayes. arXiv preprint arXiv:1312.6114, 2013.
[17] Matt J Kusner, Brooks Paige, and José Miguel Hernández-Lobato. Grammar variational autoencoder. In International Conference on Machine Learning, pages 1945-1954. PMLR, 2017.
[18] Yujia Li, Oriol Vinyals, Chris Dyer, Razvan Pascanu, and Peter Battaglia. Learning deep generative models of graphs. arXiv preprint arXiv:1803.03324, 2018.
[19] Qi Liu, Miltiadis Allamanis, Marc Brockschmidt, and Alexander L Gaunt. Constrained graph variational autoencoders for molecule design. arXiv preprint arXiv:1805.09076, 2018.
[20] Tomohide Masuda, Matthew Ragoza, and David Ryan Koes. Generating 3D molecular structures conditional on a receptor binding site with deep generative models. arXiv preprint arXiv:2010.14442, 2020.
[21] Noel M O'Boyle, Michael Banck, Craig A James, Chris Morley, Tim Vandermeersch, and Geoffrey R Hutchison. Open Babel: An open chemical toolbox. Journal of Cheminformatics, 3(1):1-14, 2011.
[22] Pavel G Polishchuk, Timur I Madzhidov, and Alexandre Varnek. Estimation of the size of drug-like chemical space based on GDB-17 data. Journal of Computer-Aided Molecular Design, 27(8):675-679, 2013.
[23] Matthew Ragoza, Tomohide Masuda, and David Ryan Koes. Learning a continuous representation of 3D molecular structures with deep generative models. arXiv preprint arXiv:2010.08687, 2020.
[24] Anthony K Rappé, Carla J Casewit, K S Colwell, William A Goddard III, and W Mason Skiff. UFF, a full periodic table force field for molecular mechanics and molecular dynamics simulations. Journal of the American Chemical Society, 114(25):10024-10035, 1992.
[25] Kristof T Schütt, Pieter-Jan Kindermans, Huziel E Sauceda, Stefan Chmiela, Alexandre Tkatchenko, and Klaus-Robert Müller. SchNet: A continuous-filter convolutional neural network for modeling quantum interactions. In 31st Conference on Neural Information Processing Systems (NIPS 2017), Long Beach, CA, USA, pages 1-11, 2017.
[26] Marwin HS Segler, Thierry Kogej, Christian Tyrchan, and Mark P Waller. Generating focused molecule libraries for drug discovery with recurrent neural networks. ACS Central Science, 4(1):120-131, 2018.
[27] Chence Shi, Minkai Xu, Zhaocheng Zhu, Weinan Zhang, Ming Zhang, and Jian Tang. GraphAF: A flow-based autoregressive model for molecular graph generation. arXiv preprint arXiv:2001.09382, 2020.
[28] Gregor Simm, Robert Pinsler, and José Miguel Hernández-Lobato. Reinforcement learning for molecular design guided by quantum mechanics. In International Conference on Machine Learning, pages 8959-8969. PMLR, 2020.
[29] Gregor NC Simm, Robert Pinsler, Gábor Csányi, and José Miguel Hernández-Lobato. Symmetry-aware actor-critic for 3D molecular design. arXiv preprint arXiv:2011.12747, 2020.
[30] Miha Skalic, José Jiménez, Davide Sabbadin, and Gianni De Fabritiis. Shape-based generative modeling for de novo drug design. Journal of Chemical Information and Modeling, 59(3):1205-1214, 2019.
[31] Martin Steinegger and Johannes Söding. MMseqs2 enables sensitive protein sequence searching for the analysis of massive data sets. Nature Biotechnology, 35(11):1026-1028, 2017.
[32] Taffee T Tanimoto. Elementary mathematical theory of classification and prediction. 1958.
[33] Oleg Trott and Arthur J Olson. AutoDock Vina: Improving the speed and accuracy of docking with a new scoring function, efficient optimization, and multithreading. Journal of Computational Chemistry, 31(2):455-461, 2010.
[34] David Weininger. SMILES, a chemical language and information system. 1. Introduction to methodology and encoding rules. Journal of Chemical Information and Computer Sciences, 28(1):31-36, 1988.
[35] Jiaxuan You, Bowen Liu, Rex Ying, Vijay Pande, and Jure Leskovec. Graph convolutional policy network for goal-directed molecular graph generation. arXiv preprint arXiv:1806.02473, 2018.

a3dgenerativemodelforstructurebaseddrugdesign/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6b51fe17a20416c08ae8391695dab132d85b1ff944db44238a1ee4c665c5b7a
+size 473045

a3dgenerativemodelforstructurebaseddrugdesign/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e056a20c4a3b98f5a56af7ba46e449c448f01fc42bc433500000cb438e8ed6c1
+size 318071

abayesiansymbolicapproachtoreasoningandlearninginintuitivephysics/ed451712-3b1e-4fc2-9077-e985d17a8b0d_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cb46d4022bc40b0687d21b29b785a43d471b376babbe258a47a17987d235b124
size 83400
abayesiansymbolicapproachtoreasoningandlearninginintuitivephysics/ed451712-3b1e-4fc2-9077-e985d17a8b0d_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cc8280070722d1935d4643c2caeedef913f732086fd77a7d48c3f75c880695bb
size 103212
abayesiansymbolicapproachtoreasoningandlearninginintuitivephysics/ed451712-3b1e-4fc2-9077-e985d17a8b0d_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:428c5bc2fe68265fa9b2aa007be2bbcad9eaa6b5f5a298b8d690d05a4cab9c1b
size 743677
abayesiansymbolicapproachtoreasoningandlearninginintuitivephysics/full.md
ADDED
@@ -0,0 +1,319 @@
# A Bayesian-Symbolic Approach to Reasoning and Learning in Intuitive Physics

# Kai Xu

University of Edinburgh contact@xuk.ai

# Akash Srivastava

MIT-IBM Watson AI Lab akash.srivastava@ibm.com

# Dan Gutfreund

MIT-IBM Watson AI Lab dgutfre@us.ibm.com

# Felix A. Sosa

Harvard University fsosa@fas.harvard.edu

# Tomer Ullman

Harvard University tomerullman@gmail.com

# Joshua B. Tenenbaum

Massachusetts Institute of Technology jbt@mit.edu

# Charles Sutton

University of Edinburgh & Google AI c.sutton@ed.ac.uk

# Abstract

Humans can reason about intuitive physics in fully or partially observed environments, even after being exposed to a very limited set of observations. This sample-efficient intuitive physical reasoning is considered a core domain of human common-sense knowledge. One hypothesis to explain this remarkable capacity posits that humans quickly learn approximations to the laws of physics that govern the dynamics of the environment. In this paper, we propose a Bayesian-symbolic framework (BSP) for physical reasoning and learning that is close to human-level sample efficiency and accuracy. In BSP, the environment is represented by a top-down generative model of entities, which are assumed to interact with each other under unknown force laws over their latent and observed properties. BSP models each of these entities as random variables, and uses Bayesian inference to estimate their unknown properties. For learning the unknown forces, BSP leverages symbolic regression on a novel grammar of Newtonian physics in a bilevel optimization setup. These inference and regression steps are performed in an iterative manner using expectation-maximization, allowing BSP to simultaneously learn force laws while maintaining uncertainty over entity properties. We show that BSP is more sample-efficient than neural alternatives on controlled synthetic datasets, demonstrate BSP's applicability to real-world common-sense scenes, and study BSP's performance on tasks previously used to study human physical reasoning.
# 1 Introduction
Imagine a ball rolling down a ramp. If asked to predict the trajectory of the ball, most of us will find it fairly easy to make a reasonable prediction. Not only that, simply by observing a single trajectory people can make reasonable guesses about the material and weight of the ball and the ramp. It is astonishing that, while the exact answers to any of these prediction and reasoning tasks require an in-depth knowledge of Newtonian mechanics and the solving of some intricate equations, an average human can perform such tasks without any formal training in physics. Studies suggest that from an early age humans come to understand physical interactions with very limited supervision, and can efficiently reason and plan actions in common-sense tasks, even in the absence of complete information (Spelke, 2000; Battaglia et al., 2013). For example, with limited data, 4- or 5-year-old children are capable of learning the physical laws behind magnetism (Bonawitz et al., 2019). Physical reasoning is considered a core domain of human common-sense knowledge (Spelke & Kinzler, 2007). Recent studies suggest that the ability to efficiently learn physical properties and interactions with limited supervision is driven by a noisy model of Newtonian dynamics, referred to as the intuitive physics engine (IPE; Bates et al., 2015; Gerstenberg et al., 2015; Sanborn et al., 2013; Lake et al., 2017; Battaglia et al., 2013). This has led to a surge in research aimed at developing agents with an IPE, or a model of the environment dynamics (Amos et al., 2018; Chang et al., 2016; Grzeszczuk et al., 1998; Fragkiadaki et al., 2015; Battaglia et al., 2016; Watters et al., 2017; Sanchez-Gonzalez et al., 2019; Ehrhardt et al., 2017; Kipf et al., 2018; Seo et al., 2019; Baradel et al., 2020). These efforts have created methods that either trade off data efficiency for high predictive accuracy, by using deep neural networks (NNs) (Breen et al., 2019; Battaglia et al., 2016; Sanchez-Gonzalez et al., 2019), or trade off the flexibility to learn from data for data efficiency, by using symbolic methods (Ullman et al., 2018; Smith et al., 2019; Sanborn et al., 2013; Bramley et al., 2018).

Inspired by the highly data-efficient ability of humans to learn and reason about their physical environment with incomplete information, we present Bayesian-symbolic physics (BSP), a Bayesian-symbolic model with an expectation-maximization (EM) algorithm that combines the sample efficiency of symbolic methods with the accuracy and generalization of data-driven approaches, using statistical inference of unobserved object properties and symbolic learning of physical force laws. In BSP, we model the evolution of the environment's dynamics over time as a generative program of entities interacting under Newtonian mechanics. As a probabilistic method, BSP treats the properties of entities, such as mass and charge, as random variables. Since Newtonian force laws are functions of these properties, in BSP we replace data-hungry NNs with symbolic regression (SR) to learn explicit force expressions, and then evolve them deterministically using equations of motion. A naive SR implementation is not enough here, though, due to two issues. One is that if SR operates on a vanilla grammar that does not constrain the search space over force laws, it can potentially have worse data efficiency than a NN. Therefore, we introduce a grammar of Newtonian physics that leverages dimensional analysis to induce a physical unit system over the search space and imposes physics-based constraints on the production rules. This prunes physically meaningless laws, drastically speeding up SR. Another issue is that the symbolic force expressions usually contain global constants to learn, e.g. the gravitational constant, and common ways to deal with this challenge turn out to be inefficient, especially in an EM setup. We tackle this challenge by using SR in a bilevel optimization framework in which a lower-level, gradient-based optimization step is used to optimize the constants. In short, our three main contributions are:
- We introduce a Bayesian-symbolic model for physical dynamics and an EM-based algorithm, which combines approximate inference methods and SR, for maximum likelihood learning.
- We introduce a grammar of Newtonian physics that appropriately constrains SR for data-efficient learning, based on priors from dimensional analysis and physics-based constraints.
- Empirically, we show that BSP reaches human-like data efficiency, often requiring just 1 to 5 synthetic scenes to learn the underlying force laws, which is much more data-efficient than the closest neural alternatives. We then illustrate how BSP can discover physical laws from real-world common-sense scenes from Wu et al. (2016). Finally, we study BSP on tasks previously used to study human physical reasoning in Ullman et al. (2018) and discuss the similarities and differences with human results.
# 2 Related work
Many symbolic and data-driven models of learning and reasoning about physics can be broken down into smaller components that are either learned or fixed. In figure 1, we compare some of the closely related recent work on physics learning. Starting on the right end, we have fully learned, deep NN approaches such as that used by Breen et al. (2019). This approach does not use any prior knowledge about physics, and learns to predict dynamics in a purely data-driven way. In the middle are hybrid models that introduce some prior knowledge about physical interactions or dynamics in their NN-based prediction models. These include interaction networks (INs; Battaglia et al., 2016), ODE graph networks (OGNs), and Hamiltonian ODE graph networks (HOGNs; Sanchez-Gonzalez et al., 2019). Since these middle approaches use deep NNs, they tend to have very good predictive accuracy, yet poor sample complexity, requiring orders of magnitude more data to train than humans (Ullman et al., 2018; Battaglia et al., 2016; Sanchez-Gonzalez et al., 2019). On the other end of the spectrum (left) are fully symbolic, rule-based physics models and engines (Smith et al., 2019; Allen et al., 2019; Wu et al., 2015; Ullman et al., 2018). While these methods are suitable for reasoning tasks, they lack the flexibility of data-driven, learned models, as they cannot generalize or adapt to changes in the environment that their fixed physics engine cannot simulate. For example, inference can fail on physically implausible scenes, and may require additional workarounds such as 'low probability events' outside of the dynamics (Smith et al., 2019).

| Fixed Physics Engine | Fixed Symbolic Force Laws | Learnable Symbolic Force Laws | Neurally Learned Interaction | Neurally Learned Interaction | Deep Neural Networks |
| --- | --- | --- | --- | --- | --- |
|  | Summation | Summation | Aggregation | Aggregation |  |
|  | Newtonian Dynamics | Newtonian Dynamics | Hamiltonian Dynamics | Neurally Learned Dynamics |  |

Legend: the outer columns are monolithic physics models; for the middle columns, the first row gives the interaction model, the second how each entity receives interactions, and the third the dynamics model.

Figure 1: From left to right, rule-based to purely data-driven models of physics. Examples for each column are (1) Smith et al. (2019), (2) Ullman et al. (2018), (3) BSP (ours), (4) (H)OGN (Sanchez-Gonzalez et al., 2019), (5) IN (Battaglia et al., 2016) and (6) Breen et al. (2019).

Symbolic regression has been used for general physics learning in prior research, ranging from Schmidt & Lipson (2009)'s work on discovering force laws from experimental data, to the more recent work of Cranmer et al. (2020) on distilling symbolic forces from INs using genetic algorithms. More recently, Udrescu & Tegmark (2020) proposed AI Feynman, which recursively simplifies the SR problem using dimensional analysis and symmetries inferred by neural networks to discover the underlying physics equations of the data. The focus of this line of work has been to discover underlying laws and equations from direct input-output data. The focus of BSP, on the other hand, is on physics learning based on indirect signals from the environment; this is a task of interest both in intuitive physics studies with humans and for human-like AI. Further, most symbolic approaches learn physics while assuming all properties in a system are known, which renders them inapplicable to environments with incomplete information. Some neural approaches focus on addressing such limitations in an end-to-end fashion (Zheng et al., 2018; Veerapaneni et al., 2020; Janner et al., 2019).
# 3 Bayesian-symbolic physics
BSP represents the physical environment using a generative model that evolves under Newtonian dynamics (section 3.1). In this model, physical laws are treated as learnable symbolic expressions and learned using symbolic regression with a specialized grammar of Newtonian physics that confines the search space and prevents the model from learning physically meaningless laws (section 3.2). BSP does not require all properties of the entities to be fully observed. It models these properties as latent random variables and infers them using Bayesian learning. To fit BSP on data with incomplete information, we propose an EM algorithm that iterates between Bayesian inference of the latent properties and SR, which gives a maximum likelihood estimate of the force expressions (section 3.3).
# 3.1 Generative model of the environment
In BSP's generative model, we represent each entity $i \in \{1 \dots N\}$ by a vector of intrinsic physical properties $z^i$ (such as mass, charge, and shape), and a time-dependent state vector $\mathbf{s}_t^i = (\mathbf{p}_t^i, \mathbf{v}_t^i)$ which describes the evolution of its position $\mathbf{p}_t^i \in \mathbb{R}^d$ and velocity $\mathbf{v}_t^i \in \mathbb{R}^d$ under Newtonian dynamics. Here, $d$ refers to the dimensionality of the environment, and is typically 2 or 3. Let $\{\tau^i\}_{i=1}^{N}$ be the set of observed trajectories from an environment with $N$ entities, where $\tau^i = \mathbf{p}_{1:T}^i := (\mathbf{p}_1^i, \ldots, \mathbf{p}_T^i)$. Then, together with the prior on $z$, for observed trajectory data $\mathcal{D}$, the generative model of BSP defines a joint probability distribution $p(\mathcal{D}, z; F)$ over $\mathcal{D}$ and latent properties $z$, given the force function $F$.

Figure 2: Illustration of how the dimensional analysis and translation invariance priors help constrain the search space. Each box contains a subset of valid and illegal (struck-out) sub-expressions.

The state transition of an entity in a Newtonian system depends on its properties and current state, as well as its interactions with other entities. So, in BSP the force on entity $i$ at time $t$ is defined as $\mathbf{f}_t^i = \sum_{j=1}^{N} F(z^i, \mathbf{s}_t^i, z^j, \mathbf{s}_t^j)$, where $F(z^i, \mathbf{s}_t^i, z^j, \mathbf{s}_t^j)$ is the interaction force between entities $i$ and $j$. Then, the trajectory $\tau^i$ of entity $i$ is generated by a transition function $\mathbb{T}$ that consumes the current state and the resultant force to compute $\mathbf{s}_{t+1}^i = \mathbb{T}(\mathbf{s}_t^i, \mathbf{f}_t^i)$. Similar to Sanchez-Gonzalez et al. (2019), we use numerical integration to simulate the Newtonian dynamics inside $\mathbb{T}$. Specifically, we choose the Euler integrator and expand $\mathbb{T}$ as
$$
\mathbf{a}_t^i = \mathbf{f}_t^i / m^i, \quad \mathbf{v}_{t+1}^i = \mathbf{v}_t^i + \mathbf{a}_t^i \Delta t, \quad \mathbf{p}_{t+1}^i = \mathbf{p}_t^i + \mathbf{v}_{t+1}^i \Delta t, \tag{1}
$$
where $m^i$ is the mass of the recipient of the force $\mathbf{f}_t^i$ and $\Delta t$ is the step size of the Euler integrator. Finally, we add Gaussian noise to each trajectory $\{\tau^i\}_{i=1}^N$, that is, $\mathcal{D} := \{\tilde{\tau}^i\}_{i=1}^N$ where $\tilde{\tau}^i := (\tilde{\mathbf{p}}_1^i, \ldots, \tilde{\mathbf{p}}_T^i)$, $\tilde{\mathbf{p}}_t^i \sim \mathcal{N}(\mathbf{p}_t^i, \sigma^2)$ and $\sigma$ is the noise level. See appendix A.1 for the details of the complete generative process and illustrative examples.
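
As a concrete illustration of this generative process, here is a minimal Python sketch (our own, not the paper's Julia/Turing implementation): pairwise forces are aggregated per entity, integrated with the Euler scheme of equation 1, and Gaussian noise is added to the resulting positions. The example force function and its constant are hypothetical.

```python
import numpy as np

def rollout(p0, v0, m, F, T, dt=0.02, sigma=0.01, rng=None):
    """Roll out N entities for T frames under pairwise force F with
    Euler integration (eq. 1), then add Gaussian observation noise."""
    rng = rng or np.random.default_rng(0)
    N, d = p0.shape
    p, v = p0.astype(float), v0.astype(float)
    traj = [p.copy()]
    for _ in range(T - 1):
        f = np.zeros((N, d))
        for i in range(N):
            for j in range(N):
                if i != j:  # f_t^i = sum_j F(z_i, s_i, z_j, s_j)
                    f[i] += F(m[i], (p[i], v[i]), m[j], (p[j], v[j]))
        a = f / m[:, None]      # a_t^i = f_t^i / m^i
        v = v + a * dt          # v_{t+1}^i = v_t^i + a_t^i * dt
        p = p + v * dt          # p_{t+1}^i = p_t^i + v_{t+1}^i * dt
        traj.append(p.copy())
    traj = np.stack(traj)       # (T, N, d) noise-free positions
    return traj + sigma * rng.standard_normal(traj.shape)

def gravity(mi, si, mj, sj, c=1.0):
    """Hypothetical ground-truth force: Newtonian gravity with constant c."""
    r = sj[0] - si[0]           # vector from entity i towards entity j
    return c * mi * mj * r / (np.linalg.norm(r) ** 3 + 1e-9)
```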
# 3.2 A grammar of Newtonian physics
In order to attain good data efficiency, we choose to learn the pairwise force $F(z^{i},\mathbf{s}^{i},z^{j},\mathbf{s}^{j})$ between entities $i$ and $j$ using symbolic search. This approach can be inefficient if the search space of possible functions is too large, or inaccurate if the search space is too small. So, we constrain the function $F$ to be a member of a context-free language with a grammar $\mathcal{G}$, which we call the grammar of Newtonian physics. We design the grammar to be expressive enough to represent a large variety of potential force laws, while incorporating some simple, general constraints to improve the efficiency of symbolic search. Here we describe $\mathcal{G}$ informally; for the formal description, see figure 11 (appendix A.2).
We consider the following terminal nodes in $\mathcal{G}$: the masses $m_i, m_j$ of the entities, their friction coefficients $\mu_i, \mu_j$, shapes $s_i, s_j$, positions $\mathbf{p}_i, \mathbf{p}_j$, velocities $\mathbf{v}_i, \mathbf{v}_j$, the contact point $\mathbf{c}$, i.e. the position (if any) at which they touch, and finally a set of $K$ learnable constants $\{c_k\}_{k=1}^K$. In cases of no contact, $\mathbf{c}$ is set to the middle position of the two objects, i.e. $\mathbf{c} = (\mathbf{p}_i + \mathbf{p}_j) / 2$. We include the operators $(\cdot)^2$ (square), $+, -, \times, \div, \| \cdot \|_2$ (L2-norm), normalise$(\cdot)$ and project$(\cdot, \cdot)$, which projects a vector onto the unit ball. The grammar also allows forces to be conditioned on a Boolean expression, in order to support conditional forces that only apply when a condition is true, e.g., when two objects collide. We provide $\mathcal{G}$ with two primitive functions that encode the output of the perception system: doesCollide for collision detection and isOn to check if an entity is on a surface. These functions output the integers 0 or 1. A rule in the grammar then allows a force expression to be multiplied by a conditional, so that BSP can learn expressions that represent when a conditional force should be applied.
Naively supporting all possible expressions for any combination of terminals would make SR highly inefficient, and even lead to physically impossible force laws. Therefore, we introduce two simple and general types of prior knowledge inspired by physics: dimensional analysis and translation invariance. Figure 2 shows examples of expressions that are excluded by each. First, inspired by dimensional analysis in the natural sciences, where the relations between different units of measurement are tracked (Brescia, 2012), we build the concept of units of measurement into the nonterminals of $\mathcal{G}$. The units we consider are kilogram ($Kg$) for mass, meter ($Meter$) for distance, and meter per second ($MeterSec$) for speed. With this unit system in place, we only allow addition and subtraction of symbols with the same units, avoiding physically impossible sub-expressions such as $Kg - Meter$. Importantly, this can lead to force laws with the unit Newton ($N$). Second, the grammar ensures that all force laws are translation-invariant, that is, independent of the choice of the origin of the reference frame. To do this, the grammar forbids the direct use of the absolute positions $\mathbf{p}_i$, $\mathbf{p}_j$ and $\mathbf{c}$, and only allows their differences to be used in expressions.
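
To make the dimensional analysis prior concrete, the sketch below (our own Python illustration, not the paper's Julia grammar) tracks units as exponent vectors over (kg, m, s) and rejects dimensionally inconsistent sums such as $Kg - Meter$:

```python
# Units as exponent tuples over (kg, m, s): e.g. Newton = kg * m / s^2.
KG, METER, MPS = (1, 0, 0), (0, 1, 0), (0, 1, -1)
NEWTON = (1, 1, -2)

def unit_of(expr):
    """Unit of an expression tree ('terminal', name, unit) / (op, lhs, rhs).
    Raises ValueError on dimensionally inconsistent sub-expressions."""
    op, *args = expr
    if op == "terminal":
        return args[1]
    lhs, rhs = unit_of(args[0]), unit_of(args[1])
    if op in ("+", "-"):
        if lhs != rhs:  # the 'Kg - Meter' case excluded by the grammar
            raise ValueError(f"illegal: {lhs} {op} {rhs}")
        return lhs
    if op == "*":
        return tuple(a + b for a, b in zip(lhs, rhs))
    if op == "/":
        return tuple(a - b for a, b in zip(lhs, rhs))
    raise ValueError(f"unknown operator {op}")

# m_i * m_j / (r * r) has unit kg^2 / m^2; multiplying by a constant
# carrying N*m^2/kg^2 (like the gravitational constant) yields Newtons.
grav = ("/",
        ("*", ("terminal", "m_i", KG), ("terminal", "m_j", KG)),
        ("*", ("terminal", "r", METER), ("terminal", "r", METER)))
print(unit_of(grav))  # (2, -2, 0), i.e. kg^2 / m^2
```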
Finally, some care is needed to ensure the grammar is unambiguous. For example, if we used a rule like $\text{Coeff} \rightarrow \text{Coeff} \times \text{Coeff}$, then the grammar could generate many expressions that redundantly represent the same function. This would make the search much more expensive. Instead, we rewrite this rule in an equivalent right-branching way, e.g., $\text{Coeff} \rightarrow \text{BaseCoeff} \times \text{Coeff}$. This significantly reduces the search space without changing the expressivity of the grammar. Overall, although the grammar puts basic constraints on plausible physical laws, it is still expressive: there are more than 7 million possible trees up to depth 6, while even the expression of universal gravitation has a depth of 7; the number of expressions up to depth 7 in $\mathcal{G}$ is intractable to count.
# 3.3 Learning algorithm
Following the EM approach, our learning method alternates between an E-step, where object property distributions are estimated given the current forces via approximate inference, and an M-step, where forces are learned given object property distributions via SR (section 3.3.1). For the E-step, we consider two standard inference options: importance sampling (for any prior) and Hamiltonian Monte Carlo (for continuous priors only). Appendix A.3.1 discusses some details on applying them in BSP. Note that appendix A.3 also provides pseudo-code for all algorithms introduced in this section; a schematic sketch of the loop is given below.
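
Schematically, the alternation can be sketched as follows. This is our own Python skeleton under stated assumptions (a self-normalized importance-sampling E-step and an SR-based M-step); prior_sample, log_likelihood and symbolic_regression are hypothetical callables supplied by the user.

```python
import numpy as np

def em_loop(data, prior_sample, log_likelihood, symbolic_regression,
            n_iters=5, n_particles=200, rng=None):
    """Alternate E-steps (inference over latent properties z) and
    M-steps (symbolic regression of the force law F)."""
    rng = rng or np.random.default_rng(0)
    F = lambda zi, si, zj, sj: 0.0   # initialize with the zero force F_0
    for _ in range(n_iters):
        # E-step: importance-sample z from the prior, weighting each
        # particle by its likelihood under the current force law F.
        zs = [prior_sample(rng) for _ in range(n_particles)]
        log_w = np.array([log_likelihood(data, z, F) for z in zs])
        w = np.exp(log_w - log_w.max())
        w /= w.sum()                 # self-normalized importance weights
        # M-step: maximum-likelihood force law, averaging the SR loss
        # over the weighted posterior samples of z.
        F = symbolic_regression(data, zs, w)
    return F, zs, w
```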
Implementation. In our work, we implement the generative models as probabilistic programs using the Turing probabilistic programming language (Ge et al., 2018) in Julia. As such, the E-step is handled by Turing's built-in samplers. For the M-step, we use the cross-entropy implementation from the ExprOptimization.jl package, which allows users to define grammars with an intuitive syntax.
# 3.3.1 Symbolic regression with bilevel optimization for learnable constants
Symbolic regression (SR) is a function approximation method that searches over a space of mathematical expressions defined by a context-free grammar (CFG) (Koza, 1994). In our work, we use the cross-entropy method for SR. The method starts with a probabilistic context-free grammar (PCFG) that assumes a uniform distribution over the production rules (PCFGs extend CFGs by assigning each production rule a probability). At each successive iteration, it samples $n$ trees (up to depth $d$) from the current PCFG, evaluates their fitness by a loss function $\mathcal{L}$, and uses the top-$k$ trees to fit a PCFG via maximum likelihood for the next iteration. This process returns the learned force law at the end of training. More formally, to learn force laws, we need to find an expression $e \in L(\mathcal{G})$, where $L(\mathcal{G})$ is the language generated by $\mathcal{G}$, and values for the learnable constants $c \coloneqq \{c_k\}_{k=1}^K$ that define the force function $\mathbf{f}_{e,c}$. The loss used by the cross-entropy method involves computing the log-likelihood of the generative model. As the observed trajectory is generated sequentially given an initial state, the computation of the log-likelihood term cannot be parallelized, which can be computationally expensive in practice. Therefore, following Battaglia et al. (2016) and Sanchez-Gonzalez et al. (2019), we use a vectorized version of the log-likelihood that performs the simulation at each time step in parallel, $LL(e,c;z,\mathcal{D}) = \sum_{i=1}^{N}\sum_{t=1}^{T-1}\log\mathcal{N}(\tilde{\mathbf{p}}_{t+1}^i;\mathbb{T}(\tilde{\mathbf{s}}_t^i,\mathbf{f}_{e,c,t}^i),\sigma)$, where $\mathbb{T}$ is expanded following equation 1 and $\tilde{\mathbf{s}}_t^i \coloneqq (\tilde{\mathbf{p}}_t^i,\tilde{\mathbf{v}}_t^i)$. Clearly, $LL$ differs from its sequential counterpart, as the input to the integrator contains noise at each step. However, similar to previous work, we found this is not an issue when learning forces by regression.
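
The search loop just described can be sketched as follows; this is our own minimal Python rendering, not ExprOptimization.jl's API, and `pcfg.sample` / `pcfg.fit` are hypothetical interface methods.

```python
import random

def cross_entropy_sr(pcfg, loss, n=1000, k=50, d=6, n_iters=20, seed=0):
    """Cross-entropy method over a PCFG: sample n trees up to depth d,
    keep the top-k by loss, refit the rule probabilities, repeat."""
    rng = random.Random(seed)
    best = None
    for _ in range(n_iters):
        trees = [pcfg.sample(max_depth=d, rng=rng) for _ in range(n)]
        elite = sorted(trees, key=loss)[:k]     # lower loss = fitter
        if best is None or loss(elite[0]) < loss(best):
            best = elite[0]                     # track the incumbent
        pcfg = pcfg.fit(elite)                  # MLE over elite trees
    return best
```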
In order to prevent overfitting by finding over-complex expressions, we add a regularization term, the weighted negative log-probability under a uniform PCFG prior of $\mathcal{G}$, to the negative log-likelihood, arriving at our final loss per trajectory $\mathcal{L}(e,c;z,\mathcal{D}) = -LL(e,c;z,\mathcal{D}) - \lambda \log \mathcal{P}_0(e)$. Here $\mathcal{P}_0$ is the uniform PCFG of $\mathcal{G}$, and $\lambda$ is the hyper-parameter that controls the regularization. The loss for multiple trajectories is simply a summation of $\mathcal{L}$ over the individual trajectories. The continuous constants $c$ require care, as they can take any value. To handle this, we use bilevel optimization (Dempe, 2002), where the upper level is the original symbolic regression problem and the lower level is an extra optimization over the constants. This means we optimize the constants before computing the loss of each candidate tree within the cross-entropy iterations. The loss for each expression $e$ in SR is then defined as $\mathcal{L}(e;z,\mathcal{D}) = \mathcal{L}(e,\arg \min_c\mathcal{L}(e,c;z,\mathcal{D});z,\mathcal{D})$. In BSP, we use the L-BFGS optimizer to solve the lower-level optimization.
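
A sketch of this bilevel loss, with SciPy's L-BFGS-B standing in for the Julia optimizer used in the paper; neg_log_lik, log_p0 and the number of constants are hypothetical, caller-supplied pieces.

```python
import numpy as np
from scipy.optimize import minimize

def bilevel_loss(expr, neg_log_lik, log_p0, n_constants=3, lam=0.1):
    """Score a candidate expression: the lower level fits the learnable
    constants c with L-BFGS; the upper level sees only the symbolic form."""
    res = minimize(lambda c: neg_log_lik(expr, c),
                   x0=np.ones(n_constants), method="L-BFGS-B")
    # L(e; z, D) = -LL(e, c*) - lambda * log P0(e): complexity is
    # penalized via the (negative) log-prior under the uniform PCFG.
    return res.fun - lam * log_p0(expr)
```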
Our way of handling learnable constants is related to other SR methods and to bilevel optimization. Traditionally, constants are either randomly generated from a predefined, fixed integer set or a continuous interval, or, for evolutionary algorithms, they can be mutated and combined during evolution to produce constants that fit better; such constants are often referred to as ephemeral constants (Davidson et al., 2001). Compared to these methods, the benefit of our formulation is that the evaluation of each tree candidate depends on the symbolic form only, as the constants are optimized away, making the search more efficient. Note that although the literature has not explicitly framed this way of constant learning as a bilevel optimization problem, similar strategies are also used in (Cerny et al., 2008; Kommenda et al., 2013; Quade et al., 2016). In contrast to the recent use of bilevel optimization in meta-learning, e.g. (Finn et al., 2017), our method is simpler: as our upper-level optimization is gradient-free, we do not need to pass gradients from the lower level to the upper level.
# 4 Experiment: Learning force laws in fully observed environments
In this section, we evaluate BSP in a data-limited setting where the properties are fully observed.

# Synthetic datasets (SYNTH).
We created three synthetic datasets for controlled evaluation: NBODY (n-body simulations with 4 bodies), BOUNCE (bouncing balls) and MAT (mats with friction); see figure 3 for an illustration. NBODY is populated by placing a heavy body with large mass and no velocity at $[0,0]$, and three other bodies at random positions with random velocities such that they orbit the heavy body in the middle in the absence of the other two bodies. The gravitational constant is set such that the system is stable for the duration of the simulation. The ground truth force to learn is the gravitational force between bodies. BOUNCE is generated by simulating elastic collisions between balls in a box. The ground truth force to learn is the collision resolution force. MAT simulates friction-based interaction between discs and a mat. We populate this dataset by rolling discs over mats and applying a friction force when they come into contact. We randomized the initial states of the discs as well as the sizes, friction coefficients, and positions of the mats. The ground truth force to learn is the force of friction.

Figure 3: Example scenes from SYNTH ((a) NBODY, (b) BOUNCE, (c) MAT). Entities in gray are static.
All scenes are simulated using a physics engine with a time discretization of 0.02, for 50 frames. We generate 100 scenes per dataset, and hold out 20 of them for testing. Appendix C.1 provides the ground truth force expressions used to generate each dataset under our grammar.
# 4.1 Data-efficiency: Symbolic vs neural
Baselines. For the experiments in this section we use four different neural baselines: (i) a specialized instance of the OGN model (Sanchez-Gonzalez et al., 2019) that only outputs the partial derivative of the velocity variable, unlike the original model which also outputs the partial derivative of the position variable (under Newtonian dynamics, the partial derivative of the position variable is simply the velocity); (ii) an interaction network (IN) (Battaglia et al., 2016); (iii) a multi-layer perceptron-based force model (MLP (Force)) that directly outputs the force; and (iv) a multi-layer perceptron-based position model (MLP (Position)) that outputs the next position. See appendix C.2 for details of the neural architecture, training and parameterization setup for all the baselines. Lastly, as a reference, we also include the performance of a zero-force baseline $(F_0)$, which corresponds to the constant velocity baseline in (Battaglia et al., 2016). Note that all neural baselines as well as BSP are provided with symbolic representations for fair comparison.

Figure 4: Comparison of neural baselines and BSP, using predictive error on held-out scenes given a varying number of training scenes ((a) NBODY, (b) BOUNCE, (c) MAT). Some baselines are not displayed due to very poor performance; see figure 16 in appendix C.4 for the version with all methods displayed.
In order to compare the symbolic M-step of BSP against the neural baselines in terms of data efficiency, we report the per-frame prediction accuracy on held-out datasets as a function of the amount of training data. We use noise-free trajectories in this evaluation. Since the neural baselines cannot be trained if the properties are not fully observed, we provide all properties as observed data. For each dataset, we hold out 20 scenes for evaluation. We randomly shuffle the remaining 80 scenes, and use the first $k$ scenes to fit the models. Because an average person can perform physical learning tasks similar to the ones we use with fewer than 5 scenes (Ullman et al., 2018), we only vary $k$ from 1 to 10 in our experiments. As the performance metric, we use the normalized root mean squared error (nRMSE), per frame per entity, between the predicted location of an entity and its actual location. We repeat each of the experiments five times with different training sets. These results are shown in figure 4, where the line plots are median values and the error bars are the $25\%$ and $75\%$ quantiles. Note that the ground truth force $F^{*}$ has an RMSE of 0 per frame. As can be seen, for most values of $k$ across the three datasets, the symbolic M-step of BSP is more data-efficient than the neural baselines. The exceptions are in the MAT dataset for $k = 1, 2, 3, 4$. This is likely due to specific bad local minima that may exist in the limited training data.
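
For reference, the metric can be computed as in the sketch below; the paper does not spell out the normalizer here, so normalizing by the range of the ground-truth coordinates is our assumption:

```python
import numpy as np

def nrmse(pred, true):
    """Normalized RMSE per frame per entity between predicted and
    ground-truth positions of shape (T, N, d). Normalizing by the
    range of the ground-truth coordinates is our assumption."""
    rmse = np.sqrt(np.mean(np.sum((pred - true) ** 2, axis=-1)))
    return rmse / (true.max() - true.min())
```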
For NBODY and MAT, BSP can find the ground truth force function with 1 scene and 10 scenes, respectively. BOUNCE is the only case where our method fails to find the true law within 10 scenes; we include the typical inferred force law in appendix C.3.1, as well as the predicted trajectories of some selected scenes for inspection. In appendix C.3.2, we also demonstrate that this inferred force law closely approximates the true force law and so can generalize to other scenes.
For BOUNCE, the neural baselines cannot reach the performance of $F_{0}$ even with 10 training scenes. This is a known issue with neural network approaches when learning collisions, as the inherently sparse nature of the collision interaction does not provide enough training signal (Battaglia et al., 2016). The performance ranking among the neural baselines also supports our discussion around figure 1. Object-centric models (OGN and IN) tend to perform better than the rest by decomposing the transition into interaction and dynamics. Models with predefined dynamics and numerical integration (OGN and MLP (Force)) perform better than their counterparts with learned dynamics (IN and MLP (Position)), as they exploit an explicit notion of "force" (combined here with the Euler integrator).
In short, symbolic regression with proper priors that constrain the search space leads BSP to significantly better data efficiency across the three datasets studied. Together with the performance ranking of the neural models, our experiments show how different levels and forms of prior knowledge help with data-efficient learning.
# 4.2 Ablation study of priors in the BSP grammar
To demonstrate the impact of the grammar of Newtonian physics on the overall data efficiency of BSP, we consider two ablations of our grammar: (i) $\mathcal{G}_{01}$, which is $\mathcal{G}$ without the dimensional analysis prior, and (ii) $\mathcal{G}_{00}$, which is $\mathcal{G}$ without both the dimensional analysis and translation invariance priors; we also refer to $\mathcal{G}$ as $\mathcal{G}_{11}$ in this section. For reference, for a maximum depth of 5, $\mathcal{G}_{00}$ contains 8,593,200 expressions, $\mathcal{G}_{01}$ contains 7,935,408 expressions and $\mathcal{G}_{11}$ contains 75,816. For a maximum depth of 6, $\mathcal{G}_{11}$ contains 771,120, and the number for the other variants is intractable.

Figure 5: Ablation study of priors, using predictive error on held-out scenes given a varying number of training scenes ((a) NBODY, (b) BOUNCE, y-axis in log scale, (c) MAT). Comparison between $\mathcal{G}_{11}$ and $\mathcal{G}_{01}$ shows the effect of the dimensional analysis prior; comparison between $\mathcal{G}_{01}$ and $\mathcal{G}_{00}$ shows the effect of the translation invariance prior.

Figure 6: Results of the EM algorithm on NBODY. Panels (a)-(c) show the posterior of mass for Entity 1 in Scene 1 with the corresponding force function at EM iterations $i = 0$ ($F_0 = 0$), $i = 1$ ($F_1 = F^\dagger$) and $i = 3$ ($F_3 \approx F^*$); panel (d) shows the expression tree for $F_3$. In panel (b), the force function is $F^{\dagger} = 239.99\frac{m_{i}m_{j}}{\|\mathbf{p}_{i} - \mathbf{p}_{j}\|_{2}}\frac{\mathbf{p}_{i} - \mathbf{p}_{j}}{\|\mathbf{p}_{i} - \mathbf{p}_{j}\|_{2}}$. The constant in panel (d) is $c = 2.04\mathrm{e}3$.
We repeat the experiment from section 4.1 for all three grammar variants, and report the results in figure 5. As can be seen, both priors in BSP's grammar contribute to the data efficiency of its M-step, with dimensional analysis having the larger impact. This is aligned with the analysis of the number of expressions per grammar above, showing that data efficiency improves as the number of possible expressions decreases. There is also a case in which the priors do not show an advantage: MAT, in which the friction law is simple enough (the shallowest expression among all) to be found easily even without priors.
# 5 Experiment: Learning force laws in partially observed environments
We now evaluate BSP's performance in environments with some unknown intrinsic entity properties. We do not consider the neural baselines in this section, as they did not show competitive performance compared to BSP even in the fully observed environment.
# 5.1 EM performance on SYNTH
We first demonstrate BSP's ability to jointly learn and reason about the environment by recovering the true force law when some properties are unobserved. As an illustrative example, we use three scenes from the NBODY dataset (with four entities per scene), such that if the true masses are given, the M-step can successfully learn the true force law. We assume that the mass of the heavy entity is known but the masses of the other three, lighter entities are unknown, with a uniform prior $\mathcal{U}(0.02, 9)$. We use the EM algorithm to fit the same generative model that was used to simulate the data. Figure 6 shows the posterior distribution over mass and the force function at initialization (figure 6a), midway (figure 6b), and convergence (figure 6c). In this run, after 3 iterations, our algorithm successfully recovers the true force function. We repeat this experiment ten times with randomly sampled scenes; in eight of them, BSP successfully recovers the true force law. Appendix D.1.1 provides another demonstrative example on MAT.
For a quantitative analysis, we run the EM algorithm on each of the three scenarios from SYNTH, with 5 random scenes, repeated 5 times. In table 1 we report nRMSE on a fixed testing set of 20 scenes. Notice that the performance has a large variance, due to the fact that not all randomly selected 5-scene subsets provide enough training signal. In all three scenarios, BSP consistently outperforms the zero-force baseline and can successfully recover the true force law in some random subsets.

|  | NBODY (Median / 25% Q / 75% Q) | BOUNCE (Median / 25% Q / 75% Q) | MAT (Median / 25% Q / 75% Q) |
| --- | --- | --- | --- |
| BSP | 8.05e-1 / 7.83e-1 / 1.07e0 | 3.16e-2 / 3.11e-2 / 3.94e-2 | 5.39e-4 / 3.58e-4 / 7.63e-4 |
| $F_0$ | 1.77e0 | 5.98e-2 | 1.20e-3 |

Table 1: Test predictive performance (nRMSE) on partially observed SYNTH scenarios (using 5 random scenes for training, over 5 different runs). Note BSP consistently beats the constant baseline.
# 5.2 Real-world data: Physics 101
While the SYNTH benchmark is interesting, a strength of our approach is its ability to generalize to real-world data. To demonstrate this, we use the PHYS101 dataset (Wu et al., 2016), a dataset of real-world physical scenes. We consider two scenarios, FALL and SPRING (shown in figure 7). As BSP works on symbolic inputs, we pre-process the raw videos using standard tracking algorithms from OpenCV to extract observations in numerical form; see the appendix for pre-processing details.

Figure 7: Example frames for FALL (left) and SPRING (right).

Figure 8: Learned force expressions for FALL (left) and SPRING (right).

|  |  | Median | 25% Q | 75% Q |
| --- | --- | --- | --- | --- |
| FALL | BSP | 4.30e-2 | 3.45e-2 | 5.09e-2 |
| FALL | $F_0$ | 5.05e-2 | 4.61e-2 | 5.49e-2 |
| SPRING | BSP | 9.22e-3 | 8.25e-3 | 9.87e-3 |
| SPRING | $F_0$ | 2.19e-2 | 2.17e-2 | 2.21e-2 |

Table 2: Test predictive performance (nRMSE) for FALL (top) and SPRING (bottom).
FALL. We first train BSP on a single scene from FALL, where an object is dropped onto a table. A typical force expression that BSP discovers is $c \times m_i \times m_j \times \text{normalize}(\mathbf{p}_i - \mathbf{p}_j)$, as shown in figure 8, suggesting that BSP is able to learn the correct form of the gravitational force law. Here, the direction of $\mathbf{p}_i - \mathbf{p}_j$ points towards the table and $m_j$ is the mass of the table, which, together with $c$, serves as the constant $g$ in $F_g = m_i g$. In another solution frequently found by BSP, the direction is learned as normalize$(\mathbf{v}_i)$, as the velocity is always downwards. BSP can also learn global forces directly if the constant vectors $[1, 0]$ and $[0, 1]$ are provided, which is done in the next section.
SPRING. After learning gravity from FALL, we train BSP on a single scene from SPRING, assuming that the original length of the spring is known, to evaluate whether it can learn Hooke's law $F = kx$, where $k$ is the spring constant and $x$ is the displacement of the spring. An example force law that BSP learns in this case is $c \times (\mathrm{norm}(\mathbf{p}_i - \mathbf{p}_j) - l_j) \times (\mathbf{p}_i - \mathbf{p}_j) \div \mathrm{norm}(\mathbf{p}_i - \mathbf{p}_j)$, as shown in figure 8, clearly suggesting that BSP can learn Hooke's law.
Finally, to quantitatively evaluate BSP's performance on PHYS101, we select a fixed set of 4 scenes from each scenario and use 2 for training and 2 for testing, repeating over all permutations of the 4 scenes. The aggregated test performance in terms of normalized RMSE is given in table 2. BSP only slightly outperforms the zero-force baseline on FALL. This is because FALL contains only a limited number of frames ($< 10$) and the trajectories do not diverge far from what zero force would predict. For SPRING, BSP outperforms the baseline significantly, closely matching the ground-truth period of the harmonic motion. We also provide a qualitative evaluation via a time-series plot of the change of the block's vertical position over time in the appendix.
# 5.3 Does BSP perform similarly to humans?
In this section, we compare BSP's performance against humans' on the experiment done in (Ullman et al., 2018). For this purpose, we use the ULLMAN dataset from this study, which consists of 60 videos in which a set of discs interact with each other and with mats within a bounded area, as exemplified in figure 9. While similar to SYNTH, ULLMAN has a lot more diversity in the scenes (stimuli) and reasoning tasks. The force laws in ULLMAN are similar to those in SYNTH, but they have different constants and the scenes are generated from a completely different simulator. In the original experiment, each participant is presented with 5 videos. Each of the videos is from a different "world", such that the object properties (for each color) and force laws are different in every video. For each video, the participant is asked 13 multiple-choice questions related to the mass of discs ("Mass"), the roughness of mats ("Friction") and the types of global ("Global") and pairwise forces ("Pairwise"). For example: "How massive are red objects?", where the options to choose from are "Light", "Medium" and "Heavy". Please refer to appendix D.3 for the complete set of questions and options.

Figure 9: Example from ULLMAN.

|  | Human | BSP | Chance |
| --- | --- | --- | --- |
| Mass | 43% | 40% | 33% |
| Friction | 44% | 39% | 33% |
| Global | 68% | 55% | 20% |
| Pairwise | 62% | 50% | 33% |

Table 3: Accuracy per question category.
To be consistent with the setup in (Ullman et al., 2018), we assume that the friction and collision forces are known a priori. The goal is thus to apply BSP on a single scene and infer the properties by learning the expressions for the residual global and pairwise forces. The properties to infer are the masses of the discs, the friction coefficients of the mats and a latent property $q$ that controls the pairwise interaction. To accommodate the global force, we added two constant vectors $[1, 0]$ and $[0, 1]$ to the grammar; properties related to the known forces are also removed from the grammar. In comparison to the models studied in Ullman et al. (2018), BSP aims to learn the force expressions explicitly, rather than inferring binary variables that turn predefined force components on or off. Appendix D.3 provides details on the learning tasks and the setup of BSP. We perform 3 runs of BSP on each of the 60 scenes and use the learning results to answer the same set of 13 questions presented to participants in (Ullman et al., 2018).
Table 3 summarizes the accuracy of humans and BSP on the four question categories. As can be seen, BSP's performance is worse than that of humans, but convincingly better than chance. Considering the difficulty of inferring 9 properties and learning the targeted force law using only 1 scene, this may not be surprising. There are intriguing similarities between the answers given by BSP and the human participants: both display the same relative order of accuracy across question types ("Global" > "Pairwise" > "Friction" > "Mass"), though BSP's performance remains inferior to humans'. We hypothesize that humans may have much prior experience with similar physical scenes to draw on when answering these questions, or that they may answer them in a different way than by explicitly learning the forces. Fully addressing the similarities and differences between BSP and humans requires more analysis that is out of the scope of this paper.
# 6 Limitations and Future Work
BSP relies on two main assumptions. First, we assume it has access to the grammar of Newtonian physics; how this knowledge is acquired, e.g. through evolution, is not addressed. Second, BSP works on symbolic inputs and assumes the perceptual modules are given. This requires extra pre-processing steps when applying BSP to perceptual data, e.g. videos from PHYS101 and ULLMAN, and BSP's performance depends on the quality of this pre-processing step. It would be interesting to address these two limitations by formulating a computational framework in which the grammar and the perceptual modules are learned, either in separate phases or jointly with symbolic force learning.
# 7 Conclusion
We present BSP, a Bayesian approach to learning symbolic physics which, to our knowledge, is the first to combine symbolic learning of physical force laws with statistical learning of unobserved attributes. Our work enables data-efficient symbolic physics learning from partially observed trajectory data, and paves the way for using learnable IPEs in intuitive physics by providing a computational framework for studying how humans might mentally perform such iterative reasoning and learning.
# Acknowledgment
We thank David D. Cox, John Cohn, Masataro Asai, Cole Hurwitz, Seungwook Han and all the reviewers for their helpful feedback and discussions. This work is supported by the DARPA Machine Common Sense (MCS) program.
# References
Allen, K. R., Smith, K. A., and Tenenbaum, J. B. The tools challenge: Rapid trial-and-error learning in physical problem solving. arXiv preprint arXiv:1907.09620, 2019.
Amos, B., Dinh, L., Cabi, S., Rothörl, T., Colmenarejo, S. G., Muldal, A., Erez, T., Tassa, Y., de Freitas, N., and Denil, M. Learning awareness models. arXiv preprint arXiv:1804.06318, 2018.
Baradel, F., Neverova, N., Mille, J., Mori, G., and Wolf, C. CoPhy: Counterfactual learning of physical dynamics. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=SkeyppEFvS.
Bates, C., Battaglia, P. W., Yildirim, I., and Tenenbaum, J. B. Humans predict liquid dynamics using probabilistic simulation. In CogSci, 2015.
Battaglia, P. W., Hamrick, J. B., and Tenenbaum, J. B. Simulation as an engine of physical scene understanding. Proceedings of the National Academy of Sciences, 110(45):18327-18332, 2013.
Battaglia, P. W., Pascanu, R., Lai, M., Rezende, D., and Kavukcuoglu, K. Interaction networks for learning about objects, relations and physics. arXiv:1612.00222 [cs], December 2016.
Bonawitz, E., Ullman, T. D., Bridgers, S., Gopnik, A., and Tenenbaum, J. B. Sticking to the evidence? A behavioral and computational case study of micro-theory change in the domain of magnetism. Cognitive Science, 43(8):e12765, 2019. ISSN 1551-6709. doi: 10.1111/cogs.12765.
Bramley, N. R., Gerstenberg, T., Tenenbaum, J. B., and Gureckis, T. M. Intuitive experimentation in the physical world. Cognitive Psychology, 105:9-38, 2018.
Breen, P. G., Foley, C. N., Boekholt, T., and Zwart, S. P. Newton vs the machine: Solving the chaotic three-body problem using deep neural networks. arXiv:1910.07291 [astro-ph, physics:physics], October 2019.
Brescia, F. Fundamentals of Chemistry: A Modern Introduction (1966). Elsevier, 2012.
Cerny, B. M., Nelson, P. C., and Zhou, C. Using differential evolution for symbolic regression and numerical constant creation. In Proceedings of the 10th Annual Conference on Genetic and Evolutionary Computation, GECCO '08, pp. 1195-1202, Atlanta, GA, USA, July 2008. Association for Computing Machinery. ISBN 978-1-60558-130-9. doi: 10.1145/1389095.1389331.
Chang, M. B., Ullman, T., Torralba, A., and Tenenbaum, J. B. A compositional object-based approach to learning physical dynamics. arXiv preprint arXiv:1612.00341, 2016.
Cranmer, M., Sanchez-Gonzalez, A., Battaglia, P., Xu, R., Cranmer, K., Spergel, D., and Ho, S. Discovering symbolic models from deep learning with inductive biases. arXiv:2006.11287 [astro-ph, physics:physics, stat], June 2020.
Davidson, J. W., Savic, D. A., and Walters, G. A. Symbolic and numerical regression: Experiments and applications. In John, R. and Birkenhead, R. (eds.), Developments in Soft Computing, Advances in Soft Computing, pp. 175-182, Heidelberg, 2001. Physica-Verlag HD. ISBN 978-3-7908-1829-1. doi: 10.1007/978-3-7908-1829-1_21.
Dempe, S. Foundations of Bilevel Programming. Springer Science & Business Media, 2002.
Duane, S., Kennedy, A. D., Pendleton, B. J., and Roweth, D. Hybrid Monte Carlo. Physics Letters B, 195(2):216-222, 1987.
Ehrhardt, S., Monszpart, A., Mitra, N. J., and Vedaldi, A. Learning a physical long-term predictor. arXiv preprint arXiv:1703.00247, 2017.
Feser, J. K., Chaudhuri, S., and Dillig, I. Synthesizing data structure transformations from input-output examples. ACM SIGPLAN Notices, 50(6):229-239, 2015.
Finn, C., Abbeel, P., and Levine, S. Model-agnostic meta-learning for fast adaptation of deep networks. arXiv:1703.03400 [cs], July 2017.
Fragkiadaki, K., Agrawal, P., Levine, S., and Malik, J. Learning visual predictive models of physics for playing billiards. arXiv preprint arXiv:1511.07404, 2015.
Ge, H., Xu, K., and Ghahramani, Z. Turing: A language for flexible probabilistic inference. In The International Conference on Artificial Intelligence and Statistics (AISTATS), 2018.
Gerstenberg, T., Goodman, N. D., Lagnado, D. A., and Tenenbaum, J. B. How, whether, why: Causal judgments as counterfactual contrasts. In CogSci, 2015.
Grzeszczuk, R., Terzopoulos, D., and Hinton, G. NeuroAnimator: Fast neural network emulation and control of physics-based models. In Proceedings of ACM SIGGRAPH '98, pp. 9-20. ACM Press, 1998.
Hoffman, M. D. and Gelman, A. The No-U-Turn Sampler: Adaptively setting path lengths in Hamiltonian Monte Carlo. J. Mach. Learn. Res., 15(1):1593-1623, 2014. URL http://arxiv.org/abs/1111.4246.
Janner, M., Levine, S., Freeman, W. T., Tenenbaum, J. B., Finn, C., and Wu, J. Reasoning about physical interactions with object-oriented prediction and planning. arXiv:1812.10972 [cs, stat], January 2019.
Kingma, D. P. and Ba, J. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
Kipf, T., Fetaya, E., Wang, K.-C., Welling, M., and Zemel, R. Neural relational inference for interacting systems. arXiv preprint arXiv:1802.04687, 2018.
Kommenda, M., Kronberger, G., Winkler, S., Affenzeller, M., and Wagner, S. Effects of constant optimization by nonlinear least squares minimization in symbolic regression. In Proceedings of the 15th Annual Conference Companion on Genetic and Evolutionary Computation, GECCO '13 Companion, pp. 1121-1128, Amsterdam, The Netherlands, July 2013. Association for Computing Machinery. ISBN 978-1-4503-1964-5. doi: 10.1145/2464576.2482691.
Koza, J. R. Genetic programming as a means for programming computers by natural selection. Statistics and Computing, 4(2):87-112, June 1994. ISSN 1573-1375. doi: 10.1007/BF00175355.
Lake, B. M., Ullman, T. D., Tenenbaum, J. B., and Gershman, S. J. Building machines that learn and think like people. Behavioral and Brain Sciences, 40, 2017.
Neal, R. M. MCMC using Hamiltonian dynamics. Handbook of Markov Chain Monte Carlo, 2(11):2, 2011.
Osera, P.-M. and Zdancewic, S. Type-and-example-directed program synthesis. In Proceedings of the 36th ACM SIGPLAN Conference on Programming Language Design and Implementation, PLDI '15, pp. 619-630, New York, NY, USA, June 2015. Association for Computing Machinery. ISBN 978-1-4503-3468-6. doi: 10.1145/2737924.2738007.
Quade, M., Abel, M., Shafi, K., Niven, R. K., and Noack, B. R. Prediction of dynamical systems by symbolic regression. Physical Review E, 94(1):012214, July 2016. ISSN 2470-0045, 2470-0053. doi: 10.1103/PhysRevE.94.012214.
Sanborn, A. N., Mansinghka, V. K., and Griffiths, T. L. Reconciling intuitive physics and Newtonian mechanics for colliding objects. Psychological Review, 120(2):411, 2013.
Sanchez-Gonzalez, A., Bapst, V., Cranmer, K., and Battaglia, P. Hamiltonian graph networks with ODE integrators. arXiv:1909.12790 [physics], September 2019.
Schmidt, M. and Lipson, H. Distilling free-form natural laws from experimental data. Science, 324(5923):81-85, April 2009. ISSN 0036-8075, 1095-9203. doi: 10.1126/science.1165893.
Seo, S., Meng, C., and Liu, Y. Physics-aware difference graph networks for sparsely-observed dynamics. In International Conference on Learning Representations, 2019.
Smith, K., Mei, L., Yao, S., Wu, J., Spelke, E., Tenenbaum, J., and Ullman, T. Modeling expectation violation in intuitive physics with coarse probabilistic object representations. In Advances in Neural Information Processing Systems, pp. 8983-8993, 2019.
Spelke, E. S. Core knowledge. American Psychologist, 55(11):1233, 2000.
Spelke, E. S. and Kinzler, K. D. Core knowledge. Developmental Science, 10(1):89-96, 2007.
Udrescu, S.-M. and Tegmark, M. AI Feynman: A physics-inspired method for symbolic regression. Science Advances, 6(16):eaay2631, April 2020. ISSN 2375-2548. doi: 10.1126/sciadv.aay2631.
Ullman, T. D., Stuhlmüller, A., Goodman, N. D., and Tenenbaum, J. B. Learning physical parameters from dynamic scenes. Cognitive Psychology, 104:57-82, August 2018. ISSN 0010-0285. doi: 10.1016/j.cogpsych.2017.05.006.
Veerapaneni, R., Co-Reyes, J. D., Chang, M., Janner, M., Finn, C., Wu, J., Tenenbaum, J., and Levine, S. Entity abstraction in visual model-based reinforcement learning. In Conference on Robot Learning, pp. 1439-1456. PMLR, 2020.
Watters, N., Zoran, D., Weber, T., Battaglia, P., Pascanu, R., and Tacchetti, A. Visual interaction networks: Learning a physics simulator from video. In Advances in Neural Information Processing Systems, pp. 4539-4547, 2017.
Wood, F., Meent, J. W., and Mansinghka, V. A new approach to probabilistic programming inference. In Artificial Intelligence and Statistics, pp. 1024-1032, 2014.
Wu, J., Yildirim, I., Lim, J. J., Freeman, B., and Tenenbaum, J. Galileo: Perceiving physical object properties by integrating a physics engine with deep learning. In Advances in Neural Information Processing Systems, pp. 127-135, 2015.
Wu, J., Lim, J., Zhang, H., Tenenbaum, J., and Freeman, W. Physics 101: Learning physical object properties from unlabeled videos. In Proceedings of the British Machine Vision Conference 2016, pp. 39.1-39.12, York, UK, 2016. British Machine Vision Association. ISBN 978-1-901725-59-9. doi: 10.5244/C.30.39.
Zheng, D., Luo, V., Wu, J., and Tenenbaum, J. B. Unsupervised learning of latent physical properties using perception-prediction networks. arXiv:1807.09244 [cs, stat], July 2018.
abayesiansymbolicapproachtoreasoningandlearninginintuitivephysics/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7d2acece87b0fb23900d7167c487527273b995806aa4bbccbe19f90ccc0840f8
|
| 3 |
+
size 254373
|
abayesiansymbolicapproachtoreasoningandlearninginintuitivephysics/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0d896687e306b72c12ce850520779529f059e46b6ba2c421b6d54cfa6cdef877
|
| 3 |
+
size 448167
|
abiasedgraphneuralnetworksamplerwithnearoptimalregret/7e2acf6e-e260-4d15-8094-949aaa3f1fb0_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2088458ff5b68a307593aea9fe1f0021bcc75a6c14eb87c518ec5840c4a1dfb1
|
| 3 |
+
size 82024
|
abiasedgraphneuralnetworksamplerwithnearoptimalregret/7e2acf6e-e260-4d15-8094-949aaa3f1fb0_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eb05b2603b260807df1cfaf3e3d69f16d8e810aab8e3b7c4dffe5db5d37d3e83
|
| 3 |
+
size 101031
|
abiasedgraphneuralnetworksamplerwithnearoptimalregret/7e2acf6e-e260-4d15-8094-949aaa3f1fb0_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d04420c67e04b5c1cb13cbfdad04eba8b51e3b9d5073055d6eb914d5073a221a
|
| 3 |
+
size 1205454
|
abiasedgraphneuralnetworksamplerwithnearoptimalregret/full.md
ADDED
|
@@ -0,0 +1,319 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A Biased Graph Neural Network Sampler with Near-Optimal Regret
|
| 2 |
+
|
| 3 |
+
Qingru Zhang $^{1*}$ David Wipf $^{2}$ Quan Gan $^{2}$ Le Song $^{1,3}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup>Georgia Institute of Technology <sup>2</sup>Amazon Shanghai AI Lab
|
| 6 |
+
|
| 7 |
+
<sup>3</sup>Mohamed bin Zayed University of Artificial Intelligence
|
| 8 |
+
|
| 9 |
+
qingru.zhang@gatech.edu, daviwipf@amazon.com
|
| 10 |
+
|
| 11 |
+
quagan@amazon.com, le.song@mbzuai.ac.ae
|
| 12 |
+
|
| 13 |
+
# Abstract
|
| 14 |
+
|
| 15 |
+
Graph neural networks (GNN) have recently emerged as a vehicle for applying deep network architectures to graph and relational data. However, given the increasing size of industrial datasets, in many practical situations the message passing computations required for sharing information across GNN layers are no longer scalable. Although various sampling methods have been introduced to approximate full-graph training within a tractable budget, there remain unresolved complications such as high variances and limited theoretical guarantees. To address these issues, we build upon existing work and treat GNN neighbor sampling as a multi-armed bandit problem but with a newly-designed reward function that introduces some degree of bias designed to reduce variance and avoid unstable, possibly-unbounded pay outs. And unlike prior bandit-GNN use cases, the resulting policy leads to near-optimal regret while accounting for the GNN training dynamics introduced by SGD. From a practical standpoint, this translates into lower variance estimates and competitive or superior test accuracy across several benchmarks.
|
| 16 |
+
|
| 17 |
+
# 1 Introduction
|
| 18 |
+
|
| 19 |
+
Graph convolution networks (GCN) and Graph neural networks (GNN) in general [21, 17] have recently become a powerful tool for representation learning for graph structured data [6, 2, 33]. These neural networks iteratively update the representation of a node using a graph convolution operator or message passing operator which aggregate the embeddings of the neighbors of the node, followed by a non-linear transformation. After stacking multiple graph convolution layers, these models can learn node representations which can capture information from both immediate and distant neighbors.
|
| 20 |
+
|
| 21 |
+
GCNs and variants [32] have demonstrated the start-of-art performance in a diverse range of graph learning problems [21, 17, 3, 30, 13, 15, 23]. However, they face significant computational challenges given the increasing sizes of modern industrial datasets. The multilayers of graph convolutions is equivalent to recursively unfold the neighbor aggregation in a top-down manner which will lead to an exponentially growing neighborhood size with respect to the number of layers. If the graph is dense and scale-free, the computation of embeddings will involve a large portion of the graph even with a few layers, which is intractable for large-scale graph [21, 34].
|
| 22 |
+
|
| 23 |
+
Several sampling methods have been proposed to alleviate the exponentially growing neighborhood sizes, including node-wise sampling [17, 9, 24], layer-wise sampling [8, 37, 20] and subgraph sampling [10, 35, 19]. However, the optimal sampler with minimum variance is a function of the neighbors' embeddings unknown apriori before the sampling and only partially observable for those sampled neighbors. Most previous methods approximate the optimal sampler with a static distribution which cannot reduce variance properly. And most of existing approaches [8, 37, 20, 10, 35, 19]
|
| 24 |
+
|
| 25 |
+
do not provide any asymptotic convergence guarantee on the sampling variance. We are therefore less likely to be confident of their behavior as GNN models are applied to larger and larger graphs. Recently, Liu et al. [24] propose a novel formulation of neighbor sampling as a multi-armed bandit problem (MAB) and apply bandit algorithms to update sampler and reduce variance. Theoretically, they provide an asymptotic regret analysis on sampling variance. Empirically, this dynamic sampler named as BanditSampler is more flexible to capture the underlying dynamics of embeddings and exhibits promising performance in a variety of datasets.
|
| 26 |
+
|
| 27 |
+
However, we will show in Section 2.3 that there are several critical issues related to the numerical stability and theoretical limitations of the BanditSampler [24]. First, the reward function designed is numerically unstable. Second, the bounded regret still can be regarded as a linear function of training horizon $T$ . Third, their analysis relies on two strong implicit assumptions, and does not account for the unavoidable dependency between embedding-dependent rewards and GNN training dynamics.
|
| 28 |
+
|
| 29 |
+
In this paper, we build upon the bandit formulation for GNN sampling and propose a newly-designed reward function that trades bias with variance. In Section 3.1, we highlight that the proposed reward has the following crucial advantages: (i) It is numerically stable. (ii) It leads to a more meaningful notion of regret directly connected to sampling approximation error, the expected error between aggregation from sampling and that from full neighborhood. (iii) Its variation can be formulated by GNN training dynamics. Then in Section 3.2, we clarify how the induced regret is connected to sampling approximation error and emphasize that the bounded variation of rewards is essential to derive a meaningful sublinear regret, i.e., a per-iteration regret that decays to zero as $T$ becomes large. In that sense, we are the first to explicitly account for GNN training dynamic due to stochastic gradient descent (SGD) so as to establish a bounded variation of embedding-dependent rewards, which we present in Section 3.3.
|
| 30 |
+
|
| 31 |
+
Based on that, in Section 4, we prove our main result, namely, that the regret of the proposed algorithm as the order of $(T\sqrt{\ln T})^{2/3}$ , which is near-optimal and manifest that the sampling approximation error of our algorithm asymptotically converges to that of the optimal oracle with the near-fastest rate. Hence we name our algorithm as Thanos from "Thanos Has A Near-Optimal Sampler". Finally, empirical results in Section 5 demonstrate the improvement of Thanos over BanditSampler and others in terms of variance reduction and generalization performance.
|
| 32 |
+
|
| 33 |
+
# 2 Background
|
| 34 |
+
|
| 35 |
+
# 2.1 Graph Neural Networks and Neighbor Sampling
|
| 36 |
+
|
| 37 |
+
Graph Neural Networks. Given a graph $\mathcal{G} = (\mathcal{V},\mathcal{E})$ , where $\mathcal{V}$ and $\mathcal{E}$ are node and edge sets respectively, the forward propagation of a GNN is formulated as $h_{v,t}^{(l + 1)} = \sigma (\sum_{i\in \mathcal{N}_v}a_{vi}h_{i,t}^{(l)}W_t^{(l)})$ for the node $v\in \mathcal{V}$ at training iteration $t$ . Here $h_{i,t}^{(l)}\in \mathbb{R}^d$ is the hidden embedding of node $i$ at the layer $l$ , $h_{i,t}^{(0)} = x_i$ is the node feature, and $\sigma (\cdot)$ is the activation function. Additionally, $\mathcal{N}_v$ is the neighbor set of node $v$ , $D_v = |\mathcal{N}_v|$ is the degree of node $v$ , and $a_{vi} > 0$ is the edge weight between node $v$ and $i$ . And $W_{t}^{(l)}\in \mathbb{R}^{d\times d}$ is the GNN weight matrix, learned by minimizing the stochastic loss $\widehat{\mathcal{L}}$ with SGD. Finally, we denote $\pmb {z}_{i,t}^{(l)} = a_{vi}\pmb{h}_{i,t}^{(l)}$ as the weighted embedding, $[D_v] = \{i|1\leq i\leq D_v\}$ , and for a vector $\pmb {x}\in \mathbb{R}^{d_0}$ , we refer to its 2-norm as $\| \pmb {x}\|$ ; for matrix $W$ , its spectral norm is $\| W\|$ .
|
| 38 |
+
|
| 39 |
+
Neighbor Sampling. Recursive neighborhood expansion will cover a large portion of the graph if the graph is dense or scale-free even within a few layers. Therefore, we consider to neighbor sampling methods which samples $k$ neighbors under the distribution $p_{v,t}^{(l)}$ to approximate $\sum_{i\in \mathcal{N}_v}\pmb {z}_{i,t}^{(l)}$ with this subset $S_{t}$ . We also call $p_{v,t}^{(l)}$ the policy. For ease of notation, we simplify $p_{v,t}^{(l)}$ as $p_t = \{p_{i,t}|i\in \mathcal{N}_v\}$ ; $p_{i,t}$ is the probability of neighbor $i$ to be sampled. We can then approximate $\pmb{\mu}_{v,t}^{(l)} = \sum_{i\in \mathcal{N}_v}\pmb{z}_{i,t}^{(l)}$ with an unbiased estimator $\widehat{\pmb{\mu}}_{v,t}^{(l)} = \frac{1}{k}\sum_{i\in S_t}\pmb{z}_{i,t}^{(l)} / p_{i,t}$ . As it is unbiased, only the variance term $\mathbf{V}_{pt}(\widehat{\pmb{\mu}}_{v,t}^{(l)})$ need to be considered when optimizing the policy $p_t$ . Define the variance term when $k = 1$ as $\mathbf{V}_{p_t}$ . Then following [29], $\mathbf{V}_{p_t}(\widehat{\pmb{\mu}}_{v,t}^{(l)}) = \mathbf{V}_{p_t} / k$ with $\mathbf{V}_{p_t}$ decomposes as $\mathbf{V}_{p_t} = \mathbf{V}_e - \mathbf{V}_c$ . With $\mathbf{V}_e = \sum_{i\in \mathcal{N}_v}\| z_{i,t}^{(l)}\| ^2 /p_{i,t}$ , which is dependent on $p_t$ and thus refereed as the effective variance. And $\mathbf{V}_c = \| \sum_{j\in \mathcal{N}_v}\pmb {z}_{j,t}^{(l)}\| ^2$ is independent on the policy and therefore referred to as constant variance.
|
| 40 |
+
|
| 41 |
+

|
| 42 |
+
(a) Adversary Multi-Armed Bandit.
|
| 43 |
+
|
| 44 |
+

|
| 45 |
+
(b) Formulate neighbor sampling as a MAB problem.
|
| 46 |
+
Figure 1: Fig. 1a visualizes the pipeline of adversary multi-armed bandit, in which, the reward is prior unknown, non-stationary and only partially observable for the pulled arms. It motivates us to formulate the neighbor sampling as a MAB problem (Fig. 1b).
|
| 47 |
+
|
| 48 |
+
# 2.2 Formulate Neighbor Sampling as Multi-Armed Bandit
|
| 49 |
+
|
| 50 |
+
The optimal policy in terms of reducing the variance $\mathbf{V}_{p_t}$ is given by $p_{i,t}^{*} = \frac{\|\pmb{z}_{i,t}\|}{\sum_{j\in\mathcal{N}_v}\|\pmb{z}_{j,t}\|}$ [29]. However, this expression is intractable to compute for the following reasons: (i) It is only after sampling and forward propagation that we can observe $\pmb{z}_{i,t}^{(l)}$ , and $\pmb{z}_{i,t}^{(l)}$ changes with time along an optimization trajectory with unknown dynamics. (ii) $\pmb{z}_{i,t}^{(l)}$ is only partially observable in that we cannot see the embeddings of the nodes we do not sample. While static policies [17, 8, 37] are capable of dealing with (ii), they are not equipped to handle (i) as required to approximate $p_t^*$ and reduce the sampling variance effectively. In contrast, adversarial MAB frameworks are capable of addressing environments with unknown, non-stationary dynamics and partial observations alike (See Fig.1). The basic idea is that a hypothetical gambler must choose which of $K$ slot machines to play (See Fig. 1a). For neighbor sampling, $K$ is equal to the degree $D_v$ of root node $v$ . At each time step, the gambler takes an action, meaning pulling an arm $I_t\in [K]$ according to his policy $p_t$ , and then receives a reward $r_{I_t}$ . To maximize cumulative rewards, an algorithm is applied to update the policy based on the observed reward history $\{r_{I_\tau}:\tau = 1\dots ,t\}$ .
|
| 51 |
+
|
| 52 |
+
Liu et al. [24] formulate node-wise neighbor sampling as a MAB problem. Following the general strategy from Salehi et al. [29] designed to reduce the variance of stochastic gradient descent, they apply an adversarial MAB to GNN neighbor sampling using the reward
|
| 53 |
+
|
| 54 |
+
$$
|
| 55 |
+
r _ {i, t} = - \nabla_ {p _ {i, t}} \mathbf {V} _ {e} (p _ {t}) = \| \boldsymbol {z} _ {i, t} ^ {(l)} \| / p _ {i, t} ^ {2}, \tag {1}
|
| 56 |
+
$$
|
| 57 |
+
|
| 58 |
+
which is the negative gradient of the effective variance w.r.t. the policy. Since $\mathbf{V}_e(p_t) - \mathbf{V}_e(p_t^*) \leq \langle p_t - p_t^*, \nabla_{p_t} \mathbf{V}_e(p_t) \rangle$ , maximizing this reward over a sequence of arm pulls, i.e., $\sum_{t=1}^{T} r_{I_t, t}$ , is more-or-less equivalent to minimizing an upper bound on $\sum_{t=1}^{T} \mathbf{V}_e(p_t) - \sum_{t=1}^{T} \mathbf{V}_e(p_t^*)$ . The actual policy is then updated using one of two existing algorithms designed for adversarial bandits, namely Exp3 [1] and Exp3.M [31]. Please see Appendix C for details. Finally, Liu et al. [24] prove that the resulting BanditSampler can asymptotically approach the optimal variance with a factor of three:
|
| 59 |
+
|
| 60 |
+
$$
|
| 61 |
+
\sum_ {t = 1} ^ {T} \mathbf {V} _ {e} \left(p _ {t}\right) \leq 3 \sum_ {t = 1} ^ {T} \mathbf {V} _ {e} \left(p _ {t} ^ {*}\right) + 1 0 \sqrt {T D _ {v} ^ {4} \ln \left(D _ {v} / k\right) / k ^ {3}}. \tag {2}
|
| 62 |
+
$$
|
| 63 |
+
|
| 64 |
+
Critically however, this result relies on strong implicit assumptions, and does not account for the unavoidable dependency between the reward distribution and GNN model training dynamics. We elaborate on this and other weaknesses of the BanditSampler next.
|
| 65 |
+
|
| 66 |
+
# 2.3 Limitation of BanditSampler
|
| 67 |
+
|
| 68 |
+
Updated by Exp3, BanditSampler as described is sufficiently flexible to capture the embeddings' dynamics and give higher probability to ones with larger norm. And the dynamic policy endows it with promising performance on large datasets. Moreover, it can be applied not only to GCN but GAT models [32], where $a_{vi}$ change with time as well. It is an advantage over previous sampling approaches. Even so, we still found several crucial drawbacks of the BanditSampler.
|
| 69 |
+
|
| 70 |
+
Numerical Instability Due to the $p_{i,t}$ in the denominator of (1), the reward of BanditSampler suffers from numerical instability especially when the neighbors with small $p_{i,t}$ are sampled. From Fig. 5a (in Appendix), we can observe that the rewards (1) of BanditSampler range between a large scale. Even though the mean of received rewards is around 2.5, the max of received rewards can attain 1800. This extremely heavy tail distribution forces us to choose a quite small temperature hyperparameter $\eta$ (Algorithm 3 and 5 in Appendix C), resulting in dramatic slowdown of the policy optimization. By
|
| 71 |
+
|
| 72 |
+
contrast, the reward proposed by us in the following section is more numerically stable (See Fig. 5b in Appendix) and possesses better practical interpretation (Fig. 2c).
|
| 73 |
+
|
| 74 |
+
Limitation of Existing Regret and Rewards There are two types of regret analyses for bandit algorithms [1, 4]: (i) the weak regret with a static oracle given by $\widehat{\mathcal{R}}(T) = \max_{j \in [D_v]} (\sum_{t=1}^{T} \mathbb{E}[r_{j,t}]) - \sum_{t=1}^{T} \mathbb{E}[r_{I_t,t}]$ , which measures performance relative pulling the single best arm; and (ii) the worst-case regret with a dynamic oracle given by $\mathcal{R}(T) = \sum_{t=1}^{T} \max_{j \in [D_v]} \mathbb{E}[r_{j,t}] - \sum_{t=1}^{T} \mathbb{E}[r_{I_t,t}]$ , where the oracle can pull the best arm at each $t$ . When the growth of the regret as a function of $T$ is sublinear, the policy is long-run average optimal, meaning the long-run average performance converges to that of the oracle. But from this perspective, the bound from (2) can actually function more like worst-case regret. To see this, note that the scale factor on the oracle variance is 3, which implies that once we subtract $\mathbf{V}_e(p_t^*)$ from the upper bound, the effective regret satisfies $\mathcal{R}(T) \leq 2 \sum_{t=1}^{T} \mathbf{V}_e(p_t^*) + O(\sqrt{T})$ . By substituting $p_t^*$ into $\mathbf{V}_e$ , we obtain $\mathbf{V}_e(p_t^*) = \sum_{i \in \mathcal{N}_v} \sum_{j \in \mathcal{N}_v} \|z_{i,t}^{(l)}\| \|z_{j,t}^{(l)}\|$ , which can be regarded as a constant lower bound given the converged variation of $z_{i,t}$ (Lemma 1). Consequently, the regret is still linear about $T$ . And linear worst-case regret cannot confirm the effectiveness of policy since uniform random guessing will also achieve linear regret.
|
| 75 |
+
|
| 76 |
+
**Crucial Implicit Assumptions** There are two types of adversaries: if the current reward distribution is independent with the previous actions of the player, it is an oblivious adversary; otherwise, it is a non-oblivious adversary [7]. GNN neighbor sampling is apparently non-oblivious setting but it is theoretically impossible to provide any meaningful guarantees on the worst-case regret in the non-oblivious setting (beyond what can be achieved by random guessing) unless explicit assumptions are made on reward variation [4]. BanditSampler [24] circumvents this issue by implicitly assuming bounded variation and oblivious setting (See Appendix H), but this cannot possibly be true since embedding-dependent rewards must depend on training trajectory and previous sampling. In contrast, we are the first to explicitly account for training dynamic in deriving reward variation and further regret bound in non-oblivious setting, and without this consideration no meaningful bound can possibly exist.
|
| 77 |
+
|
| 78 |
+
# 3 Towards a More Meaningful Notion of Regret
|
| 79 |
+
|
| 80 |
+
To address the limitations of the BanditSampler, we need a new notion of regret and the corresponding reward upon which it is based. In this section we motivate a new biased reward function, interpret the resulting regret that emerges, and then conclude by linking with the GCN training dynamics.
|
| 81 |
+
|
| 82 |
+
# 3.1 Rethinking the Reward
|
| 83 |
+
|
| 84 |
+
Consider the following bias-variance decomposition of approximation error:
|
| 85 |
+
|
| 86 |
+
$$
|
| 87 |
+
\mathbb {E} \left[ \| \widehat {\boldsymbol {\mu}} _ {v, t} ^ {(l)} - \boldsymbol {\mu} _ {v, t} ^ {(l)} \| ^ {2} \right] = \| \boldsymbol {\mu} _ {v, t} ^ {(l)} - \mathbb {E} \left[ \widehat {\boldsymbol {\mu}} _ {v, t} ^ {(l)} \right] \| ^ {2} + \mathbf {V} _ {p _ {t}} \left(\widehat {\boldsymbol {\mu}} _ {v, t} ^ {(l)}\right) \triangleq \operatorname {B i a s} \left(\widehat {\boldsymbol {\mu}} _ {v, t} ^ {(l)}\right) + \mathbf {V} _ {p _ {t}} \left(\widehat {\boldsymbol {\mu}} _ {v, t} ^ {(l)}\right).
|
| 88 |
+
$$
|
| 89 |
+
|
| 90 |
+
Prior work has emphasized the enforcement of zero bias as the starting point when constructing samplers; however, we will now argue that broader estimators that do introduce bias should be reconsidered for the following reasons: (i) Zero bias itself may not be especially necessary given that even an unbiased $\widehat{\pmb{\mu}}_{v,t}^{(l)}$ will become biased for approximating $h_{v,t}^{(l + 1)}$ once it is passed through the non-linear activation function. (ii) BanditSampler only tackles the variance reduction after enforcing zero bias in the bias-variance trade-off. However, it is not clear that the optimal approximation error must always be achieved via a zero bias estimator, i.e., designing the reward to minimize the approximation error in aggregate could potentially perform better, even if the estimator involved is biased. (iii) Enforcing a unbiased estimator induces other additional complications: the reward can become numerically unstable and hard to bound in the case of a non-oblivious adversary. And as previously argued, meaningful theoretical analysis must account for optimization dynamics that fall under the non-oblivious setting. Consequently, to address these drawbacks, we propose to trade variance with bias by adopting the biased estimator: $\widehat{\pmb{\mu}}_{v,t}^{(l)} = \frac{D_v}{k}\sum_{i\in S_t}\pmb{z}_{i,t}^{(l)}$ and redefine the reward:
|
| 91 |
+
|
| 92 |
+
$$
|
| 93 |
+
r _ {i, t} = 2 \boldsymbol {z} _ {i, t} ^ {(l) \top} \bar {\boldsymbol {z}} _ {v, t} ^ {(l)} - \left\| \boldsymbol {z} _ {i, t} ^ {(l)} \right\| ^ {2}, \quad \text {w i t h} \quad \bar {\boldsymbol {z}} _ {v, t} ^ {(l)} = \frac {1}{D _ {v}} \boldsymbol {\mu} _ {v, t} ^ {(l)} = \frac {1}{D _ {v}} \sum_ {j \in \mathcal {N} _ {v}} \boldsymbol {z} _ {j, t} ^ {(l)}. \tag {3}
|
| 94 |
+
$$
|
| 95 |
+
|
| 96 |
+
Equation (3) is derived by weighting the gradient of bias and variance w.r.t. $p_{i,t}$ equally, which we delegate to Appendix. Additionally, because of partial observability, we approximate $\bar{z}^{(l)}$ with $\frac{1}{k}\sum_{i\in S_t}z_{i,t}^{(l)}$ . We also noticed, due to the exponential function from the Exp3 algorithm (see line
|
| 97 |
+
|
| 98 |
+
6, Algorithm 3), the negative rewards of some neighbors will shrink $w_{i,t}$ considerably, which can adversely diminish their sampling probability making it hard to sample these neighbors again. Consequently, to encourage the exploration on the neighbors with negative rewards, we add ReLU function over rewards (note that our theory from Section 4 will account for this change). The practical reward is then formulated as
|
| 99 |
+
|
| 100 |
+
$$
|
| 101 |
+
\tilde {r} _ {i, t} = \operatorname {R e L U} \left(2 z ^ {(l) \top} \sum_ {j \in \mathcal {S} _ {t}} \frac {1}{k} z _ {j, t} ^ {(l)} - \| z _ {i, t} ^ {(l)} \| ^ {2}\right). \tag {4}
|
| 102 |
+
$$
|
| 103 |
+
|
| 104 |
+
The intuition of (3) and by extension (4) is that the neighbors whose weighted embeddings $\boldsymbol{z}_{i,t}^{(l)}$ are closer to $\bar{\boldsymbol{z}}_{v,t}^{(l)}$ will be assigned larger rewards (See Fig. 2c). Namely, our reward will bias the policy towards neighbors that having contributed to the accurate approximation instead of ones with large norm as favored by BanditSampler. And in the case of large but rare weighted embeddings far from $\boldsymbol{\mu}_{v,t}^{(l)}$ , BanditSampler tends to frequently sample these large and rare embeddings, causing significant deviations. The empirical evidence is shown in Section 5.3.
|
| 105 |
+
|
| 106 |
+
The reward (3) possesses following practical and theoretical advantages, which will be expanded more in next sections:
|
| 107 |
+
|
| 108 |
+
- Since it is well bounded by $r_{i,t} = \|\bar{\mathbf{z}}_{v,t}^{(l)}\|^2 - \|\mathbf{z}_{i,t}^{(l)} - \bar{\mathbf{z}}_{v,t}^{(l)}\|^2 \leq \|\bar{\mathbf{z}}_{v,t}^{(l)}\|^2$ , the proposed reward is more numerical stable as we show in Fig. 5 (See Appendix).
|
| 109 |
+
- It will incur a more meaningful notion of regret, meaning the regret defined by (3) is equivalent to the gap between the policy and the oracle w.r.t. approximation error.
|
| 110 |
+
- The variation of reward (3) is tractable to bound as a function of training dynamics of GCN in non-oblivious setting, leading to a provable sublinear regret as the order of $(D_v\ln D_v)^{1/3}(T\sqrt{\ln T})^{2/3}$ , which means the approximation error of policy asymptotically converges to the optimal oracle with a factor of one rather than three.
|
| 111 |
+
|
| 112 |
+
# 3.2 Interpreting the Resulting Regret
|
| 113 |
+
|
| 114 |
+
We focus on the worst-case regret in the following analysis. The regret defined by reward (3) is directly connected to approximation error. More specifically, we notice $r_{i,t} = -\| \pmb{z}_{i,t}^{(l)} - \bar{\pmb{z}}_{v,t}^{(l)}\|^2 +\|\bar{\pmb{z}}_{v,t}^{(l)}\|^2$ . Since $\| \bar{\pmb{z}}_{v,t}^{(l)}\|^2$ will be canceled out in $\mathcal{R}(T)$ , we have $\mathcal{R}(T) = \sum_{t = 1}^{T}(\mathbb{E}\| \pmb{z}_{It,t}^{(l)} - \bar{\pmb{z}}_{v,t}^{(l)}\|^2 - \max_{j\in [D_v]}\mathbb{E}\| \pmb{z}_{j,t}^{(l)} - \bar{\pmb{z}}_{v,t}^{(l)}\|^2)$ , where the former term is the expected approximation error of the policy and the latter is that of the optimal oracle. Consequently, the regret defined by (3) is the gap between the policy and the optimal oracle w.r.t. the approximation error.
|
| 115 |
+
|
| 116 |
+
Then we clarify how to bound this regret. The worst-case regret is a more solid guarantee of optimality than the weak regret. Even though some policies can establish the best achievable weak regret $O(\sqrt{T})$ , their worst-case regret still be linear. This is because the gap between static and dynamic oracles can be a linear function of $T$ if there is no constraint on rewards. For example, consider the following worst-case scenario. Given three arms $\{i_1, i_2, i_3\}$ , at every iteration, one of them will be assigned a reward of 3 while the others receive only 1. In that case, consistently pulling any arm will match the static oracle and any static oracle will have a linear gap with the dynamic oracle. Hence it is impossible to establish a sublinear worst-case regret unless additional assumptions are introduced on the variation of the rewards to bound the gap between static and dynamic oracles [4]. Besbes et al. [4] claim that the worst-case regret can be bounded as a function of the variation budget:
|
| 117 |
+
|
| 118 |
+
$$
|
| 119 |
+
\sum_ {t = 1} ^ {T - 1} \sup _ {i \in [ D _ {v} ]} \left| \mathbb {E} \left[ \tilde {r} _ {i, t + 1} \right] - \mathbb {E} \left[ \tilde {r} _ {i, t} \right] \right| \leq V _ {T} \tag {5}
|
| 120 |
+
$$
|
| 121 |
+
|
| 122 |
+
where $V_{T}$ is called the variation budget. Then, Besbes et al. [4] derived the regret bound as $\mathcal{R}(T) = O(K\ln K\cdot V_T^{1 / 3}T^{2 / 3})$ for Rexp3. Hence, if the variation budget is a sublinear function of $T$ in the given environment, the worst-case regret will be sublinear as well.
|
| 123 |
+
|
| 124 |
+
To fix the theoretical drawbacks of BanditSampler, we first drop the assumption of oblivious adversary, i.e. considering the dependence between rewards and previous sampling along the training horizon of GCN. Then to bound the variation budget, we account for GCN training dynamic in practically meaningful setting (i.e. no unrealistic assumptions) as described next.
|
| 125 |
+
|
| 126 |
+
# 3.3 Accounting for the Training Dynamic of GCN
|
| 127 |
+
|
| 128 |
+
One of our theoretical contributions is to study the dynamics of embeddings in the context of GNN training optimized by SGD. We present our assumptions as follows:
|
| 129 |
+
|
| 130 |
+
- Lipschitz Continuous Activation Function: $\forall x, y, \| \sigma(x) - \sigma(y) \| \leq C_{\sigma} \| x - y \|$ and $\sigma(0) = 0$ .
|
| 131 |
+
- Bounded Parameters: For any $1 \leq t \leq T$ and $0 \leq l \leq L - 1$ , $\| W_t^{(l)}\| \leq C_\theta$ .
|
| 132 |
+
- Bounded Gradients: For $1 \leq t \leq T$ , $\exists C_g$ , such that $\sum_{l=0}^{L-1} \|\nabla_{W_t^{(l)}} \widehat{\mathcal{L}}\| \leq C_g$ .
|
| 133 |
+
|
| 134 |
+
Besides, given the graph $\mathcal{G}$ and its feature $X$ , since $a_{vi}$ is fixed in GCN, define $C_x = \max_{v \in \mathcal{V}} \|\sum_{i \in \mathcal{N}_v} a_{vi} x_i\|$ . Define $\bar{D} = \max_{v \in \mathcal{V}} D_v$ , $\bar{A} = \max_{v,i} a_{vi}$ , $G = C_\sigma C_\theta \bar{D} \bar{A}$ , and $\Delta_{t,l}^z = \max_{i \in \mathcal{V}} \| z_{i,t+1}^{(l)} - z_{i,t}^{(l)}\|$ . For SGD, we apply the learning rate schedule as $\alpha_t = 1/t$ . The above assumptions are reasonable. The bounded gradient is generally assumed in the non-convex/convex convergence analysis of SGD [25, 28]. And the learning rate schedule is necessary for the analysis of SGD to decay its constant gradient variance [16]. Then we will bound $\Delta_{t,l}^z$ as a function of gradient norm and step size by recursively unfolding the neighbor aggregation.
|
| 135 |
+
|
| 136 |
+
Lemma 1 (Dynamic of Embedding). Based on our assumptions on GCN, for any $i \in \mathcal{V}$ at the layer $l$ , we have:
|
| 137 |
+
|
| 138 |
+
$$
|
| 139 |
+
\left\| \boldsymbol {z} _ {i, t} ^ {(l)} \right\| \leq C _ {z}, \quad \left| \tilde {r} _ {i, t} \right| \leq C _ {r}, \quad \left| r _ {i, t} \right| \leq C _ {r}, \tag {6}
|
| 140 |
+
$$
|
| 141 |
+
|
| 142 |
+
where $C_z = G^{l-1} \bar{A} C_\sigma C_\theta C_x$ and $C_r = 3C_z^2$ . Then, consider the training dynamics of GCN optimized by SGD. For any node $i \in \mathcal{V}$ at the layer $l$ , we have
|
| 143 |
+
|
| 144 |
+
$$
|
| 145 |
+
\Delta_ {t, l} ^ {z} = \max _ {i \in \mathcal {V}} \left\| \boldsymbol {z} _ {i, t + 1} ^ {(l)} - \boldsymbol {z} _ {i, t} ^ {(l)} \right\| \leq \alpha_ {t} G ^ {l - 1} \bar {A} C _ {\sigma} C _ {x} C _ {g}. \tag {7}
|
| 146 |
+
$$
|
| 147 |
+
|
| 148 |
+
Lemma 1 is obtained by recursively unfolding neighbor aggregations and training steps, and can be generally applied to any GCN in practical settings. Based on it, we can derive the variation budget of reward (3) and (4) as a function of $\Delta_{t,l}^{z}$ in the non-oblivious setup.
|
| 149 |
+
|
| 150 |
+
Lemma 2 (Variation Budget). Given the learning rate schedule of SGD as $\alpha_{t} = 1 / t$ and our assumptions on the GCN training, for any $T\geq 2$ , any $v\in \mathcal{V}$ , the variation of the expected reward in (3) and (4) can be bounded as:
|
| 151 |
+
|
| 152 |
+
$$
|
| 153 |
+
\sum_ {t = 1} ^ {T} \left| \mathbb {E} \left[ r _ {i, t + 1} \right] - \mathbb {E} \left[ r _ {i, t} \right] \right| \leq V _ {T} = \bar {C} _ {v} \ln T, \quad \sum_ {t = 1} ^ {T} \left| \mathbb {E} \left[ \tilde {r} _ {i, t + 1} \right] - \mathbb {E} \left[ \tilde {r} _ {i, t} \right] \right| \leq V _ {T} = \bar {C} _ {v} \ln T \tag {8}
|
| 154 |
+
$$
|
| 155 |
+
|
| 156 |
+
where $\bar{C}_v = 12G^{2(l - 1)}C_\sigma^2 C_x^2\bar{A}^2 C_\theta C_g$
|
| 157 |
+
|
| 158 |
+
The derivation of Lemma 2 is attributed to that our reward variation can be explicitly formulated as a function of embeddings' variation. In contrast, $p_{i,t}$ emerging in the denominator of (1) incurs not only the numerically unstable reward but hardship to bound its variation. More specifically, $p_{i,t}$ is proportional to the summation of observed reward history of neighbor $i$ , which is hard to bound due to the complication to explicitly keep track of overall sampling trajectory as well as its bilateral dependency with $p_{i,t}$ . It is potentially why BanditSampler's regret (2) ignores the dependency between rewards and previous training/sampling steps. On the contrary, our rewards are tractable to bound as a function of embeddings' dynamic in practical non-oblivious setting, leading to a sublinear variation budget (8), and further a solid near-optimal worst-case regret as presented next.
|
| 159 |
+
|
| 160 |
+
# 4 Main Result: Thanos and Near-Optimal Regret
|
| 161 |
+
|
| 162 |
+
# Algorithm 1 Thanos
|
| 163 |
+
|
| 164 |
+
1: Input: $\eta > 0, \gamma \in (0,1), k, T, \Delta_T, \mathcal{G}, X, \{\alpha_t\}_{t=1}^T$ .
|
| 165 |
+
2: Initialize: For any $v \in \mathcal{V}$ , any $i \in \mathcal{N}_v$ , set $p_{i,1} = 1 / D_v$ .
|
| 166 |
+
3: for $t = 1,2,\ldots ,T$ do
|
| 167 |
+
4: Reinitialize the policy every $\Delta_T$ steps: $\forall v\in \mathcal{V},\forall i\in \mathcal{N}_v$ , set $p_{i,t} = 1 / D_v$
|
| 168 |
+
5: Sample $k$ neighbors with $p_t$ and estimate $\pmb{\mu}_{v,t}^{(l)}$ with the estimator $\widehat{\pmb{\mu}}_{v,t}^{(l)} = \frac{D_v}{k}\sum_{i\in S_t}\pmb{z}_{i,t}^{(l)}$ .
|
| 169 |
+
6: Forward GNN model and calculate the reward $r_{i,t}$ according to (4).
|
| 170 |
+
7: Update the policy and optimize the model following [24] using $\eta$ , $\gamma$ , and $\{\alpha_t\}_{t=1}^T$ .
|
| 171 |
+
8: end for
|
| 172 |
+
|
| 173 |
+
Algorithm 1 presents the condensed version of our proposed algorithm. See Algorithm 2 in Appendix B for the detailed version. Besides the trade-off between bias and variance, and exploration and exploitation, our proposed algorithm also accounts for a third trade-off between remembering and forgetting: given the non-stationary reward distribution, while keeping track of more observations
|
| 174 |
+
|
| 175 |
+
can decrease the variance of reward estimation, the non-stationary environment implies that "old" information is potentially less relevant due to possible changes in the underlying rewards. The changing rewards give incentive to dismiss old information, which in turn encourages exploration. Therefore, we apply Rexp3 algorithm [4] to tackle the trade-off between remembering and forgetting by reinitializing the policy every $\Delta_T$ steps (line 4 in Algorithm 1).
|
| 176 |
+
|
| 177 |
+
Then, we present our main result: bounding the worst-case regret of the proposed algorithm:
|
| 178 |
+
|
| 179 |
+
$$
|
| 180 |
+
\mathcal {R} (T) = \sum_ {t = 1} ^ {T} \sum_ {i \in \mathcal {N} _ {k} ^ {*}} \mathbb {E} \left[ r _ {i, t} \right] - \sum_ {t = 1} ^ {T} \sum_ {I _ {t} \in \mathcal {S} _ {t}} \mathbb {E} ^ {\pi} \left[ r _ {I _ {t}, t} \right]. \tag {9}
|
| 181 |
+
$$
|
| 182 |
+
|
| 183 |
+
where $\mathcal{N}_k^* = \operatorname{argmax}_{\mathcal{N}_k \subset \mathcal{N}_v} \sum_{i \in \mathcal{N}_k} \mathbb{E}[r_{i,t}]$ , $|\mathcal{N}_k^*| = k$ . Because we consider the non-oblivious adversary, $\mathbb{E}[r_{i,t}]$ is taken over the randomness of rewards caused by the previous history of randomized arm pulling. $\mathbb{E}^\pi[r_{I_t,t}]$ is taken over the joint distribution $\pi$ of the action sequence $(S_1, S_2, \ldots, S_T)$ .
|
| 184 |
+
|
| 185 |
+
Theorem 3 (Regret Bound). Consider Algorithm 1 as the neighbor sampling algorithm for training GCN. Given either (3) or (4) as reward function, we can bound its regret as follows. Let $\Delta_T = (\bar{C}_v\ln T)^{-\frac{2}{3}}(D_v\ln D_v)^{\frac{1}{3} T^{\frac{2}{3}}}$ , $\eta = \sqrt{\frac{2k\ln(D_v / k)}{C_r(\exp(C_r) - 1)D_vT}}$ , and $\gamma = \min\{1, \sqrt{\frac{(\exp(C_r) - 1)D_v\ln(D_v / k)}{2kC_rT}}\}$ . Given the variation budget in (8), for every $T \geq D_v \geq 2$ , we have the regret bound for either (3) or (4) as
|
| 186 |
+
|
| 187 |
+
$$
|
| 188 |
+
\mathcal {R} (T) \leq \bar {C} \left(D _ {v} \ln D _ {v}\right) ^ {\frac {1}{3}} \cdot \left(T \sqrt {\ln T}\right) ^ {\frac {2}{3}}. \tag {10}
|
| 189 |
+
$$
|
| 190 |
+
|
| 191 |
+
where $\bar{C}$ is a absolute constant independent with $D_v$ and $T$ .
|
| 192 |
+
|
| 193 |
+
The obtained regret is as the order of $(D_v\ln D_v)^{1/3}(T\sqrt{\ln T})^{2/3}$ . According to Theorem 1 in [4], the worst-case regret of any policy is lower bounded by $O((D_vV_T)^{1/3}T^{2/3})$ , suggesting our algorithm is near-optimal (with a modest $\ln T$ factor from optimal). In that sense, we name our algorithm as Thanos from "Thanos Has A Near-Optimal Sampler."
|
| 194 |
+
|
| 195 |
+
The near-optimal regret from Theorem 3 can be obtained due to the following reasons: (i) Our proposed reward leads to a more meaningful notion of regret which is directly connected to approximation error. (ii) Its variation budget is tractable to be formulated by the dynamic of embeddings. (iii) We explicitly study training dynamic of GCN to bound embeddings' dynamic by recursively unfolding the neighbor aggregation and training steps in the practical setting.
|
| 196 |
+
|
| 197 |
+
As mentioned in Section 3.2, the regret based on rewards (3) is equivalent to approximation error. The result of Theorem 3 says the approximation error of Thanos asymptotically converges to that of the optimal oracle with the near-fastest convergence rate. In the case of enforcing zero bias like BanditSampler, sampling variance is the exact approximation error. However, even if we ignore other previously-mentioned limitations, its regret (2) suggests the approximation error of their policy asymptotically converges to three (as opposed to one) times of the oracle's approximation error, so the regret is still linear. We compare the existing theoretical convergence guarantees in Table 1.
|
| 198 |
+
|
| 199 |
+
Table 1: Comparison of existing asymptotic convergence guarantees.
|
| 200 |
+
|
| 201 |
+
<table><tr><td></td><td>Dyanmic policy</td><td>Convergence analysis</td><td>Theory accounts for practical training</td><td>Bound reward var- iation explicitly</td><td>Sublinear gap to the optimal oracle</td><td>Stable re- ward/policy</td></tr><tr><td>Uniform policy</td><td>X</td><td>X</td><td>X</td><td>X</td><td>X</td><td>✓</td></tr><tr><td>BanditSampler</td><td>✓</td><td>✓</td><td>X</td><td>X</td><td>X</td><td>X</td></tr><tr><td>Thanos</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr></table>
|
| 202 |
+
|
| 203 |
+
# 5 Experiments
|
| 204 |
+
|
| 205 |
+
We describe the experiments to verify the effectiveness of Thanos and its improvement over Bandit-Sampler in term of sampling approximation error and final practical performance.
|
| 206 |
+
|
| 207 |
+
# 5.1 Illustrating Policy Differences via Synthetic Stochastic Block Model Data
|
| 208 |
+
|
| 209 |
+
As mentioned in Section 3.1, our reward will bias to sample the neighbors having contributed to accurate approximation. Fig. 2c is the visualization of this intuition: after setting $\bar{z}_{v,t} = (1,1)^{\top}$ ,
|
| 210 |
+
|
| 211 |
+

|
| 212 |
+
(a) Average $p_{\mathrm{intra}}$ among $\mathcal{V}_1\cap \mathcal{V}_{\mathrm{train}}$
|
| 213 |
+
|
| 214 |
+

|
| 215 |
+
(b) Test Accuracy
|
| 216 |
+
Figure 2: Figs. 2a and 2b illustrate the policy difference and compare their practical performance via cSBM synthetic data. Fig. 2c plot our reward function (3) after setting $\bar{z}_{v,t}^{(l)} = (1,1)^{\top}$ .
|
| 217 |
+
|
| 218 |
+

|
| 219 |
+
(c) Visualization of our reward (3)
|
| 220 |
+
|
| 221 |
+
the reward inside the dashed circle is positive; otherwise negative. And the embeddings closer to $\overline{z}_{v,t}$ will have larger rewards. In order to understand how this bias differentiates the policy of two samplers given different distribution of features and edges, we propose to use cSBM[14, 11] to generate synthetic graphs. We consider a cSBM [14] with two classes, whose node set $\mathcal{V}_1$ and $\mathcal{V}_2$ have 500 nodes. The node features are sampled from class-specific Gaussians $N_1$ , $N_2$ . We set feature size to 100, average degree $2\bar{d} = 20$ , $k = 10$ , and $\mu = 1$ , and we note that $\mu$ controls the difference between two Gaussian's mean [14]. The average number of inter-class and intra-class edges per node is $\bar{d} - \lambda \bar{d}^{1/2}$ and $\bar{d} + \lambda \bar{d}^{1/2}$ respectively. Then, we scale down the node features of $\mathcal{V}_1$ by 0.1 to differentiate the distribution of feature norm and test the sampler's sensitivity to it. The configuration of training and samplers is same as Section 5.4 and listed in Appendix.
|
| 222 |
+
|
| 223 |
+
In the case of cSBMs, an ideal sampler should sample more intra-class neighbors than inter-class neighbors to get linear-separable embeddings and better classification. Thus, we inspect for each $v$ the $k$ neighbors having the top- $k$ highest sampling probability, and compute the ratio of intra-class neighbors among them, i.e. $p_{\mathrm{intra}} = \sum_{i \in \mathcal{N}_v} \mathbf{1}\{(y_i = y_v) \cap (p_{i,t} \text{ is top-} k)\} / k$ . We report the average of $p_{\mathrm{intra}}$ for $\mathcal{V}_1 \cap \mathcal{V}_{\mathrm{train}}$ versus $t$ in Fig. 2a. For the scaled community $\mathcal{V}_1$ , Thanos will be biased to sample more intra-class neighbors due to the intuition explained by Fig. 2c, leading to more accurate approximation and improvement on test accuracy over BanditSampler as shown in Fig. 3a and 2b. This claim holds true under different edge distributions ( $\lambda \in \{0.5, 1, 1.5\}$ ). We additionally report the results on unscaled $\mathcal{V}_2$ for comparison in Appendix.
|
| 224 |
+
|
| 225 |
+
# 5.2 Evaluating the Sampling Approximation Error
|
| 226 |
+
|
| 227 |
+

|
| 228 |
+
(a) Comparison on cSBM data.
|
| 229 |
+
Figure 3: Compare the approximation error between two samplers in cSBM synthetic graphs (Fig. 3a) and training of GCN (Fig. 3b) and GAT (Fig. 3c) on Cora. In Fig. 3b and 3c, negative values indicate that Thanos has a lower approximation error than BanditSampler.
|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
(b) GCN on CoraFull.
|
| 233 |
+
|
| 234 |
+

|
| 235 |
+
(c) GAT on CoraFull.
|
| 236 |
+
|
| 237 |
+
We numerically compare the approximation error between two samplers in the training of GCN and GAT on Cora dataset from Kipf and Welling [21] as well as cSBM synthetic data in Section 5.1. At each iteration, given a batch of nodes $\mathcal{V}_L$ at the top layer, we perform sampling with BanditSampler and Thanos respectively, getting two subgraphs $\mathcal{G}_{bs}$ and $\mathcal{G}_{our}$ . For Cora, we perform forward propagation on the original graph $\mathcal{G}$ as well as $\mathcal{G}_{bs}$ and $\mathcal{G}_{our}$ respectively with the same model parameters $\{W_t^{(l)}\}_{l\in [L]}$ , and we get the accurate $\pmb{\mu}_{v,t}^{(1)}$ of the first layer aggregation as well as its estimated values $\widehat{\pmb{\mu}}_{v,t}^{(bs)}$ and $\widehat{\pmb{\mu}}_{v,t}^{(our)}$ from both samplers. We compute $\mathrm{dist}_{bs} = \sum_{v\in \mathcal{V}_L}\| \widehat{\pmb{\mu}}_{v,t}^{(bs)} - \pmb{\mu}_{v,t}^{(1)}\|$ and $\mathrm{dist}_{our} = \sum_{v\in \mathcal{V}_L}\| \widehat{\pmb{\mu}}_{v,t}^{(our)} - \pmb{\mu}_{v,t}^{(1)}\|$ . We set $k = 2$ , $\gamma = 0.1$ , $\eta = 0.1$ for Thanos, $\eta = 0.01$ for BanditSampler (since its unstable rewards require smaller $\eta$ ), $\Delta_T = 200$ , $\alpha_t = 0.001$ , $L = 2$ and the dimension of hidden embeddings $d = 16$ . Fig. 3 plots the mean and the standard deviation of $\Delta_{\mathrm{dist}} = \sum_{t=1}^{T}\mathrm{dist}_{our} - \sum_{t=1}^{T}\mathrm{dist}_{bs}$ with 10 trials. The mean curves of both GCN and GAT are
|
| 238 |
+
|
| 239 |
+
below zero, suggesting Thanos establishes lower approximation error in practice. For cSBM synthetic graphs, we follow the setting as Section 5.1, compare two samplers under different edge distributions $(\lambda \in \{0.5, 1, 1.5\})$ and directly plot $\sum_{t} \mathrm{dist}_{bs}$ (blue) and $\sum_{t} \mathrm{dist}_{our}$ (red). From Fig. 3a, we know Thanos achieves quite lower approximation error and higher test accuracy (Fig. 2b) in the setting of less inter-edges (e.g. $\lambda = 1$ or 1.5) due to the intuition manifested by Fig. 2c, whereas BanditSampler is biased to sample large-norm neighbors, resulting in high approximation error and degenerated performance. For small $\lambda = 0.5$ , the almost-equal number of inter/intra edges will shift $\mu_{v,t}$ to the unscaled community $\mathcal{V}_2$ . Hence two samplers' approximation error are close.
|
| 240 |
+
|
| 241 |
+
# 5.3 Sensitivity to Embedding Norms
|
| 242 |
+
|
| 243 |
+

|
| 244 |
+
(a) Times of sampling corrupted nodes.
|
| 245 |
+
|
| 246 |
+

|
| 247 |
+
(b) Test Accuracy
|
| 248 |
+
|
| 249 |
+

|
| 250 |
+
(c) Test Acc. vs. sample size $k$ .
|
| 251 |
+
Figure 4: Fig. 4a plots the average number of times the corrupted/rescaled nodes were sampled per epoch by both samplers on corrupted CoraFull. And Fig. 4b compares their corresponding test accuracy, suggesting the performance of BanditSampler will be degenerate by its sensitivity of embedding norm. Fig. 4c shows the ablation study on sample size $k$ with ogbn-arxiv.
|
| 252 |
+
|
| 253 |
+
Previously, we claim that BanditSampler will bias policy to the neighbors with large norm, potentially leading to severe deviation from $\mu_{v,t}$ as well as a drop of performance. In this section, we present the evidence on CoraFull [5] with corrupted features and demonstrate that our algorithm resolves this issue. For CoraFull, we randomly corrupt 800 (roughly $5\%$ of) training nodes by multiplying their features by 40. We run both samplers 300 epochs with the corrupted CoraFull and count the total number of times that these corrupted nodes were sampled per epoch. We set $k = 3$ , $\eta = 1$ , $\gamma = 0.2$ and the other hyperparameters the same as Section 5.4. We repeat 5 trials for each algorithm and report the average over epochs and trials. We also record the test accuracy with the best validation accuracy in each trial and report its mean across trials. From Fig. 4a, we can tell BanditSampler biases to corrupted nodes, degenerating its performance more as shown in Fig. 4b.
|
| 254 |
+
|
| 255 |
+
# 5.4 Accuracy Comparisons across Real-World Benchmark Datasets
|
| 256 |
+
|
| 257 |
+
We conduct node classification experiments on several benchmark datasets with large graphs: ogbn-arxiv, ogbn-products [18], CoraFull, Chameleon [11] and Squirrel [27]. The models include GCN and GAT. For GCN, we compare the test accuracy among Thanos, BanditSampler, GraphSage[17], LADIES[37], GraphSaint[35], ClusterGCN[10] and vanilla GCN. For GAT, we compare test accuracy among Thanos, BanditSampler and vanilla GAT. The experimental setting is similar with Liu et al. [24]. The dimension of hidden embedding $d$ is 16 for Chameleon and Squirrel, 256 for the others. The number of layers is fixed as 2. We set $k = 3$ for CoraFull; $k = 5$ for ogbn-arxiv, Chameleon, Squirrel; $k = 10$ for ogbn-products. We searched the learning rate among $\{0.001, 0.002, 0.005, 0.01\}$ and found 0.001 optimal. And we set the penalty weight of $l_2$ regularization 0.0005 and dropout rate 0.1. We do grid search for sampling hyperparameters: $\eta, \gamma$ , and $\Delta_T$ and choose optimal ones for each. Their detailed settings and dataset split are listed in Appendix. Also we apply neighbor sampling for test nodes for all methods, which is consistent with prior LADIES and GraphSaint experiments, and is standard for scalability in practical setting. From Table 2, we can tell our algorithm achieves superior performance over BanditSampler for training GAT, and competitive or superior performance for training GCN.
|
| 258 |
+
|
| 259 |
+
# 5.5 Sample Size Ablation
|
| 260 |
+
|
| 261 |
+
To verify the sensitivity of Thanos w.r.t. sample size $k$ , we compare the test accuracy between Thanos and BanditSampler as sample size $k$ increases on Ogbn-arxiv. The other hyperparameter setting is the same as Section 5.4. We compare two samplers with $k = 3, k = 5, k = 10, k = 15$ . The result from Fig. 4c suggests Thanos still exhibits a mainfest improvement over BanditSampler as $k$ increases.
|
| 262 |
+
|
| 263 |
+
Table 2: Test accuracy. $\times$ means the program crashed after a few epochs due to the massive memory cost and segmentation fault in TensorFlow. Bold indicates first; red second.
|
| 264 |
+
|
| 265 |
+
<table><tr><td rowspan="2" colspan="2">Methods</td><td colspan="5">Test Accuracy</td></tr><tr><td>Chameleon</td><td>Squirrel</td><td>Ogbn-arxiv</td><td>CoraFull</td><td>Ogbn-products</td></tr><tr><td rowspan="7">GCN</td><td>Vanilla GCN</td><td>0.518(±0.021)</td><td>0.327(±0.023)</td><td>0.659(±0.004)</td><td>0.565(±0.004)</td><td>×</td></tr><tr><td>GraphSage</td><td>0.559(±0.013)</td><td>0.385(±0.007)</td><td>0.652(±0.005)</td><td>0.554(±0.004)</td><td>0.753(±0.002)</td></tr><tr><td>LADIES</td><td>0.547(±0.008)</td><td>0.338(±0.021)</td><td>0.651(±0.003)</td><td>0.564(±0.001)</td><td>0.673(±0.004)</td></tr><tr><td>GraphSaint</td><td>0.525(±0.022)</td><td>0.352(±0.007)</td><td>0.565(±0.002)</td><td>0.583(±0.003)</td><td>0.746(±0.005)</td></tr><tr><td>ClusterGCN</td><td>0.577(±0.022)</td><td>0.391(±0.015)</td><td>0.575(±0.004)</td><td>0.390(±0.005)</td><td>0.746(±0.014)</td></tr><tr><td>BanditSampler</td><td>0.578(±0.016)</td><td>0.383(±0.005)</td><td>0.652(±0.005)</td><td>0.555(±0.009)</td><td>0.754(±0.007)</td></tr><tr><td>Thanos</td><td>0.607(±0.012)</td><td>0.401(±0.013)</td><td>0.663(±0.006)</td><td>0.574(±0.010)</td><td>0.759(±0.001)</td></tr><tr><td rowspan="3">GAT</td><td>Vanilla GAT</td><td>0.558(±0.009)</td><td>0.339(±0.011)</td><td>0.682(±0.005)</td><td>0.519(±0.012)</td><td>×</td></tr><tr><td>BanditSampler</td><td>0.602(±0.005)</td><td>0.386(±0.006)</td><td>0.675(±0.002)</td><td>0.544(±0.002)</td><td>0.756(±0.001)</td></tr><tr><td>Thanos</td><td>0.620(±0.014)</td><td>0.412(±0.003)</td><td>0.680(±0.001)</td><td>0.559(±0.011)</td><td>0.759(±0.002)</td></tr></table>
|
| 266 |
+
|
| 267 |
+
# 6 Related Work
|
| 268 |
+
|
| 269 |
+
Hamilton et al. [17] initially proposed to uniformly sample subset for each root node. Many other methods extend this strategy, either by reducing variance [9], by redefining neighborhoods [34] [36] [22], or by reweighting the policy with MAB [24] and reinforcement learning [26]. Layer-wise sampling further reduces the memory footprint by sampling a fixed number of nodes for each layer. Recent layer-wise sampling approaches include [8] and [37] that use importance sampling according to graph topology, as well as [20] and [12] that also consider node features. Moreover, training GNNs with subgraph sampling involves taking random subgraphs from the original graph and apply them for each step. Chiang et al. [10] partitions the original graph into smaller subgraphs before training. Zeng et al. [35] and Hu et al. [19] samples subgraphs in an online fashion. However, most of them do not provide any convergence guarantee on the sampling variance. We are therefore less likely to be confident of their behavior as GNN models are applied to larger and larger graphs.
|
| 270 |
+
|
| 271 |
+
# 7 Conclusion
|
| 272 |
+
|
| 273 |
+
In this paper, we build upon bandit formulation for GNN sampling and propose a newly-designed reward function that introduces some degree of bias to reduce variance and avoid numerical instability. Then, we study the dynamic of embeddings introduced by SGD so that bounding the variation of our rewards. Based on that, we prove our algorithm incurs a new-optimal regret. Besides, our algorithm named Thanos addresses another trade-off between remembering and forgetting caused by the non-stationary rewards by employing Rexp3 algorithm. The empirical results demonstrate the improvement of Thanos over BanditSampler in term of approximation error and test accuracy.
|
| 274 |
+
|
| 275 |
+
# Acknowledgements
|
| 276 |
+
|
| 277 |
+
We would like to thank Amazon Web Service for supporting the computational resources, Hanjun Dai for the extremely helpful discussion, and the anonymous reviewers for providing constructive feedback on our manuscript.
|
| 278 |
+
|
| 279 |
+
# References
|
| 280 |
+
|
| 281 |
+
[1] P. Auer, N. Cesa-Bianchi, Y. Freund, and R. E. Schapire. The nonstochastic multiarmed bandit problem. SIAM journal on computing, 32(1):48-77, 2002.
|
| 282 |
+
[2] P. W. Battaglia, J. B. Hamrick, V. Bapst, A. Sanchez-Gonzalez, V. Zambaldi, M. Malinowski, A. Tacchetti, D. Raposo, A. Santoro, R. Faulkner, et al. Relational inductive biases, deep learning, and graph networks. arXiv preprint arXiv:1806.01261, 2018.
|
| 283 |
+
[3] R. v. d. Berg, T. N. Kipf, and M. Welling. Graph convolutional matrix completion. arXiv preprint arXiv:1706.02263, 2017.
|
| 284 |
+
[4] O. Besbes, Y. Gur, and A. Zeevi. Stochastic multi-armed-bandit problem with non-stationary rewards. In Advances in neural information processing systems, pages 199–207, 2014.
|
| 285 |
+
|
| 286 |
+
[5] A. Bojchevski and S. Gunnemann. Deep gaussian embedding of graphs: Unsupervised inductive learning via ranking. arXiv preprint arXiv:1707.03815, 2017.
|
| 287 |
+
[6] M. M. Bronstein, J. Bruna, Y. LeCun, A. Szlam, and P. Vandergheynst. Geometric deep learning: going beyond euclidean data. IEEE Signal Processing Magazine, 34(4):18-42, 2017.
|
| 288 |
+
[7] S. Bubeck and N. Cesa-Bianchi. Regret analysis of stochastic and nonstochastic multi-armed bandit problems. arXiv preprint arXiv:1204.5721, 2012.
|
| 289 |
+
[8] J. Chen, T. Ma, and C. Xiao. Fastgcn: fast learning with graph convolutional networks via importance sampling. arXiv preprint arXiv:1801.10247, 2018.
|
| 290 |
+
[9] J. Chen, J. Zhu, and L. Song. Stochastic training of graph convolutional networks with variance reduction. In Proceedings of the 35th International Conference on Machine Learning, pages 942-950, 2018.
|
| 291 |
+
[10] W.-L. Chiang, X. Liu, S. Si, Y. Li, S. Bengio, and C.-J. Hsieh. Cluster-gcn: An efficient algorithm for training deep and large graph convolutional networks. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 257-266, 2019.
|
| 292 |
+
[11] E. Chien, J. Peng, P. Li, and O. Milenkovic. Adaptive universal generalized pagerank graph neural network. In International Conference on Learning Representations. https://openreview.net/forum, 2021.
|
| 293 |
+
[12] W. Cong, R. Forsati, M. Kandemir, and M. Mahdavi. Minimal variance sampling with provable guarantees for fast training of graph neural networks. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 1393-1403, 2020.
|
| 294 |
+
[13] H. Dai, B. Dai, and L. Song. Discriminative embeddings of latent variable models for structured data. In International conference on machine learning, pages 2702-2711. PMLR, 2016.
|
| 295 |
+
[14] Y. Deshpande, S. Sen, A. Montanari, and E. Mossel. Contextual stochastic block models. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. URL https://proceedings.neurips.cc/paper/2018/file/08fc80de8121419136e443a70489c123-Paper.pdf.
|
| 296 |
+
[15] A. Fout, J. Byrd, B. Shariat, and A. Ben-Hur. Protein interface prediction using graph convolutional networks. In Advances in Neural Information Processing Systems, pages 6530-6539, 2017.
|
| 297 |
+
[16] R. Ge, S. M. Kakade, R. Kidambi, and P. Netrapalli. The step decay schedule: A near optimal, geometrically decaying learning rate procedure for least squares. arXiv preprint arXiv:1904.12838, 2019.
|
| 298 |
+
[17] W. Hamilton, Z. Ying, and J. Leskovec. Inductive representation learning on large graphs. In Advances in neural information processing systems, pages 1024-1034, 2017.
|
| 299 |
+
[18] W. Hu, M. Fey, M. Zitnik, Y. Dong, H. Ren, B. Liu, M. Catasta, and J. Leskovec. Open graph benchmark: Datasets for machine learning on graphs. arXiv preprint arXiv:2005.00687, 2020.
|
| 300 |
+
[19] Z. Hu, Y. Dong, K. Wang, and Y. Sun. Heterogeneous graph transformer. In Proceedings of The Web Conference 2020, pages 2704-2710, 2020.
|
| 301 |
+
[20] W. Huang, T. Zhang, Y. Rong, and J. Huang. Adaptive sampling towards fast graph representation learning. In Advances in neural information processing systems, pages 4558-4567, 2018.
|
| 302 |
+
[21] T. N. Kipf and M. Welling. Semi-supervised classification with graph convolutional networks. In International Conference on Learning Representations (ICLR), 2017.
|
| 303 |
+
[22] A. Li, Z. Qin, R. Liu, Y. Yang, and D. Li. Spam review detection with graph convolutional networks. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management, pages 2703-2711, 2019.
|
| 304 |
+
|
| 305 |
+
[23] Z. Liu, C. Chen, X. Yang, J. Zhou, X. Li, and L. Song. Heterogeneous graph neural networks for malicious account detection. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management, pages 2077-2085, 2018.
|
| 306 |
+
[24] Z. Liu, Z. Wu, Z. Zhang, J. Zhou, S. Yang, L. Song, and Y. Qi. Bandit samplers for training graph neural networks. In H. Larochelle, M. Ranzato, R. Hadsell, M. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020. URL https://proceedings.neurips.cc/paper/2020/bit/4cea2358d3cc5f8cd32397ca9bc51b94-Abstract.html.
|
| 307 |
+
[25] Y. Nesterov. Introductory lectures on convex optimization: A basic course, volume 87. Springer Science & Business Media, 2003.
|
| 308 |
+
[26] J. Oh, K. Cho, and J. Bruna. Advancing graphsage with a data-driven node sampling. arXiv preprint arXiv:1904.12935, 2019.
|
| 309 |
+
[27] H. Pei, B. Wei, K. C.-C. Chang, Y. Lei, and B. Yang. Geom-gcn: Geometric graph convolutional networks. arXiv preprint arXiv:2002.05287, 2020.
|
| 310 |
+
[28] S. J. Reddi, A. Hefny, S. Sra, B. Poczos, and A. Smola. Stochastic variance reduction for nonconvex optimization. In International conference on machine learning, pages 314-323. PMLR, 2016.
|
| 311 |
+
[29] F. Salehi, L. E. Celis, and P. Thiran. Stochastic optimization with bandit sampling. arXiv preprint arXiv:1708.02544, 2017.
|
| 312 |
+
[30] M. Schlichtkrull, T. N. Kipf, P. Bloem, R. Van Den Berg, I. Titov, and M. Welling. Modeling relational data with graph convolutional networks. In European semantic web conference, pages 593-607. Springer, 2018.
|
| 313 |
+
[31] T. Uchiya, A. Nakamura, and M. Kudo. Algorithms for adversarial bandit problems with multiple plays. In International Conference on Algorithmic Learning Theory, pages 375-389. Springer, 2010.
|
| 314 |
+
[32] P. Velicković, G. Cucurull, A. Casanova, A. Romero, P. Lio, and Y. Bengio. Graph attention networks. arXiv preprint arXiv:1710.10903, 2017.
|
| 315 |
+
[33] Z. Wu, S. Pan, F. Chen, G. Long, C. Zhang, and S. Y. Philip. A comprehensive survey on graph neural networks. IEEE transactions on neural networks and learning systems, 2020.
|
| 316 |
+
[34] R. Ying, R. He, K. Chen, P. Eksombatchai, W. L. Hamilton, and J. Leskovec. Graph convolutional neural networks for web-scale recommender systems. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 974–983, 2018.
|
| 317 |
+
[35] H. Zeng, H. Zhou, A. Srivastava, R. Kannan, and V. Prasanna. GraphSAINT: Graph sampling based inductive learning method. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=BJe8pkHFwS.
|
| 318 |
+
[36] C. Zhang, D. Song, C. Huang, A. Swami, and N. V. Chawla. Heterogeneous graph neural network. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, page 793–803, 2019.
|
| 319 |
+
[37] D. Zou, Z. Hu, Y. Wang, S. Jiang, Y. Sun, and Q. Gu. Layer-dependent importance sampling for training deep and large graph convolutional networks. In Advances in Neural Information Processing Systems, pages 11249-11259, 2019.
|
abiasedgraphneuralnetworksamplerwithnearoptimalregret/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:65aa9cd66485df93006e80c3a776aa502028fcf402d65370701bfa49efbc8d18
|
| 3 |
+
size 361666
|
abiasedgraphneuralnetworksamplerwithnearoptimalregret/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:321f8e3e3187bb4b5d62bc5750d831523f92d3cae2503d808bfb23247d589b5a
|
| 3 |
+
size 527036
|
abilevelframeworkforlearningtosolvecombinatorialoptimizationongraphs/89571d73-fc11-46e1-8e19-aebbd46f5ab5_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3765a88f526c8032f4902f3a706275d900fb6e5b2146e71a024a7f5c4571e2f7
|
| 3 |
+
size 92984
|
abilevelframeworkforlearningtosolvecombinatorialoptimizationongraphs/89571d73-fc11-46e1-8e19-aebbd46f5ab5_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:88ae7addb2c6951a433e10edd33e53e85fdc90495b39308b29b087c8bed2efbb
|
| 3 |
+
size 119185
|
abilevelframeworkforlearningtosolvecombinatorialoptimizationongraphs/89571d73-fc11-46e1-8e19-aebbd46f5ab5_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a82d13edfc78d58b337f538e3200c2cef2cf7f6fd946de9e864f56b80498906a
|
| 3 |
+
size 547005
|
abilevelframeworkforlearningtosolvecombinatorialoptimizationongraphs/full.md
ADDED
|
@@ -0,0 +1,387 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A Bi-Level Framework for Learning to Solve Combinatorial Optimization on Graphs
|
| 2 |
+
|
| 3 |
+
Runzhong Wang $^{1*}$
|
| 4 |
+
Feng Qi $^{2}$
|
| 5 |
+
Zhigang Hua $^{2}$
|
| 6 |
+
Shuang Yang $^{2}$
|
| 7 |
+
Gan Liu $^{2}$
|
| 8 |
+
Jiayi Zhang $^{1}$
|
| 9 |
+
Junchi Yan $^{1(\boxtimes)}$ †
|
| 10 |
+
|
| 11 |
+
$^{1}$ Department of CSE and MoE Key Lab of AI, Shanghai Jiao Tong University $^{2}$ Ant Group {runzhong.wang,zhangjiayirr,yanjunchi,xkyang}@sjtu.edu.cn {z.hua,liugan/lg,feng.qi,shuang.yang,jun.zhoujun}@antgroup.com
|
| 12 |
+
|
| 13 |
+
# Abstract
|
| 14 |
+
|
| 15 |
+
Combinatorial Optimization (CO) has been a long-standing challenging research topic featured by its NP-hard nature. Traditionally such problems are approximately solved with heuristic algorithms which are usually fast but may sacrifice the solution quality. Currently, machine learning for combinatorial optimization (MLCO) has become a trending research topic, but most existing MLCO methods treat CO as a single-level optimization by directly learning the end-to-end solutions, which are hard to scale up and mostly limited by the capacity of ML models given the high complexity of CO. In this paper, we propose a hybrid approach to combine the best of the two worlds, in which a bi-level framework is developed with an upper-level learning method to optimize the graph (e.g. add, delete or modify edges in a graph), fused with a lower-level heuristic algorithm solving on the optimized graph. Such a bi-level approach simplifies the learning on the original hard CO and can effectively mitigate the demand for model capacity. The experiments and results on several popular CO problems like Directed Acyclic Graph scheduling, Graph Edit Distance and Hamiltonian Cycle Problem show its effectiveness over manually designed heuristics and single-level learning methods. Code available at https://github.com/Thinklab-SJTU/PP0-BiHyb.
|
| 16 |
+
|
| 17 |
+
# 1 Introduction
|
| 18 |
+
|
| 19 |
+
Combinatorial Optimization (CO) is a family of long-standing optimization problems. A large portion of CO problems is NP-hard due to the combinatorial nature, raising challenges for traditional (exact) solvers on even medium-sized problems. Heuristic algorithms are often adopted to approximately solve CO problems within an acceptable time, and there is a growing trend adopting modern data-driven approaches to solve CO problems that achieve better and faster results [30].
|
| 20 |
+
|
| 21 |
+
The major line of works solving CO with machine learning (ML) is single-level [8, 25, 29, 30, 39, 41, 57, 59, 64], where the prediction of ML module lies in the solution space, assuming the model has enough capacity learning the input-output mapping of the CO problem. However, achieving such an assumption is non-trivial, leading to the following two aspects of challenges. On the one hand, it is challenging to design a model with enough capacity with limited computational resources, and existing models are usually tailored for specific problems which require heavy trail-and-error [25, 57, 59]. On the other hand, training such a heavy model requires either supervision from high-quality labels [31, 57, 60] which are infeasible to obtain for large-sized problems due to the NP-hard nature, or reinforcement learning (RL) [8, 30, 38, 39] which might be unstable due to the challenges of large action space and sparse reward especially for large-sized problems [52].
|
| 22 |
+
|
| 23 |
+
An alternative approach resorts to a hybrid machine learning and traditional optimization pipeline [11, 18, 26, 31, 51, 60, 61] hoping to utilize the power of traditional optimization methods. However, designing a general hybrid MLCO approach is still non-trivial, as existing methods [11, 60] usually require domain-specific knowledge for the model design. It is again challenging to obtain high-quality supervision labels, and existing methods are based on either problem-specific surrogate labels [18, 31, 60], or learned with RL while the challenges of RL still exist [11, 51].
|
| 24 |
+
|
| 25 |
+
In this paper, we propose a general hybrid MLCO approach over graphs. We first reduce the complexity of deep learning model by reformulating the original CO into a bi-level optimization, whose objective is to minimize the long-term upper-level objective by optimizing the graph structure, and the lower-level problem is handled by an existing heuristic. We resort to RL and the traditional heuristic can be absorbed as part of the environment, and it is shown that the sparse reward issue is mitigated for the resulting RL problem compared to previous RL-based methods [8, 30, 38, 39]. Specifically, our model is built with standard building blocks: the input graph is encoded by Graph Convolutional Network (GCN) [32], and the actor and critic modules are based on ResNet blocks [22] and attention models [55]. All modules are learned with the Proximal Policy Optimization (PPO) algorithm [48]. The contributions of this paper include:
|
| 26 |
+
|
| 27 |
+
- To combine the best of the two worlds, we propose a general hybrid approach that integrates traditional heuristic solvers with machine learning algorithm.
|
| 28 |
+
- We propose a bi-level optimization formulation for learning to solve CO on graphs. The upper-level optimization adopts a reinforcement learning agent to adaptively modify the graphs, while the lower-level optimization involves traditional learning-free heuristics to solve combinatorial optimization tasks on the modified graphs. Our approach does not require ground truth labels.
|
| 29 |
+
- The experiments for graphs up to thousands of nodes on several popular tasks such as Directed Acrylic Graph scheduling (DAG scheduling), Graph Edit Distance (GED), and Hamiltonian Cycle Problem (HCP) show that our method notably surpasses both traditional learning-free heuristics and single-level learning method. Our method generalizes well to graphs of different sizes while having comparable overhead to the single-level learning methods.
|
| 30 |
+
|
| 31 |
+
# 2 Related Work
|
| 32 |
+
|
| 33 |
+
Here we discuss related works listed in recent surveys [4, 62, 63]. We present their methodologies and compare with our method.
|
| 34 |
+
|
| 35 |
+
Learning the end-to-end solution as a sequence. The major line of existing MLCO works mainly focus on tackling the problem end-to-end by predicting a sequence of solutions [25, 29, 30, 35, 39, 41, 57, 59, 64]. The pioneering work [57] is designated for traveling salesman problem (TSP), whereby the sequence-to-sequence Pointer Network (PtrNet) model is learned with supervised learning. In [30], a graph-to-sequence framework is proposed with graph embedding [10] and Q-learning [40] for general CO over graphs, and this general framework inspires the major line of MLCO works with applications to DAG scheduling [39], graph matching [36], and job shop scheduling [64]. [35] extends [30] by problem reduction and supervised learning-based tree search. The framework in [30] is treated as the RL baseline in this paper. Our approach differs from these single-level end-to-end RL methods [30, 36, 38, 39], which lack the flexibility to borrow the power of traditional methods and often suffer from the fundamental issues in RL: sparse reward and large action space.
|
| 36 |
+
|
| 37 |
+
Learning to rewrite end-to-end solutions. Another line of end-to-end learning works predicts rewriting strategies of an existing solution [8, 38], where the model predictions also lie in the solution space. The agents learn to improve an existing solution in a decision sequence, and problems like job scheduling, expression simplification and routing problems have been studied. These learning-based local search heuristics also fall within the single-level paradigm, while our method incorporates another optimization over the graph structure. The sparse reward issue also exists for [8, 38], as a long episode is usually required for searching for a satisfying result.
|
| 38 |
+
|
| 39 |
+
One-shot unsupervised end-to-end learning. There is also a growing trend applying one-shot unsupervised methods for the input-output mapping of CO, with maximum clique and graph cut solved by [29], and quadratic assignment problem solved by [59]. However, the generalization ability of these methods is still an open question, and complicated constraints are often non-trivial to be
|
| 40 |
+
|
| 41 |
+

|
| 42 |
+
Figure 1: An overview of our bi-level hybrid MLCO solver. The graph structure is optimized at the upper level by an RL agent, and the optimized graphs are solved by heuristics at the lower level. The actions can be any modifications of the edges (i.e. adding, deleting edges and modifying edge attributes), and edge deletion is presented in this example.
|
| 43 |
+
|
| 44 |
+
encoded in the network's prediction. Moreover, such a one-shot end-to-end network often calls for much higher model capacity compared with our multi-round alternating optimization paradigm.
|
| 45 |
+
|
| 46 |
+
Hybrid of machine learning and traditional solvers. Different from learning the solution end-to-end, researchers also propose hybrid machine learning and traditional solver approaches. ML modules are studied as sub-routines for traditional solvers, especially predicting branching strategies for branch-and-bound with either supervised learning [18, 31] or reinforcement learning [51]. In [60], the heuristic routine in $\mathbf{A}^*$ algorithm is replaced by graph neural network to solve graph edit distance. However, these methods are tailored for special problems, and our aim is to develop a more general framework, where the learning part and heuristic module are two peers alternatively performed.
|
| 47 |
+
|
| 48 |
+
Bi-level optimization. Our method is based on bi-level optimization, which is a family of optimization problems where a lower-level optimization is nested inside an upper-level optimization. Bi-level optimization is in general NP-hard [27, 56], and the applications of bi-level optimization can be found ranging from multi-player games [27] to vision tasks [37], and there is a loosely relevant attempt [2] adopting supervised learning to solve a bi-level optimization for transportation.
|
| 49 |
+
|
| 50 |
+
# 3 Our Approach
|
| 51 |
+
|
| 52 |
+
In this paper, we propose a Bi-level Hybrid (BiHyb) machine learning and traditional heuristic approach. Sec. 3.1 shows both single-level and bi-level formulations of CO, and Sec. 3.2 shows the RL approach to the bi-level CO.
|
| 53 |
+
|
| 54 |
+
# 3.1 Bi-level Reformulation of Combinatorial Optimization
|
| 55 |
+
|
| 56 |
+
Without loss of generality, we consider the classic single-level CO with a single graph $\mathcal{G}$ as:
|
| 57 |
+
|
| 58 |
+
$$
|
| 59 |
+
\min _ {\mathbf {x}} f (\mathbf {x} \mid \mathcal {G}) \quad s. t. \quad h _ {i} (\mathbf {x}, \mathcal {G}) \leq 0, \text {f o r} i = 1 \dots I \tag {1}
|
| 60 |
+
$$
|
| 61 |
+
|
| 62 |
+
where $\mathbf{x}$ denotes the decision variable (i.e. solution), $f(\mathbf{x}|\mathcal{G})$ denotes the objective function given input graph $\mathcal{G}$ and $h_i(\mathbf{x},\mathcal{G}) \leq 0$ represents the set of constraints. For example, in DAG scheduling, the constraints enforce that the solution $\mathbf{x}$ , i.e. the execution order of the DAG job nodes, lies in the feasible space and does not conflict the topological dependency structure of $\mathcal{G}$ . The popular framework of existing MLCO methods regards Eq. 1 as a straight-forward end-to-end learning task, and various training methods have been developed, including: 1) supervised learning [41, 57] obtaining training labels by solving small-scaled Eq. 1 with traditional solvers, however it is nearly infeasible to solve larger NP-hard problems; 2) unsupervised learning [29, 59] adopting the continuous relaxation of Eq. 1 as the learning objective, but existing methods face challenges when dealing with complicated constraints; and 3) reinforcement learning [8, 30] by predicting $\mathbf{x}$ sequentially, but the reward signal is unavailable until $\mathbf{x}$ reaches a complete solution, leading to the sparse reward issue.
|
| 63 |
+
|
| 64 |
+
To ease the challenges introduced by the single-level formulation, we resort to the classic idea of modifying the original problem to aid problem solving, e.g. adding cutting planes for integer programming [19, 54]. Our proposed framework is capable of handling CO on graphs if all constraints can be encoded by the graph structure, and our motivation is described by the following hypothesis:
|
| 65 |
+
|
| 66 |
+
Algorithm 1: Policy Roll-out for Bi-level Learning of Hybrid MLCO Solver (BiHyb)
|
| 67 |
+
Input: Original graph $\mathcal{G}$ Max number of actions $K$
|
| 68 |
+
1 $\mathcal{G}^0\gets \mathcal{G}$
|
| 69 |
+
2 for $k\gets 0\dots (K - 1)$ do
|
| 70 |
+
3 Predict $P(\mathbf{a}_1),P(\mathbf{a}_2|\mathbf{a}_1)$ on $\mathcal{G}^k$ and sample $\mathbf{a}_1,\mathbf{a}_2$ ; # upper-level optimization for learning
|
| 71 |
+
4 $\mathcal{G}^{k + 1}\gets$ add, delete or modify the edge $(\mathbf{a}_1,\mathbf{a}_2)$ in $\mathcal{G}^k$ ; # state-transition
|
| 72 |
+
5 $\mathbf{x}^{k + 1}\gets$ solve arg $\min_{\mathbf{x}^{k + 1}}f(\mathbf{x}^{k + 1}|\mathcal{G}^{k + 1})$ by heuristic algorithm; # lower-level optimization
|
| 73 |
+
6 $r_k\gets f(\mathbf{x}^k |\mathcal{G}) - f(\mathbf{x}^{k + 1}|\mathcal{G})$ ; # reward
|
| 74 |
+
Output: A list of rewards $\{r_0\dots r_{K - 1}\}$
|
| 75 |
+
|
| 76 |
+
The optimal solution $\mathbf{x}^*$ to $\mathcal{G}$ can be acquired by modifying $\mathcal{G}$ . And we show the feasibility of this hypothesis for a family of problems by introducing the following proposition:
|
| 77 |
+
|
| 78 |
+
Proposition. We define $\mathbb{G}$ as the set of all graphs that can be modified from $\mathcal{G}$ , and $\mathbb{X}$ as the set of all feasible solutions of $\mathcal{G}$ . If the heuristic algorithm is a surjection from $\mathbb{G}$ to $\mathbb{X}$ , for $\mathcal{G}$ and its optimal solution $\mathbf{x}^*$ , there must exist $\mathcal{G}^* \in \mathbb{G}$ , such that $\mathbf{x}^*$ is the output of the heuristic by solving $\mathcal{G}^*$ .
|
| 79 |
+
|
| 80 |
+
Proof. By the definition of surjection, since $\mathbf{x}^* \in \mathbb{X}$ , there must exist at least one graph $\mathcal{G}^* \in \mathbb{G}$ such that $\mathbf{x}^*$ is the output of the heuristic algorithm by solving $\mathcal{G}^*$ .
|
| 81 |
+
|
| 82 |
+
We take DAG scheduling as an example to clarify this Proposition. Without loss of generality, we define processing the nodes 1 to $n$ sequentially as a feasible solution. Then, we can modify the graph as follows: if the edge connecting $i$ to $i + 1$ does not exist, it is added. After adding all edges from 1 to $n$ , processing the nodes from 1 to $n$ sequentially is the only feasible solution, which is also the output of any heuristic algorithm. The above construction method applies for all solutions in $\mathbb{X}$ .
|
| 83 |
+
|
| 84 |
+
Some Remarks. Our hypothesis and proposition provide the theoretical grounding of developing graph-modification methods to tackle combinatorial problems. It is worth noting that $\mathcal{G}^*$ only suggests that graph modification is a promising direction and finding $\mathcal{G}^*$ given $\mathcal{G}$ is usually NP-hard in practice. In this paper, we propose to improve the solving quality for heuristic algorithms by finding optimized (not necessarily optimal) graphs by learning based on the bi-level reformulation of the original single-level problem. Due to practical reasons, we restrict the max number of modifications.
|
| 85 |
+
|
| 86 |
+
In the bi-level reformulation of the original single-level problem, an optimized graph $\mathcal{G}'$ is introduced:
|
| 87 |
+
|
| 88 |
+
$$
|
| 89 |
+
\begin{array}{l l} \min _ {\mathbf {x} ^ {\prime}, \mathcal {G} ^ {\prime}} f (\mathbf {x} ^ {\prime} | \mathcal {G}) & \text {s . t .} H _ {j} (\mathcal {G} ^ {\prime}, \mathcal {G}) \leq 0, \text {f o r} j = 1 \dots J \\ & \mathbf {x} ^ {\prime} \in \arg \min _ {\mathbf {x} ^ {\prime}} \left\{f \left(\mathbf {x} ^ {\prime} \mid \mathcal {G} ^ {\prime}\right): h _ {i} \left(\mathbf {x} ^ {\prime}, \mathcal {G} ^ {\prime}\right) \leq 0, \text {f o r} i = 1 \dots I \right\} \end{array} \tag {2}
|
| 90 |
+
$$
|
| 91 |
+
|
| 92 |
+
where $f(\mathbf{x}'|\mathcal{G})$ , $f(\mathbf{x}'|\mathcal{G}')$ are the objectives for upper- and lower-level problems, respectively. The lower-level problem is the CO given the optimized graph $\mathcal{G}'$ , which is solved by a heuristic algorithm. The solved decision variable $\mathbf{x}'$ is further fed to the upper-level problem, whose objective $f(\mathbf{x}'|\mathcal{G})$ denotes the original CO objective computed by $\mathbf{x}'$ . The upper-level constraints $H_{j}(\mathcal{G}',\mathcal{G})\leq 0$ ensure that the feasible space of $\mathcal{G}'$ is a subset of $\mathcal{G}$ , and $\mathcal{G}'$ has at most $K$ modification steps from $\mathcal{G}$ . The upper-level problem optimizes $\mathcal{G}'$ by an RL agent, by regarding Eq. 2 as the environment.
|
| 93 |
+
|
| 94 |
+
# 3.2 Reinforcement Learning Algorithm
|
| 95 |
+
|
| 96 |
+
We resort to reinforcement learning to optimize $\mathcal{G}'$ in Eq. 2, which can be viewed as a data-driven embodiment of the classic bi-level optimization method by solving two levels of problems alternatively [53]. In Sec. 3.2.1 we present the Markov Decision Process (MDP) formulation of the bi-level optimization in Eq. 2, in Sec. 3.2.2 we describe the PPO learning algorithm in our approach.
|
| 97 |
+
|
| 98 |
+
# 3.2.1 The MDP Formulation
|
| 99 |
+
|
| 100 |
+
Eq. 2 is treated as the learning objective and optimized by RL in a data-driven manner. In this section, we discuss the Markov Decision Process (MDP) formulation for adopting RL to this bi-level optimization problem. The policy roll-out steps are summarized in Alg. 1. In the following, $\mathcal{G}^0$ equals $\mathcal{G}$ meaning the original graph, and $\mathcal{G}^k (k\neq 0)$ equals $\mathcal{G}'$ meaning the modified graph after action $k$ .
|
| 101 |
+
|
| 102 |
+

|
| 103 |
+
(a) DAG Scheduling
|
| 104 |
+
|
| 105 |
+

|
| 106 |
+
(b) Graph Edit Distance
|
| 107 |
+
Figure 2: Illustration of the state, action and reward for all problems discussed in this paper.
|
| 108 |
+
|
| 109 |
+

|
| 110 |
+
(c) Hamiltonian Cycle Problem
|
| 111 |
+
|
| 112 |
+
State. The current graph $\mathcal{G}^k$ is treated as the state, whose nodes and edges encode both the problem input and the current constraints. The starting state $\mathcal{G}^0$ represents the original CO problem.
|
| 113 |
+
|
| 114 |
+
Action. The action is defined as adding, removing or modifying an edge in $\mathcal{G}^k$ . Since there are at most $m^2$ edges if $\mathcal{G}^k$ has $m$ nodes, we shrink the action space to $O(m)$ by decomposing the edge selection as two steps: firstly selecting the starting node, and then selecting the ending node.
|
| 115 |
+
|
| 116 |
+
State transition. After taking an action, $\mathcal{G}^k$ transforms to $\mathcal{G}^{k + 1}$ with one edge modified. The new graph $\mathcal{G}^{k + 1}$ is regarded as the new state and is adopted for reward computation. The episode ends when it reaches the max number of actions $K$ . In our implementation, we empirically set $K\leq 20$ for graphs up to thousands of nodes, therefore the sparse reward issue is mitigated compared to single-level RL methods (20 actions v.s. $1000+$ actions per episode).
|
| 117 |
+
|
| 118 |
+
Reward. The new graph $\mathcal{G}^{k + 1}$ results in a modified lower-level optimization problem whose objective becomes $f(\mathbf{x}^{k + 1}|\mathcal{G}^{k + 1})$ , and $\mathbf{x}^{k + 1}$ is solved by an existing heuristic. The reward is computed as the decrease of upper-level objective function given $\mathbf{x}^{k + 1}$ : reward $= f(\mathbf{x}^k |\mathcal{G}) - f(\mathbf{x}^{k + 1}|\mathcal{G})$
|
| 119 |
+
|
| 120 |
+
# 3.2.2 Proximal Policy Optimization (PPO)
|
| 121 |
+
|
| 122 |
+
We resort to the popular Proximal Policy Optimization (PPO) [48] as the RL framework. PPO is the simplified version of Trust Region Policy Optimization (TRPO) [47] where the model update is restricted within a "trust region" to avoid model collapse. PPO is easier to implement than TRPO with comparative performance [48], maximizing the objective: $J(\theta) = \min(r_{\theta} \cdot A, \text{clip}(r_{\theta}, 1 - \epsilon, 1 + \epsilon) \cdot A)$ where $r_{\theta}$ is the importance sampling ratio parameterized by model parameter $\theta$ , $A$ is the advantage value computed by the discounted accumulative reward minus the critic network prediction, and $\epsilon$ is a hyperparameter controlling the boundary of trust region. Some common policy-gradient training tricks are also adopted: we normalize the accumulated rewards during model update, and we add an entropy regularizer to encourage exploration beyond local optima.
|
| 123 |
+
|
| 124 |
+
# 4 Experiments and Case Studies
|
| 125 |
+
|
| 126 |
+
We show the implementations and experiments on three challenging CO problems: DAG scheduling in Sec. 4.1, graph edit distance (GED) in Sec. 4.2, and Hamiltonian cycle problem (HCP) in Sec. 4.3. Our bi-level RL method PPO-BiHyb is compared with learning-free heuristics, and a single-level RL peer method namely PPO-Single following the general framework [30], which also covers the majority of RL-based methods [15, 25, 33, 36, 39, 64]. We also implement Random-BiHyb which performs random graph modification under our bi-level optimization framework. The model capacity and training/inference time of PPO-Single are kept in line with PPO-BiHyb for fair comparison.
|
| 127 |
+
|
| 128 |
+
# 4.1 Case 1: DAG Scheduling
|
| 129 |
+
|
| 130 |
+
The Directed Acyclic Graph (DAG) is the natural representation of real-world jobs with dependency, and the DAG scheduling problem is the abstraction of parallel job scheduling in computer clusters: each node represents a computation job with a running time and a resource requirement, and the node may have several parents and children representing data dependency. The cluster has limited total resources, and jobs can be executed in parallel if there are enough resources and the concurrent jobs do not have data dependency. Such an optimization problem is in general NP-hard [14], and the objective is to minimize the makespan of all jobs, i.e. finish all jobs as soon as possible.
|
| 131 |
+
|
| 132 |
+
# 4.1.1 Implementation Components
|
| 133 |
+
|
| 134 |
+
MDP Formulation. As shown in Fig. 2(a), state is the current DAG $\mathcal{G}^k$ , action is defined as adding an edge to $\mathcal{G}^k$ , resulting in a new DAG $\mathcal{G}^{k + 1}$ . The added edge enforces additional constraints such that the decision space is cut down to elevate the heuristic's performance. Here $\mathbf{x}$ represents the scheduled execution order, and the Critical Path heuristic is adopted on $\mathcal{G}^{k + 1}$ to compute $\mathbf{x}^{k + 1}$ . The reward is computed by subtracting the previous makespan by the new makespan: $f(\mathbf{x}^k |\mathcal{G}) - f(\mathbf{x}^{k + 1}|\mathcal{G})$
|
| 135 |
+
|
| 136 |
+
State Encoder. We adopt GCN [32] to encode the state represented by DAG. Considering the structure of DAG, we design two GCNs: the first GCN processes the original DAG, and the second GCN processes the DAG with all edges reversed. The node embeddings from two GCN modules are concatenated, and an attention pooling layer is adopted to extract a graph-level embedding.
|
| 137 |
+
|
| 138 |
+
$$
|
| 139 |
+
\mathbf {n} = \left[ \operatorname {G C N} _ {1} \left(\mathcal {G} ^ {k}\right) \mid \mid \operatorname {G C N} _ {2} (\operatorname {r e v e r s e} \left(\mathcal {G} ^ {k}\right)) \right], \mathbf {g} = \operatorname {A t t} (\mathbf {n}) \tag {3}
|
| 140 |
+
$$
|
| 141 |
+
|
| 142 |
+
where $\mathbf{n}$ (# of nodes $\times$ embedding dim) is the node embedding, $[\cdot ||\cdot ]$ means concatenation.
|
| 143 |
+
|
| 144 |
+
Actor Net. To reduce action space, the edge selection is decomposed into two steps: first selecting the starting node and then the ending node. The action probabilities of selecting the starting and ending nodes are predicted by two independent 3-layer ResNets blocks [22], respectively, and the input of the second ResNet contains additionally the feature vector of the selected starting node.
|
| 145 |
+
|
| 146 |
+
$$
|
| 147 |
+
P \left(\mathbf {a} _ {1}\right) = \operatorname {s o f t m a x} \left(\operatorname {R e s N e t} _ {1} ([ \mathbf {n} \mid | \mathbf {g} ])\right), P \left(\mathbf {a} _ {2} \mid \mathbf {a} _ {1}\right) = \operatorname {s o f t m a x} \left(\operatorname {R e s N e t} _ {2} ([ \mathbf {n} \mid | \mathbf {n} [ \mathbf {a} _ {1} ] \mid | \mathbf {g} ])\right) \tag {4}
|
| 148 |
+
$$
|
| 149 |
+
|
| 150 |
+
the subscript of $\mathbf{n}[\mathbf{a}_1]$ denotes the embedding for a node $\mathbf{a}_1$ . For training, we sample $\mathbf{a}_1, \mathbf{a}_2$ according to their probabilities $P(\mathbf{a}_1), P(\mathbf{a}_2|\mathbf{a}_1)$ respectively. For testing, beam search is performed with a width of 3: actions with top-3 highest probabilities are searched, and all searched actions are evaluated by the improvement of makespan, and only the top-3 actions are maintained for the next search step.
|
| 151 |
+
|
| 152 |
+
Critic Net. It is built by max pooling over all node features, and the pooled feature is concatenated with the graph feature from state encoder, and finally processed by another ResNet for value prediction.
|
| 153 |
+
|
| 154 |
+
$$
|
| 155 |
+
\widetilde {V} \left(\mathcal {G} ^ {k}\right) = \operatorname {R e s N e t} _ {3} \left(\left[ \operatorname {m a x p o o l} (\mathbf {n}) \mid \mid \mathbf {g} \right]\right) \tag {5}
|
| 156 |
+
$$
|
| 157 |
+
|
| 158 |
+
Heuristic Methods. Solving DAG scheduling with hundreds of nodes is nearly infeasible for existing commercial solvers, and real-world scheduling problems are usually tackled by fast heuristics e.g. Shortest Job First which schedules the shortest job greedily and Critical Path algorithm which prioritizes jobs on the critical path. We also consider the novel Tetris scheduling [20] where jobs are arranged as a Tetris game on the two-dimension space of makespan and
|
| 159 |
+
|
| 160 |
+
resource. Since Critical Path is empirically effective, we use it as the lower level optimization
|
| 161 |
+
|
| 162 |
+

|
| 163 |
+
Figure 3: Generalization result on TPC-H dataset. algorithm in our PPO-BiHyb learning method.
|
| 164 |
+
|
| 165 |
+
Learning Methods. There are some efforts to train a scheduler in data centers with RL [39] and learning a job shop scheduler with RL [64]. They can be viewed as embodiments with tailored techniques for specific problems based on the end-to-end single-level pipeline [30]. Since the problem settings in [39, 64] are different with ours, we re-implement the single-level RL baseline PPO-Single following [30], where the model details are in line with our PPO-BiHyb. To validate the effectiveness of PPO, we also compare Random-BiHyb which performs random search instead of PPO when modifying the graph, and its search steps are equal to PPO-BiHyb.
|
| 166 |
+
|
| 167 |
+
# 4.1.2 Experimental Results
|
| 168 |
+
|
| 169 |
+
Results are reported for scheduling jobs from TPC-H dataset<sup>3</sup>, which is composed of business-oriented queries and concurrent data modifications, represented by Directed Acyclic Graphs (DAGs). Each DAG represents a unique computation job, and each node in DAG has two properties: execution time and resource requirement. The DAG in TPC-H dataset has 9.18 nodes in average, and the smallest job has 2 nodes and the largest one has 18 nodes. The average resource requirement is
|
| 170 |
+
|
| 171 |
+
Table 1: Experimental results on DAG scheduling problems from TPC-H dataset with the average number of nodes reported in brackets. The dataset name TPC-H-X means X jobs are jointly scheduled. The upper half is learning-free heuristics, and the lower half is RL-based methods where PPO-Single can be viewed as our implementation of the peer single-level RL methods [39, 64]. Note "objective" means the objective score i.e. the total makespan time (in seconds, the smaller the better) to finish all jobs, and "relative" is computed by the solved makespan time w.r.t. Critical Path heuristic.
|
| 172 |
+
|
| 173 |
+
<table><tr><td rowspan="2">method\dataset</td><td colspan="2">TPC-H-50 (#nodes=467.2)</td><td colspan="2">TPC-H-100 (#nodes=929.8)</td><td colspan="2">TPC-H-150 (#nodes=1384.5)</td></tr><tr><td>objective ↓</td><td>relative ↓</td><td>objective ↓</td><td>relative ↓</td><td>objective ↓</td><td>relative ↓</td></tr><tr><td>Shortest Job First</td><td>12818±2214</td><td>30.5%</td><td>19503±3260</td><td>15.3%</td><td>27409±2748</td><td>12.2%</td></tr><tr><td>Tetris [20]</td><td>12113±1398</td><td>23.3%</td><td>18291±2223</td><td>8.1%</td><td>25325±2842</td><td>3.7%</td></tr><tr><td>Critical Path</td><td>9821±1176</td><td>0.0%</td><td>16914±2499</td><td>0.0%</td><td>24429±2484</td><td>0.0%</td></tr><tr><td>PPO-Single [30, 39]</td><td>10578±2092</td><td>7.7%</td><td>17282±3821</td><td>2.2%</td><td>24822±2707</td><td>1.6%</td></tr><tr><td>Random-BiHyb</td><td>9270±1143</td><td>-5.6%</td><td>15580±2409</td><td>-7.9%</td><td>22930±2408</td><td>-6.1%</td></tr><tr><td>PPO-BiHyb (ours)</td><td>8906±922</td><td>-9.3%</td><td>15193±2275</td><td>-10.2%</td><td>22371±2538</td><td>-8.4%</td></tr></table>
|
| 174 |
+
|
| 175 |
+
125.8, and the minimum is 1 and the maximum is 593. The average task duration is 1127.2 sec, and the minimum is 16.3 sec and the maximum is 4964.5 sec. We jointly schedule multiple DAGs and assume there are 6000 total resources (i.e. the heaviest job node consumes around $10\%$ of total resources), to reflect real-world business scenarios. We build 50 training and 10 testing samples for all experiments, where each sample is composed of a number of DAGs (e.g. 50, 100, 150) which are randomly sampled from the TPC-H dataset with fixed random seed.
|
| 176 |
+
|
| 177 |
+
Table 1 reports the result on scheduling jobs from TPC-H dataset, where our PPO-BiHyb outperforms learning-free heuristics and the single level learning method. We control the beam search width of PPO-Single as 10 allowing its inference time to be $10 \times$ longer than ours, but its performance is still inferior to the heuristic Critical Path method. Random-BiHyb can improve the lower-level heuristic algorithm but its performance is still inferior to PPO-BiHyb, suggesting the effectiveness of both our bi-level optimization framework and PPO training. We also study the generalization of our learning approach, which is critically important for real-world applications. As shown in Fig. 3, our models are learned with TPC-H-50/100/150 datasets, and we report their test results on the unseen datasets ranging from TPC-H-50 to 300 at 50 interval. The plotted results are the improvement of makespan w.r.t. Critical Path. All our learning models generalize well with unseen data, even on larger problems with up to 300 DAGs ( $\sim$ 2800 nodes). The generalization results of PPO-Single are omitted because they are consistently inferior to the Critical Path heuristic. The reason that our method generalizes soundly, in our analysis, is because learning the graph-optimization strategy is easier compared to directly learning the solution, and the strength of heuristics is exploited with our approach.
|
| 178 |
+
|
| 179 |
+
# 4.2 Case 2: Graph Edit Distance
|
| 180 |
+
|
| 181 |
+
The graph edit distance (GED) problem is NP-hard [1] and can be readily used in modeling a family of pattern recognition tasks requiring to measure the similarity between graphs, with applications to drug discovery [43], malware detection [34] and scene graph edition [7]. Given two graphs, the objective is finding the cheapest edit path from one graph to the other, and the costs of edit operations are defined by problem-specific distance metrics. For example, for molecule graphs, we may define an edit cost between different atom types (i.e. different node types), and also an edit cost for creating/removing a chemical bound (i.e. adding/removing an edge).
|
| 182 |
+
|
| 183 |
+
# 4.2.1 Implementation Components
|
| 184 |
+
|
| 185 |
+
Bi-level Optimization Formulation. Since GED involves two graphs, the formulation of the bi-level optimization of GED is a generalization from Eq. 2:
|
| 186 |
+
|
| 187 |
+
$$
|
| 188 |
+
\begin{array}{l} \min _ {\mathbf {x} ^ {\prime}, \mathcal {G} _ {1} ^ {\prime}} f \left(\mathbf {x} ^ {\prime} \mid \mathcal {G} _ {1}, \mathcal {G} _ {2}\right) \quad s. t. \quad H _ {j} \left(\mathcal {G} _ {1} ^ {\prime}, \mathcal {G} _ {1}\right) \leq 0, \text {f o r} j = 1 \dots J \tag {6} \\ \mathbf {x} ^ {\prime} \in \arg \min _ {\mathbf {x} ^ {\prime}} \left\{f \left(\mathbf {x} ^ {\prime} \mid \mathcal {G} _ {1} ^ {\prime}, \mathcal {G} _ {2}\right): h _ {i} \left(\mathbf {x} ^ {\prime}, \mathcal {G} _ {1} ^ {\prime}, \mathcal {G} _ {2}\right) \leq 0, \text {f o r} i = 1 \dots I \right\} \\ \end{array}
|
| 189 |
+
$$
|
| 190 |
+
|
| 191 |
+
where $f(\mathbf{x}'|\mathcal{G}_1,\mathcal{G}_2)$ is the upper-level objective: the graph edit cost given $\mathcal{G}_1, \mathcal{G}_2$ and $\mathbf{x}'$ . The decision variable $\mathbf{x}'$ encodes the node editions from $\mathcal{G}_1$ to $\mathcal{G}_2$ , based on which the edge editions can be induced. The upper-level constraints $H_{j}(\mathcal{G}_{1}',\mathcal{G}_{1})\leq 0$ ensure that $\mathcal{G}_1'$ has at most $K$ modification steps from $\mathcal{G}_1$ . The lower-level GED problem is defined between $\mathcal{G}_2$ and the modified $\mathcal{G}_1'$ , where $f(\mathbf{x}'|\mathcal{G}_1',\mathcal{G}_2)$ is the lower-level objective, and $h_i(\mathbf{x}',\mathcal{G}_1',\mathcal{G}_2)\leq 0$ are the constraints of the lower-level GED.
|
| 192 |
+
|
| 193 |
+
MDP Formulation. As shown in Fig. 2(b), since there are two graphs, we discriminate the graphs by subscripts $\mathcal{G}_1, \mathcal{G}_2$ , and the MDP formulation is a generalized version of Sec. 3.2.1. More specifically, the RL agent is restricted to modifying $\mathcal{G}_1$ and keeping $\mathcal{G}_2$ fixed. The state is defined as the current version of first graph $\mathcal{G}_1^k$ , and action is defined as adding or removing an edge from $\mathcal{G}_1^k$ . The new graph $\mathcal{G}_1^{k+1}$ is designed to better align with $\mathcal{G}_2$ , where the node-to-node alignment is encoded by the decision variable $\mathbf{x}^k$ which is obtained by solving $f(\mathbf{x}^k | \mathcal{G}_1^k, \mathcal{G}_2)$ with IPFP heuristic. The upper-level objective $f(\mathbf{x}^k | \mathcal{G}_1, \mathcal{G}_2)$ is computed as the edit distance between the original graphs $\mathcal{G}_1, \mathcal{G}_2$ using $\mathbf{x}^k$ .
|
| 194 |
+
|
| 195 |
+
State Encoder. The state encoder is built with GCN [32] to aggregate graph features and Sinkhorn-Knopp (SK) network [49] for propagation across two graphs. Following [58], the SK module accepts a similarity matrix (by inner-product of $\mathbf{n}_1$ and $\mathbf{n}_2$ ), and outputs a doubly-stochastic matrix which can be utilized as the cross-graph propagation weights. As the action is taken on the first graph, we compute the difference of the node features of graph 1 and the propagated features from graph 2 through SK net. Similar to the DAG model, graph-level features are obtained via attention pooling.
|
| 196 |
+
|
| 197 |
+
$$
|
| 198 |
+
\mathbf {n} _ {1} = \operatorname {G C N} \left(\mathcal {G} _ {1} ^ {k}\right), \mathbf {n} _ {2} = \operatorname {G C N} \left(\mathcal {G} _ {2}\right), \mathbf {n} = \mathbf {n} _ {1} - \operatorname {S K} \left(\mathbf {n} _ {1} \mathbf {n} _ {2} ^ {\top}\right) \cdot \mathbf {n} _ {2}; \mathbf {g} _ {1} = \operatorname {A t t} \left(\mathbf {n} _ {1}\right), \mathbf {g} _ {2} = \operatorname {A t t} \left(\mathbf {n} _ {2}\right) \tag {7}
|
| 199 |
+
$$
|
| 200 |
+
|
| 201 |
+
Actor Net. The action of selecting an edge to add or delete is also decomposed by two node-selection steps. The starting node selection is predicted by 3-layer ResNet module, and the ending node selection is predicted by an attention query. The edge is deleted if it already exists, or added otherwise. The beam width is set as 3 during evaluation.
|
| 202 |
+
|
| 203 |
+
$$
|
| 204 |
+
P \left(\mathbf {a} _ {1}\right) = \operatorname {s o f t m a x} \left(\operatorname {R e s N e t} (\mathbf {n})\right), P \left(\mathbf {a} _ {2} \mid \mathbf {a} _ {1}\right) = \operatorname {s o f t m a x} \left(\mathbf {n} \cdot \tanh \left(\operatorname {L i n e a r} \left(\mathbf {n} [ \mathbf {a} _ {1} ]\right)\right) ^ {\top}\right) \tag {8}
|
| 205 |
+
$$
|
| 206 |
+
|
| 207 |
+
Critic Net. We follow the graph-wise similarity learning method [3] to implement the critic net, where the graph-level features are processed by a neural tensor network (NTN) [50] followed by 2 fully-connected regression layers whose output is one-dimensional:
|
| 208 |
+
|
| 209 |
+
$$
|
| 210 |
+
\widetilde {V} \left(\mathcal {G} _ {1} ^ {k}, \mathcal {G} _ {2}\right) = \operatorname {f c} \left(\operatorname {N T N} \left(\mathbf {g} _ {1}, \mathbf {g} _ {2}\right)\right) \tag {9}
|
| 211 |
+
$$
|
| 212 |
+
|
| 213 |
+
Heuristic Methods. Based on the comprehensive evaluation on different GED heuristics by [5], we select 4 best-performing heuristics: Hungarian [44] which simplifies the original problem to a bipartite matching problem, RRWM [9] which solves GED via relaxed quadratic programming, Hungarian-Search [45] which is a search algorithm guided by Hungarian heuristic, and IPFP [6] which combines searching and quadratic programming. We empirically find IPFP best performs among all heuristics, therefore we base our PPO-BiHyb method on it.
|
| 214 |
+
|
| 215 |
+
Learning Methods. There are efforts to learn graph-wise similarity via deep learning [3, 34], which are regression models and ignores the combinatorial nature of graph similarity problems. [60] proposes neural-guided A* search, however, the learning is supervised. In this paper, we compare with PPO-Single by reimplementing [36] for GED. The model details and RL algorithm are kept in line with our PPO-BiHyb for fair comparison. The beam search width of PPO-Single is set to 20 so that the inference time of PPO-Single is comparable with our hybrid method. We also compare with Random-BiHyb which performs equal step numbers of random search w.r.t. PPO-BiHyb.
|
| 216 |
+
|
| 217 |
+
# 4.2.2 Experimental Results
|
| 218 |
+
|
| 219 |
+
Results on GED are reported on AIDS dataset<sup>4</sup> containing chemical compounds for anti-HIV research [43]. The atoms are treated as nodes, and the atom types are encoded by one-hot features and we define an edit cost of 1 for different node types. Besides, we also define the cost for node/edge addition/deletion as 1. The AIDS dataset is split into three subsets w.r.t. the size of graphs, namely AIDS-20/30, AIDS-30/50, and AIDS-50+, and we exclude graphs smaller than 20 nodes because they are less challenging and can be solved exactly within several hours. We randomly build 50 training and 10 testing samples for all tests with fixed random seed.
|
| 220 |
+
|
| 221 |
+

|
| 222 |
+
Figure 4: Sensitivity by training and testing sizes on AIDS.
|
| 223 |
+
|
| 224 |
+
The evaluation for GED on AIDS is presented in Tab. 2, where our method surpasses all learning-free heuristics and also performs better than single-level RL baseline PPO-Single. We also conduct a
|
| 225 |
+
|
| 226 |
+
Table 2: Results on graph edit distance (GED) problems from AIDS dataset with the average number of nodes reported in brackets. The naming convention here AIDS-X/Y means the number of nodes are within the range of X and Y. PPO-Single can be viewed as our implementation of the peer RL method [36]. Here "objective" means the objective score i.e. the solved edit distance between two graphs, and "relative" is computed by the solved edit distance w.r.t. the best heuristic IPFP.
|
| 227 |
+
|
| 228 |
+
<table><tr><td rowspan="2">dataset method</td><td colspan="2">AIDS-20/30 (#nodes=22.6)</td><td colspan="2">AIDS-30/50 (#nodes=37.9)</td><td colspan="2">AIDS-50+ (#nodes=59.6)</td></tr><tr><td>objective ↓</td><td>relative ↓</td><td>objective ↓</td><td>relative ↓</td><td>objective ↓</td><td>relative ↓</td></tr><tr><td>Hungarian [44]</td><td>72.9±19.2</td><td>94.9%</td><td>153.4±28.0</td><td>117.9%</td><td>225.6±33.9</td><td>121.4%</td></tr><tr><td>RRWM [9]</td><td>72.1±23.7</td><td>92.8%</td><td>139.8±31.9</td><td>98.6%</td><td>214.6±41.3</td><td>110.6%</td></tr><tr><td>Hungarian-Search [45]</td><td>44.6±8.5</td><td>19.3%</td><td>103.9±22.7</td><td>47.6%</td><td>143.8±31.5</td><td>41.1%</td></tr><tr><td>IPFP [6]</td><td>37.4±8.5</td><td>0.0%</td><td>70.4±15.1</td><td>0.0%</td><td>101.9±13.1</td><td>0.0%</td></tr><tr><td>PPO-Single [36]</td><td>56.5±14.4</td><td>51.1%</td><td>110.0±19.2</td><td>56.3%</td><td>183.9±16.9</td><td>80.5%</td></tr><tr><td>Random-BiHyb</td><td>33.1±9.0</td><td>-11.5%</td><td>66.0±15.2</td><td>-6.3%</td><td>82.4±20.3</td><td>-19.1%</td></tr><tr><td>PPO-BiHyb (ours)</td><td>29.1±8.9</td><td>-22.2%</td><td>61.1±14.2</td><td>-13.2%</td><td>77.0±19.4</td><td>-24.4%</td></tr></table>
|
| 229 |
+
|
| 230 |
+
generalization study among different problem sizes, and Fig. 4 shows the objective scores from the sensitivity test for training/testing on GED problems with different sizes. The color map represents the percentage of improvement w.r.t. IPFP, where the darker color means more improvement. Our model can generalize to problem sizes unseen during training, but the generalized performance is usually inferior compared to training and testing with the same problem size.
|
| 231 |
+
|
| 232 |
+
# 4.3 Case 3: Hamiltonian Cycle Problem
|
| 233 |
+
|
| 234 |
+
The Hamiltonian cycle problem (HCP) arises from the notable problem of seven bridges of Königsberg proposed by Leonhard Eule [12]. Given a graph, the HCP arises as a decision problem on whether there exists a Hamiltonian cycle, which is known to be NP-complete [17]. We handle HCP by transforming it into the more general traveling salesman problem (TSP) in a fully-connected graph, where the existing edges in HCP are defined with length 0, and non-existing edges are with length 1. If a tour is found whose length is 0, then it is a Hamiltonian cycle. It is worth noting that HCP has not been discussed by most ML-TSP works [15, 28, 33, 57], which focus on the special case of Euclidean TSP with 2D coordinates.
|
| 235 |
+
|
| 236 |
+
# 4.3.1 Implementation Components
|
| 237 |
+
|
| 238 |
+
MDP Formulation. The HCP instances are converted to TSP as discussed above, to leverage existing powerful TSP heuristics. As shown in Fig. 2(c), state is defined as the current graph $\mathcal{G}^k$ and action is defined as increasing an edge length in $\mathcal{G}^k$ , resulting in a new graph $\mathcal{G}^{k + 1}$ . The increase of edge length softly prohibits the heuristic from traveling this edge and the LKH method [23] is adopted on graph $\mathcal{G}^{k + 1}$ to obtain the new tour $\mathbf{x}^{k + 1}$ . The reward is the decrease of the current tour length w.r.t. the previous tour length: $f(\mathbf{x}^k |\mathcal{G}) - f(\mathbf{x}^{k + 1}|\mathcal{G})$ .
|
| 239 |
+
|
| 240 |
+
State Encoder. We encode HCP graph with GCN [32]. The node embeddings from GCN modules are then processed by an attention pooling layer to extract a graph-level embedding.
|
| 241 |
+
|
| 242 |
+
$$
|
| 243 |
+
\mathbf {n} = \operatorname {G C N} \left(\mathcal {G} ^ {k}\right), \mathbf {g} = \operatorname {A t t} (\mathbf {n}) \tag {10}
|
| 244 |
+
$$
|
| 245 |
+
|
| 246 |
+
Actor Net. After obtaining the tour solved by the heuristic algorithm, we increase the length of any selected edge by 10 from the tour (10 is randomly set, and our approach seems not sensitive to this number), which empirically makes the edge harder to be selected in a tour later. The first action probabilities of selecting the starting node are predicted by a 3-layer ResNet block [22]. The second action is selecting the ending node, which is adjacent to the starting node on the tour and is predicted by an attention query.
|
| 247 |
+
|
| 248 |
+
$$
|
| 249 |
+
P \left(\mathbf {a} _ {1}\right) = \operatorname {s o f t m a x} \left(\operatorname {R e s N e t} _ {1} ([ \mathbf {n} | | \mathbf {g} ])\right), P \left(\mathbf {a} _ {2} \mid \mathbf {a} _ {1}\right) = \operatorname {s o f t m a x} \left(\mathbf {n} \cdot \tanh \left(\operatorname {L i n e a r} (\mathbf {n} [ \mathbf {a} _ {1} ])\right) ^ {\top}\right) \tag {11}
|
| 250 |
+
$$
|
| 251 |
+
|
| 252 |
+
For training, we sample $\mathbf{a}_1, \mathbf{a}_2$ according to $P(\mathbf{a}_1), P(\mathbf{a}_2|\mathbf{a}_1)$ . For evaluation, we perform a beam search with a width of 12 and maintain the top-12 actions by the improvement of the LKH algorithm.
|
| 253 |
+
|
| 254 |
+
Critic Net. It is built by max-pooling from all node features, and the pooled feature is concatenated with the graph-level feature from the state encoder, which are finally processed by a 3-layer ResNet.
|
| 255 |
+
|
| 256 |
+
$$
|
| 257 |
+
\widetilde {V} \left(\mathcal {G} ^ {k}\right) = \operatorname {R e s N e t} _ {2} \left(\left[ \operatorname {m a x p o o l} (\mathbf {n}) \mid \mid \mathbf {g} \right]\right) \tag {12}
|
| 258 |
+
$$
|
| 259 |
+
|
| 260 |
+
Heuristic Methods. The performance of heuristics varies with the sizes and characteristics of instances. We choose three algorithms, Nearest Neighbour [42] who greedily travels to the next nearest node, Farthest Insertion [46] who repeatedly insert the non-traveled node with the farthest distance to the existing tour, and the third version of Lin-Kernighan Heuristic (LKH3) [23] which succeeds in discovering the best-known solutions for many TSP instances. For LKH3, we can set the number of random restarts (5 by default) to trade-off time for accuracy. We name the default LKH config as LKH3-fast which is also adopted for lower-level optimization in our PPO-BiHyb for its time-efficiency, and we also compare with LKH3-accu with 100 random restarts.
|
| 261 |
+
|
| 262 |
+
Learning Methods. Most deep learning TSP models are designated to handle fully connected 2D Euclidean TSP [15, 28, 33, 57], which is a different setting compared to our HCP testbed. Therefore, we implement PPO-Single following the most general framework [30], and the model details are kept in line with our PPO-BiHyb. We also compare with the Random-BiHyb baseline.
|
| 263 |
+
|
| 264 |
+
# 4.3.2 Experimental Results
|
| 265 |
+
|
| 266 |
+
Table 3: Tests on FHCP with mean number of nodes in brackets. FHCP-X/Y means the number of nodes is in the range of X and Y.
|
| 267 |
+
|
| 268 |
+
<table><tr><td rowspan="2">dataset method</td><td colspan="2">FHCP-500/600 (#nodes=535.1)</td></tr><tr><td>TSP objective ↓</td><td>found cycles ↑</td></tr><tr><td>Nearest Neighbor [42]</td><td>79.6±13.4</td><td>0%</td></tr><tr><td>Farthest Insertion [46]</td><td>133.0±31.7</td><td>0%</td></tr><tr><td>LKH3-fast [23]</td><td>13.8±25.2</td><td>0%</td></tr><tr><td>LKH3-accu [23]</td><td>6.3±13.0</td><td>20%</td></tr><tr><td>PPO-Single [30]</td><td>9.5±45.6</td><td>0%</td></tr><tr><td>Random-BiHyb</td><td>10.0±21.9</td><td>0%</td></tr><tr><td>PPO-BiHyb (ours)</td><td>6.7±14.0</td><td>25%</td></tr></table>
|
| 269 |
+
|
| 270 |
+
We use the FHCP benchmark [21] composed of 1001 hard HCP instances. All instances are known to have valid Hamiltonian cycles, however, finding them is non-trivial for standard HCP/TSP heuristics. Table 3 shows the result from a subset of FHCP benchmark. As mentioned in Sec. 4.3.1, we convert HCP instances to binary TSP instances, whose tour lengths are denoted as "TSP objective". When the TSP objective is 0, it means that a Hamiltonian cycle is found. We use 50 training instances and 20 testing instances, and the model is trained on graphs of sizes from 250 to 500. Our method is comparative with the novel heuristic LKH3-accu and can even surpass in terms of found Hamiltonian cycles, which is the objective of HCP. We set the beamwidth of PPO-Single as 12 allowing its inference time to be $10 \times$ longer than ours, but its performance is still inferior to our bi-level PPO-BiHyb. The Random-BiHyb baseline also improves the performance of LKH3-fast which is adopted for solving the lower-level problem.
|
| 271 |
+
|
| 272 |
+
# 5 Discussions
|
| 273 |
+
|
| 274 |
+
Limitations in model design. In this paper, we adopt the vanilla GCN implemented by TorchGeometric [13], and the detailed configurations can be found in Appendix C. The main purpose of our model design is to validate the effectiveness of the proposed bi-level optimization framework, and we adopt standard building blocks without heavy engineering, from which perspective we agree that there are room for further improvement. One possible direction may be adopting GNN-neural architecture search methods [16, 65].
|
| 275 |
+
|
| 276 |
+
Potential negative impacts. Our approach may potentially decrease the job opportunities and burden the workload of employees in companies, and it calls for the companies and social groups to take more responsible roles when facing the negative effects that come along with optimization tools.
|
| 277 |
+
|
| 278 |
+
Conclusion. We present a bi-level optimization framework based on hybrid machine learning and traditional heuristics, where the graph structure is optimized by RL to narrow down the feasible space of combinatorial problems. The new optimization problems are solved by fast heuristics. Experiments on large real-world combinatorial problems show the effectiveness of our approach. Our method also shows good generalization ability from small training instances to larger testing instances.
|
| 279 |
+
|
| 280 |
+
# Acknowledgments and Disclosure of Funding
|
| 281 |
+
|
| 282 |
+
This work was partly supported by National Key Research and Development Program of China (2020AAA0107600), Shanghai Municipal Science and Technology Major Project (2021SHZDZX0102), NSFC (U19B2035, 61972250, 72061127003), and Ant Group through Ant Research Program. The author Runzhong Wang was also partly supported by Wen-Tsun Wu Honorary Doctoral Scholarship, AI Institute, Shanghai Jiao Tong University. We would also like to thank Chang Liu, Jia Yan and Runsheng Gan for their valuable discussions when we were working on this paper.
|
| 283 |
+
|
| 284 |
+
# References
|
| 285 |
+
|
| 286 |
+
[1] Z. Abu-Aisheh, R. Raveaux, J.-Y. Ramel, and P. Martineau. An exact graph edit distance algorithm for solving pattern recognition problems. In Int. Conf. Pattern Recog., 2015.
|
| 287 |
+
[2] S. A. Bagloee, M. Asadi, M. Sarvi, and M. Patriksson. A hybrid machine-learning and optimization method to solve bi-level problems. Expert Systems with Applications, 95:142-152, 2018.
|
| 288 |
+
[3] Y. Bai, H. Ding, S. Bian, T. Chen, Y. Sun, and W. Wang. Simgnn: A neural network approach to fast graph similarity computation. In Proceedings of the Twelfth ACM International Conference on Web Search and Data Mining, pages 384-392, 2019.
|
| 289 |
+
[4] Y. Bengio, A. Lodi, and A. Prouvost. Machine learning for combinatorial optimization: a methodological tour d'horizon. European Journal of Operational Research, 2020.
|
| 290 |
+
[5] D. B. Blumenthal, N. Boria, J. Gamper, S. Bougleux, and L. Brun. Comparing heuristics for graph edit distance computation. The VLDB journal, 29(1):419-458, 2020.
|
| 291 |
+
[6] S. Bougleux, B. Gaüzere, and L. Brun. Graph edit distance as a quadratic program. In Int. Conf. Pattern Recog., pages 1701-1706. IEEE, 2016.
|
| 292 |
+
[7] L. Chen, G. Lin, S. Wang, and Q. Wu. Graph edit distance reward: Learning to edit scene graph. Eur. Conf. Comput. Vis., 2020.
|
| 293 |
+
[8] X. Chen and Y. Tian. Learning to perform local rewriting for combinatorial optimization. Neural Info. Process. Systems, 32:6281-6292, 2019.
|
| 294 |
+
[9] M. Cho, J. Lee, and K. M. Lee. Reweighted random walks for graph matching. In *Eur. Conf. Comput. Vis.*, pages 492-505. Springer, 2010.
|
| 295 |
+
[10] H. Dai, B. Dai, and L. Song. Discriminative embeddings of latent variable models for structured data. In Int. Conf. Mach. Learn., pages 2702-2711. PMLR, 2016.
|
| 296 |
+
[11] L. Duan, H. Hu, Y. Qian, Y. Gong, X. Zhang, J. Wei, and Y. Xu. A multi-task selected learning approach for solving 3d flexible bin packing problem. In Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, pages 1386-1394, 2019.
|
| 297 |
+
[12] L. Euler. Solutio problematis ad geometriam situs pertinentis. Commentarii academiae scientiarum Petropolitanae, pages 128-140, 1741.
|
| 298 |
+
[13] M. Fey and J. E. Lenssen. Fast graph representation learning with PyTorch Geometric. In ICLR Workshop on Representation Learning on Graphs and Manifolds, 2019.
|
| 299 |
+
[14] A. Forti. DAG Scheduling for Grid Computing Systems. PhD thesis, University of Udine, Italy, 2006.
|
| 300 |
+
[15] Z.-H. Fu, K.-B. Qiu, and H. Zha. Generalize a small pre-trained model to arbitrarily large tsp instances. AAAI Conf. Artificial Intell., 2021.
|
| 301 |
+
[16] Y. Gao, H. Yang, P. Zhang, C. Zhou, and Y. Hu. Graph neural architecture search. In C. Bessiere, editor, Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI-20, pages 1403–1409. International Joint Conferences on Artificial Intelligence Organization, 7 2020. Main track.
|
| 302 |
+
[17] M. R. Garey and D. S. Johnson. Computers and Intractability; A Guide to the Theory of NP-Completeness. 1990.
|
| 303 |
+
[18] M. Gasse, D. Chételat, N. Ferroni, L. Charlin, and A. Lodi. Exact combinatorial optimization with graph convolutional neural networks. Neural Info. Process. Systems, 2019.
|
| 304 |
+
[19] R. E. Gomory. Outline of an algorithm for integer solutions to linear programs and an algorithm for the mixed integer problem. In 50 Years of Integer Programming 1958-2008, pages 77-103. Springer, 2010.
|
| 305 |
+
[20] R. Grandl, G. Ananthanarayanan, S. Kandula, S. Rao, and A. Akella. Multi-resource packing for cluster schedulers. SIGCOMM Comput. Commun. Rev., 44(4):455-466, Aug. 2014.
|
| 306 |
+
[21] M. Haythorpe. Fhcp challenge set: The first set of structurally difficult instances of the hamiltonian cycle problem. arXiv preprint arXiv:1902.10352, 2019.
|
| 307 |
+
[22] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In Comput. Vis. Pattern Recog., pages 770-778, 2016.
|
| 308 |
+
|
| 309 |
+
[23] K. Helsgaun. An effective implementation of the lin-kernighan traveling salesman heuristic. European Journal of Operational Research, 126(1):106-130, 2000.
|
| 310 |
+
[24] K. Helsgaun. An extension of the lin-kernighan-helsgaun tsp solver for constrained traveling salesman and vehicle routing problems. Roskilde: Roskilde University, 2017.
|
| 311 |
+
[25] R. Hu, J. Xu, B. Chen, M. Gong, H. Zhang, and H. Huang. Tap-net: transport-and-pack using reinforcement learning. ACM Transactions on Graphics (TOG), 39(6):1-15, 2020.
|
| 312 |
+
[26] F. Hutter, H. H. Hoos, and K. Leyton-Brown. Automated configuration of mixed integer programming solvers. In Proceedings of the Conference on Integration of Artificial Intelligence and Operations Research techniques in Constraint Programming (CPAIOR), pages 186-202, 2010.
|
| 313 |
+
[27] R. G. Jeroslow. The polynomial hierarchy and a simple model for competitive analysis. Mathematical programming, 32(2):146-164, 1985.
|
| 314 |
+
[28] C. K. Joshi, T. Laurent, and X. Bresson. An efficient graph convolutional network technique for the travelling salesman problem. arXiv preprint arXiv:1906.01227, 2019.
|
| 315 |
+
[29] N. Karalias and A. Loukas. Erdos goes neural: an unsupervised learning framework for combinatorial optimization on graphs. In Neural Info. Process. Systems, 2020.
|
| 316 |
+
[30] E. Khalil, H. Dai, Y. Zhang, B. Dilkina, and L. Song. Learning combinatorial optimization algorithms over graphs. In Neural Info. Process. Systems, pages 6351-6361, 2017.
|
| 317 |
+
[31] E. Khalil, P. Le Bodic, L. Song, G. Nemhauser, and B. Dilkina. Learning to branch in mixed integer programming. In AAAI Conf. Artificial Intell., volume 30, 2016.
|
| 318 |
+
[32] T. N. Kipf and M. Welling. Semi-supervised classification with graph convolutional networks. Int. Conf. Learn. Rep., 2017.
|
| 319 |
+
[33] W. Kool, H. van Hoof, and M. Welling. Attention, learn to solve routing problems! In Int. Conf. Learn. Rep., pages 1-25, 2019.
|
| 320 |
+
[34] Y. Li, C. Gu, T. Dullien, O. Vinyals, and P. Kohli. Graph matching networks for learning the similarity of graph structured objects. In International Conference on Machine Learning, pages 3835-3845, 2019.
|
| 321 |
+
[35] Z. Li, Q. Chen, and V. Koltun. Combinatorial optimization with graph convolutional networks and guided tree search. In S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018.
|
| 322 |
+
[36] C. Liu, R. Wang, Z. Jiang, and J. Yan. Deep reinforcement learning of graph matching. arXiv preprint arXiv:2012.08950, 2020.
|
| 323 |
+
[37] R. Liu, J. Gao, J. Zhang, D. Meng, and Z. Lin. Investigating bi-level optimization for learning and vision from a unified perspective: A survey and beyond. arXiv preprint arXiv:2101.11517, 2021.
|
| 324 |
+
[38] H. Lu, X. Zhang, and S. Yang. A learning-based iterative method for solving vehicle routing problems. In Int. Conf. Learn. Rep., 2019.
|
| 325 |
+
[39] H. Mao, M. Schwarzkopf, S. B. Venkatakrishnan, Z. Meng, and M. Alizadeh. Learning scheduling algorithms for data processing clusters. In Proceedings of the ACM Special Interest Group on Data Communication, pages 270-288, 2019.
|
| 326 |
+
[40] V. Mnih, K. Kavukcuoglu, D. Silver, A. Graves, I. Antonoglou, D. Wierstra, and M. Riedmiller. Playing atari with deep reinforcement learning. arXiv preprint arXiv:1312.5602, 2013.
|
| 327 |
+
[41] A. Nowak, S. Villar, A. Bandeira, and J. Bruna. Revised note on learning quadratic assignment with graph neural networks. In Data Science Workshop, 2018.
|
| 328 |
+
[42] G. Reinelt. The traveling salesman: computational solutions for TSP applications, volume 840. Springer, 2003.
|
| 329 |
+
[43] K. Riesen and H. Bunke. Iam graph database repository for graph based pattern recognition and machine learning. In Joint IAPR International Workshops on Statistical Techniques in Pattern Recognition (SPR) and Structural and Syntactic Pattern Recognition (SSPR), pages 287-297. Springer, 2008.
|
| 330 |
+
[44] K. Riesen and H. Bunke. Approximate graph edit distance computation by means of bipartite graph matching. Image and Vision Computing, 27(7):950-959, 2009.
|
| 331 |
+
|
| 332 |
+
[45] K. Riesen, S. Fankhauser, and H. Bunke. Speeding up graph edit distance computation with a bipartite heuristic. In Mining and Learning with Graphs, pages 21-24, 2007.
|
| 333 |
+
[46] D. J. Rosenkrantz, R. E. Stearns, and P. M. Lewis, II. An analysis of several heuristics for the traveling salesman problem. SIAM journal on computing, 6(3):563-581, 1977.
|
| 334 |
+
[47] J. Schulman, S. Levine, P. Abbeel, M. Jordan, and P. Moritz. Trust region policy optimization. In Int. Conf. Mach. Learn., pages 1889-1897. PMLR, 2015.
|
| 335 |
+
[48] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.
|
| 336 |
+
[49] R. Sinkhorn and A. Ranganajan. A relationship between arbitrary positive matrices and doubly stochastic matrices. Ann. Math. Statistics, 1964.
|
| 337 |
+
[50] R. Socher, D. Chen, C. D. Manning, and A. Y. Ng. Reasoning with neural tensor networks for knowledge base completion. In Neural Info. Process. Systems, page 926–934, Red Hook, NY, USA, 2013. Curran Associates Inc.
|
| 338 |
+
[51] H. Sun, W. Chen, H. Li, and L. Song. Improving learning to branch via reinforcement learning. NeurIPS Workshop, 2020.
|
| 339 |
+
[52] R. S. Sutton and A. G. Barto. Reinforcement learning: An introduction. MIT press, 2018.
|
| 340 |
+
[53] E.-G. Talbi. A Taxonomy of Metaheuristics for Bi-level Optimization, pages 1-39. Springer Berlin Heidelberg, Berlin, Heidelberg, 2013.
|
| 341 |
+
[54] Y. Tang, S. Agrawal, and Y. Faenza. Reinforcement learning for integer programming: Learning to cut. In Int. Conf. Mach. Learn., pages 9367-9376. PMLR, 2020.
|
| 342 |
+
[55] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin. Attention is all you need. arXiv preprint arXiv:1706.03762, 2017.
|
| 343 |
+
[56] L. Vicente, G. Savard, and J. Judice. Descent approaches for quadratic bilevel programming. Journal of Optimization Theory and Applications, 81(2):379-399, 1994.
|
| 344 |
+
[57] O. Vinyals, M. Fortunato, and N. Jaitly. Pointer networks. In Neural Info. Process. Systems, pages 2692-2700, 2015.
|
| 345 |
+
[58] R. Wang, J. Yan, and X. Yang. Combinatorial learning of robust deep graph matching: an embedding based approach. IEEE TPAMI, 2020.
|
| 346 |
+
[59] R. Wang, J. Yan, and X. Yang. Neural graph matching network: Learning lawler's quadratic assignment problem with extension to hypergraph and multiple-graph matching. Trans. Pattern Anal. Mach. Intell., 2021.
|
| 347 |
+
[60] R. Wang, T. Zhang, T. Yu, J. Yan, and X. Yang. Combinatorial learning of graph edit distance via dynamic embedding. Comput. Vis. Pattern Recog., 2021.
|
| 348 |
+
[61] L. Xu, F. Hutter, H. H. Hoos, and K. Leyton-Brown. Hydra-mip: Automated algorithm configuration and selection for mixed integer programming. In RCRA workshop on Experimental Evaluation of Algorithms for Solving Problems with Combinatorial Explosion at the International Joint Conference on Artificial Intelligence (IJCAI), 2011.
|
| 349 |
+
[62] J. Yan, S. Yang, and E. R. Hancock. Learning for graph matching and related combinatorial optimization problems. In Int. Joint Conf. Artificial Intell., 2020.
|
| 350 |
+
[63] Y. Yang and A. B. Whinston. A survey on reinforcement learning for combinatorial optimization. CoRR, abs/2008.12248, 2020.
|
| 351 |
+
[64] C. Zhang, W. Song, Z. Cao, J. Zhang, P. S. Tan, and X. Chi. Learning to dispatch for job shop scheduling via deep reinforcement learning. Neural Info. Process. Systems, 33, 2020.
|
| 352 |
+
[65] H. Zhao, Q. Yao, and W. Tu. Search to aggregate neighborhood for graph neural network. In ICDE, 2021.
|
| 353 |
+
|
| 354 |
+
# Checklist
|
| 355 |
+
|
| 356 |
+
1. For all authors...
|
| 357 |
+
|
| 358 |
+
(a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
|
| 359 |
+
(b) Did you describe the limitations of your work? [Yes] See discussions in Section 5 and Appendix A.
|
| 360 |
+
(c) Did you discuss any potential negative societal impacts of your work? [Yes] See discussions in Section 5 and Appendix G.
|
| 361 |
+
(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]
|
| 362 |
+
|
| 363 |
+
2. If you are including theoretical results...
|
| 364 |
+
|
| 365 |
+
(a) Did you state the full set of assumptions of all theoretical results? [Yes] See Sec. 3.1.
|
| 366 |
+
(b) Did you include complete proofs of all theoretical results? [Yes] See the proposition and proof in Sec. 3.1.
|
| 367 |
+
|
| 368 |
+
3. If you ran experiments...
|
| 369 |
+
|
| 370 |
+
(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes] The code is publicly available at https://github.com/Thinklab-SJTU/PPO-BiHyb
|
| 371 |
+
(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes] See experiment details in Sec. 4 and Appendix C.
|
| 372 |
+
(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [Yes]
|
| 373 |
+
(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [Yes] See Appendix F.
|
| 374 |
+
|
| 375 |
+
4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...
|
| 376 |
+
|
| 377 |
+
(a) If your work uses existing assets, did you cite the creators? [Yes]
|
| 378 |
+
(b) Did you mention the license of the assets? [Yes] See Appendix H.
|
| 379 |
+
(c) Did you include any new assets either in the supplemental material or as a URL? [No] We do not include any new assets.
|
| 380 |
+
(d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [Yes] See Appendix H and all assets are publicly available.
|
| 381 |
+
(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [Yes] See Appendix H and all assets do not contain human information or offensive content.
|
| 382 |
+
|
| 383 |
+
5. If you used crowdsourcing or conducted research with human subjects...
|
| 384 |
+
|
| 385 |
+
(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A] We do not use crowdsourcing or conduct research with human subjects.
|
| 386 |
+
(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
|
| 387 |
+
(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]
|
abilevelframeworkforlearningtosolvecombinatorialoptimizationongraphs/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:38d385249027b0d8037b3cbe0985cd122b20bb826148708fa87ec4f9f3a19ea2
|
| 3 |
+
size 380908
|
abilevelframeworkforlearningtosolvecombinatorialoptimizationongraphs/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1d4aa54d97841709c36af41fad61a6520cb5f00b72f085615ab3c8e4393d5353
|
| 3 |
+
size 508379
|
acausallensforcontrollabletextgeneration/56b4d5c4-1abf-4e8e-b506-78488dbeb0e8_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4eb0e47b1035897eb0fed35feb6d8629327915b57b3d59493443ee988ce40b8a
|
| 3 |
+
size 89215
|
acausallensforcontrollabletextgeneration/56b4d5c4-1abf-4e8e-b506-78488dbeb0e8_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fd774b6d903aaaf7bee00daeff000725213fad22d0cc17ab36b0575bf2b94359
|
| 3 |
+
size 114813
|
acausallensforcontrollabletextgeneration/56b4d5c4-1abf-4e8e-b506-78488dbeb0e8_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c5ca1892cd3f76f720eaed8dbec32ed9d5a0585a4a80dc224e57ae73e59cc6c5
|
| 3 |
+
size 1520947
|
acausallensforcontrollabletextgeneration/full.md
ADDED
|
@@ -0,0 +1,324 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A Causal Lens for Controllable Text Generation
|
| 2 |
+
|
| 3 |
+
Zhiting $\mathbf{H}\mathbf{u}^{1,2}$ Li Erran Li
|
| 4 |
+
|
| 5 |
+
<sup>1</sup>UC San Diego, <sup>2</sup>AWS AI, Amazon
|
| 6 |
+
|
| 7 |
+
zhh019@ucsd.edu, lilimam@amazon.com
|
| 8 |
+
|
| 9 |
+
# Abstract
|
| 10 |
+
|
| 11 |
+
Controllable text generation concerns two fundamental tasks of wide applications, namely generating text of given attributes (i.e., attribute-conditional generation), and minimally editing existing text to possess desired attributes (i.e., text attribute transfer). Extensive prior work has largely studied the two problems separately, and developed different conditional models which, however, are prone to producing biased text (e.g., various gender stereotypes). This paper proposes to formulate controllable text generation from a principled causal perspective which models the two tasks with a unified framework. A direct advantage of the causal formulation is the use of rich causality tools to mitigate generation biases and improve control. We treat the two tasks as interventional and counterfactual causal inference based on a structural causal model, respectively. We then apply the framework to the challenging practical setting where confounding factors (that induce spurious correlations) are observable only on a small fraction of data. Experiments show significant superiority of the causal approach over previous conditional models for improved control accuracy and reduced bias.
|
| 12 |
+
|
| 13 |
+
# 1 Introduction
|
| 14 |
+
|
| 15 |
+
Controllable text generation aims at producing fluent language with control over various attributes, ranging from sentiment, topic, politeness, to gender, persona, and so forth [60, 22]. The problem lies at the heart of many NLP applications such as emotional chatbot, news article writing, language detoxification, etc. Of particular interest in this increasingly significant area are two settings for control, namely (1) attribute-conditional generation [14, 30] which generates sentences that entail a given attribute, and (2) text attribute transfer [64, 26] which rewrites a given sentence to possess a desired attribute while preserving all other original characteristics (Figure 1). The goal is to learn the control in each setting with (attribute, text) training pairs<sup>1</sup>.
|
| 16 |
+
|
| 17 |
+
The two settings have usually been considered as separate tasks and each led to various solutions, respectively. Let $\pmb{x}$ denote a sentence and $a$ an attribute. Previous attribute-conditional generation work typically concerns the conditional distribution $p(\pmb{x}|a)$ [14, 30, 33]. Despite the success of simulating observed real text, the conditional distribution is known to be susceptible to capture spurious correlations or biases in the data [48, 84]. For example, when generating biographical text given a gender attribute, the conditional model tends to generate text related to specific occupations such as nurse and yoga teacher for female, and architect and attorney for male [57, 69] (Figure 1). The learned biases could impair the model generalization to new domains, and make negative social impact in downstream applications. A few very recent attempts have been made to mitigate the biases in the model with various machine learning techniques. Yet those methods are often specific to a particular attribute (e.g., gender) [68, 86], or rely on access to additional resources, such as fully observed confounding labels or a priori biased classifiers [23, 40, 67], which can be costly to obtain in real applications. Furthermore, it is unclear how the diverse methods designed for
|
| 18 |
+
|
| 19 |
+
attribute-conditional generation could also be applied to debias text attribute transfer that has been formulated with distinct training objectives.
|
| 20 |
+
|
| 21 |
+
This paper studies controllable text generation from a principled causal perspective, that offers a unifying formulation of the two central tasks, and enables mitigation of spurious correlations with well-established causality techniques. A growing number of recent work has used causality with machine learning [62] for disentangled representation [54, 78], model explanation [6, 13], and robust prediction [61, 24, 83]. Yet most approaches have focused on the vision domain, taking advantage of image spatial structures, and thus are not directly applicable to text with abstract attributes (such as sentiment). Though previous research on text modeling has also studied related concepts such as counterfactuals, it either handles only correlation instead of causation [58, 39, 76, 23, 86, 46], or focuses on different applications such as data augmentation [85, 28, 82] and classification [13, 29]. We discuss more related work in $\S 4$ .
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
Figure 1: The causal ladder [56] and the formulations of controllable generation tasks corresponding to different rungs of the ladder.
|
| 25 |
+
|
| 26 |
+
We develop the first unified causal framework
|
| 27 |
+
|
| 28 |
+
for text generation under control. In particular, we devise a structural causal model (SCM) [56] that describes the causal relationships between different variables, where the text $\pmb{x}$ is outcome and the attribute $a$ to control (e.g., sentiment) is treatment. The SCM further accounts for spurious correlations with confounders (e.g., category) with latent variables. The resulting SCM enables us to formulate the two control tasks as performing causal inference at different rungs of the causal ladder [56] (Figure 1), respectively. Specifically, (1) for attribute-conditional generation, we go beyond the association-based conditional $p(\pmb{x}|a)$ and propose to instead use $p(\pmb{x}|do(a))$ , corresponding to the intervention rung. The do-operation effectively eliminates the effect of confounders on the control, leading to unbiased text outputs; (2) for text attribute transfer, the task naturally maps to counterfactual prediction on the SCM, which answers the question "what the text would have been if the attribute had been different" through the standard causal inference procedure [56, 55]. The unifying perspective also allows us to draw from existing successful techniques and train the SCM for accurate control and confounder balancing [27, 41, 22].
|
| 29 |
+
|
| 30 |
+
Previous causal work typically assumes access to confounding labels or relevant proxy information for the entire observed data [43, 44, 48]. In many real applications, however, it is prohibitively expensive or impossible to measure all the confounding factors for unbiased training. For example, it is often not affordable to annotate massively the confounding labels for the entire (attribute, text) corpus. We thus consider a more practical yet challenging scenario where we observe confounding information for only a small subset (e.g., $1\% - 5\%$ ) of samples [15]. We experiment on difficult datasets where the target attributes and confounding factors have strong correlations. Results show the causal approach substantially improves over conventional conditional models with enhanced control accuracy and reduced bias, on both attribute-conditional generation and attribute transfer.
|
| 31 |
+
|
| 32 |
+
# 2 Background
|
| 33 |
+
|
| 34 |
+
We first briefly review the causal concepts most relevant to the paper. A structural causal model (SCM) [56] is defined by a directed graph consisting of nodes (variables) and edges (direct causal dependence between variables), e.g., Figure 2. Different inference questions on an SCM correspond to different levels of the causal ladder (Figure 1) and require different reasoning tools: (1) "Association" deals with correlations in observed data with joint/marginal/conditional distributions. (2) "Intervention" concerns what would happen were some actions been performed. A typical question is to estimate the distribution of an outcome variable $x$ given an intervention on a treatment variable $a$ : $p(\pmb{x} | \text{do}(a))$ , where the do-operation represents an action on $a$ by setting it to a given value. With randomized experimental data (i.e., collected by randomly assigning treatment), $p(\pmb{x} | \text{do}(a))$ equals to the standard
|
| 35 |
+
|
| 36 |
+

|
| 37 |
+
(a)
|
| 38 |
+
|
| 39 |
+

|
| 40 |
+
(b)
|
| 41 |
+
Figure 2: Illustration of causal graphs: (a) The proposed structural causal model (SCM, §3.1), where the outcome variable $x$ denotes the text, treatment variable $a$ denotes the attribute to control, $z$ is the latent confounder, and $c$ is the proxy variable for the confounder. A hollow circle indicates the variable is latent, and a shaded circle indicates the variable is observed. The proxy information $c$ is observed only for a subset of examples, which we indicate with a dashed circle. Note the difference of the SCM compared to previous latent-variable controllable generation models [22, 4] which do not explicitly model the confounder or its proxy information, making it impossible to identify the causal effects. (b) Intervention on the attribute $a$ (\$3.2), represented as a blue circle, eliminates the dependence between $z$ and $a$ , leading to the intervened SCM wherein the $z \rightarrow a$ arrow is removed. (c) Counterfactual prediction (\$3.3), where red dashed arrows represent abduction from the original factual data $(a, x, c)$ , and $x'$ is the counterfactual outcome given the new attribute $a'$ .
|
| 42 |
+
|
| 43 |
+

|
| 44 |
+
(c)
|
| 45 |
+
|
| 46 |
+
conditional $p(\pmb{x} | a)$ . Yet in practice, we usually only have access to passively observed data, such as the (attribute, text) pairs from existing corpus, and have to adjust for confounders (i.e., variables that correlate with both treatment and outcome) in order to estimate $p(\pmb{x} | do(a))$ from observational distributions. For example, we apply backdoor adjustment [56] in §3.2 for attribute-conditional generation. Finally, (3) "Counterfactuals" involves queries about what would have happened, given the knowledge of what in fact happened. We next show how the controllable text generation tasks are bridged together as different levels of causal inference, operationalized by the proposed SCM.
|
| 47 |
+
|
| 48 |
+
# 3 The Causal Framework for Controllable Text Generation
|
| 49 |
+
|
| 50 |
+
We now describe the unified causal perspective. We first develop the structural causal model that characterizes the causal structure in the controlled generation process (§3.1). We then show that intervention on the SCM leads to attribute-conditional generation (§3.2), while counterfactual prediction makes attribute transfer (§3.3). At last, §3.4 describes the training of the SCM with new objectives to encourage confounder balancing and de-correlation.
|
| 51 |
+
|
| 52 |
+
Compared to previous causal modeling in other domains (e.g., images), modeling text as the outcome is challenging due to the complex unstructured information encoded in the text. We show here that the unifying perspective enables us to bring to bear rich tools and inspirations from causal inference, disentangled representation, and controllable generation, for effective text causal modeling.
|
| 53 |
+
|
| 54 |
+
Figure 2 shows the SCM graphs as detailed below. In the appendix we illustrate the model architecture used in our experimental studies.
|
| 55 |
+
|
| 56 |
+
# 3.1 The Structural Causal Model
|
| 57 |
+
|
| 58 |
+
Figure 2(a) shows the SCM that describes the controlled generation process of text. Here the attribute of interest $a$ serves as the treatment, and the text $x$ is the outcome. For simplicity, we assume $a$ is binary (e.g., positive or negative sentiment), though the framework can straightforwardly be applied to more general cases where $a$ has multiple classes and dimensions. Note that $a$ as the condition for generating $x$ can be instantiated in different forms depending on the concrete application. For example, it can be a scalar $a \in \{0,1\}$ as an input to the generator, or a word sequence such as $a \in \{\text{"[sentiment] positive", " [sentiment] negative"}\}$ that acts as a prompt for the generator to produce text continuation [5, 18, 30, 81].
|
| 59 |
+
|
| 60 |
+
In general, the confounder that induces spurious correlations between $a$ and $x$ is infeasible to be fully specified or observed. For example, to control the sentiment of a restaurant review, the confounder could involve popularity of the restaurant, personal preferences of the customer, and other factors, whose values cannot be directly measured. Thus, following the recent causal approaches in other domains [43, 48, 44], we model the unobserved confounder as a high dimensional latent variable $z$ , and infer $z$ from "indirect" confounding variables that are measurable in practice (such as food type).
|
| 61 |
+
|
| 62 |
+
The "indirect" variables are also called proxy variables in causality [50, 1, 70], which we denote as $c$ . More background of confounder and proxy is provided in the appendix.
|
| 63 |
+
|
| 64 |
+
The goal of controllable text generation is thus to generate coherent text (given original text in the case of text attribute transfer) with accurate target attribute $a$ while unbiased in terms of the confounders.
|
| 65 |
+
|
| 66 |
+
Previous causal studies [43, 48, 44] have usually assumed the confounder proxy $c$ is available for all data. Similarly, recent work of debiasing attribute-conditional generation (based on machine learning) has relied on access to those extensive proxy labels [23, 40]. However, the assumption is often impractical due to the time and financial cost for obtaining the massive additional information beyond the common (attribute, text) data. We thus consider a more practical setting [15] where we only have access to the proxy information for a small subset (e.g., $1\% - 5\%$ ) of examples. In Figure 2(a), we use dashed circle of $c$ to denote the new challenging setting.
|
| 67 |
+
|
| 68 |
+
The resulting SCM thus defines a joint distribution
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
p _ {\theta} (\boldsymbol {x}, a, \boldsymbol {z}, \boldsymbol {c}) = p _ {\theta} (\boldsymbol {x} | a, \boldsymbol {z}) p _ {\theta} (a | \boldsymbol {z}) p _ {\theta} (\boldsymbol {c} | \boldsymbol {z}) p _ {0} (\boldsymbol {z}), \tag {1}
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
where the component $p_{\theta}(c|z)$ applies when the proxy $c$ is observed for the example; $p_0(z)$ is a standard Gaussian prior following the common practice; and all components with free parameters $\theta$ are modeled as deep neural networks. We use the amortized variational inference as in variational auto-encoders (VAEs) [31] to infer the latent confounder $z$ from observations. Specifically, we introduce a variational distribution $q_{\phi}(z|x,a,c)$ with parameters $\phi$ . To infer $z$ for those examples whose proxy $c$ is not available, we could apply an auxiliary predictor that estimates $c$ from the observed $(x,a)$ . The auxiliary predictor can be trained on the subset of examples with available $c$ . In this work, we instead set the default $c$ to a dummy value when inferring $z$ for simplicity.
|
| 75 |
+
|
| 76 |
+
Note that previous work has also used VAEs for controllable text generation [22, 4]. However, they reside purely at the association level. In particular, despite the latent variables, they do not explicitly model the confounder and/or its proxy information, rendering the causal effects between components not identifiable [56]. As a result, those models are vulnerable to biases, as shown in the experiments.
|
| 77 |
+
|
| 78 |
+
# 3.2 Inference (I): Intervention for Attribute-Conditional Generation
|
| 79 |
+
|
| 80 |
+
We now discuss how to perform causal inference given the SCM for attribute-conditional generation. As mentioned in §1, in contrast to the conventional association-level methods based on the conditional $p(\pmb{x} | a)$ , here we formulate the task with the interventional conditional $p(\pmb{x} | do(a))$ . The do-operation sets $a$ to a given value independently of $z$ (§2), which eliminates the dependence between $a$ and $z$ , leading to the new intervened causal graph in Figure 2(b), where the arrow from $z$ to $a$ is removed. Thus $p_{\theta}(\pmb{x} | do(a))$ captures the causal effect of attribute $a$ on text outcome $\pmb{x}$ without confounding bias. We can use the backdoor adjustment [56] to estimate $p_{\theta}(\pmb{x} | do(a))$ from the observed data:
|
| 81 |
+
|
| 82 |
+
$$
|
| 83 |
+
p _ {\theta} (\boldsymbol {x} | d o (a)) = \sum_ {\boldsymbol {z}} p _ {\theta} (\boldsymbol {x} | a, \boldsymbol {z}) p (\boldsymbol {z}). \tag {2}
|
| 84 |
+
$$
|
| 85 |
+
|
| 86 |
+
That is, we adjust for confounder $z$ by making fair considerations of every possible $z$ values and averaging the results by the distribution $p(z)$ discussed below. The difference from the previous methods becomes even clearer if we similarly decompose the conditional $p_{\theta}(\pmb{x}|a) = \sum_{\pmb{z}} p_{\theta}(\pmb{x}|a, \pmb{z}) p_{\theta}(\pmb{z}|a)$ , which as we can see depends on $p_{\theta}(\pmb{z}|a)$ and inherits the correlations between $a$ and $z$ in the data.
|
| 87 |
+
|
| 88 |
+
We generate text samples from $p_{\theta}(\pmb{x}|do(a))$ approximately by first drawing $z \sim p(z)$ and then decoding with $\pmb{x} \sim p_{\theta}(\pmb{x}|a, \pmb{z})$ . To sample from the marginal $p(z)$ which does not have a clean analytic form, we use a similar approach as in [36] by fitting a simple generative adversarial network (GAN) [16], $p_{\mathrm{GAN}}(z)$ , on the learned latent space (i.e., $z \sim q_{\phi}$ on all training data). We found it is sufficient to use a single-layer GAN which is fast to train.
|
| 89 |
+
|
| 90 |
+
# 3.3 Inference (II): Counterfactual for Text Attribute Transfer
|
| 91 |
+
|
| 92 |
+
Given an observed text $x$ , text attribute transfer seeks to produce new text $x'$ that possesses the given new attribute $a'$ and preserves as many characteristics of the original $x$ as possible. The task can naturally be mapped to counterfactual prediction on the SCM, i.e., imagining the alternative outcome for $x$ should its attribute have been $a'$ . The resulting inference procedure looks similar to the previous VAE-based attribute transfer method [22]. However, besides the key modeling difference of confounder/proxy as above, our counterfactual based interpretation offers a principled causal account
|
| 93 |
+
|
| 94 |
+
for the attribute transfer task. Moreover, the causal perspective inspires new training techniques that substantially improve the performance and reduce generation bias, as presented in §3.4.
|
| 95 |
+
|
| 96 |
+
Figure 2(c) illustrates the inference process. Specifically, from the causal perspective, counterfactual prediction is mathematically formulated as a three-step procedure [56, 55]: (1) Abduction that infers the "context" compatible with the observation $\pmb{x}$ . In our problem, it is sufficient to infer $\pmb{z}$ as the context, as we would only intervene its descendant node $a$ in the SCM. Thus the step is done by computing $q_{\phi}(z|x,a,c)$ ; (2) Action that performs intervention on variable $a$ by setting $a = a'$ ; and (3) Prediction that computes the counterfactual outcome based on the SCM, i.e., $x^{\prime}\sim p_{\theta}(x^{\prime}|a^{\prime},z)$ , where we set $z$ to the mean (vector) of the above abduction distribution $q_{\phi}$ for simplicity.
|
| 97 |
+
|
| 98 |
+
# 3.4 Learning
|
| 99 |
+
|
| 100 |
+
With the causal model and the inferences on it, we now discuss model training, which integrates variational learning and counterfactual reasoning for confounder balancing and disentanglement.
|
| 101 |
+
|
| 102 |
+
Variational auto-encoding objective The base objective for learning the causal model is built on the common VAE approach [31]. Briefly, since the model's marginal log-likelihood (that marginalizes out the latent $z$ ) is intractable, VAEs derive a lower bound with the variational distribution $q_{\phi}$ . Formally, given a training example $(\pmb{x}, a)$ with the optional proxy $c$ :
|
| 103 |
+
|
| 104 |
+
$$
|
| 105 |
+
\mathcal {L} _ {v a e} (\boldsymbol {\theta}, \phi) = \mathbb {E} _ {\boldsymbol {z} \sim q _ {\phi}} \left[ \log p _ {\theta} (\boldsymbol {x} | a, \boldsymbol {z}) + \lambda_ {a} \log p _ {\theta} (a | \boldsymbol {z}) + \lambda_ {c} \log p _ {\theta} (\boldsymbol {c} | \boldsymbol {z}) \right] - \lambda_ {k l} \mathrm {K L} \left(q _ {\phi} \| p _ {0}\right), \tag {3}
|
| 106 |
+
$$
|
| 107 |
+
|
| 108 |
+
where the first term is the reconstruction that aims to recover the observations $(\pmb{x}, a, \pmb{c})$ given the inferred $\pmb{z}$ from $q_{\phi}$ ; the second term is a Kullback-Leibler regularizer that enforces the variational distribution to stay close to the prior $p_0(\pmb{z})$ . We refer readers to [31] for more details of VAEs. In the objective, $\lambda_a, \lambda_c$ , and $\lambda_{kl} > 0$ are balancing hyperparameters. We set $\lambda_c$ to 0 when proxy $\pmb{c}$ is not available, and otherwise select from $\{0.01, 0.1, 1\}$ based on validation, same as $\lambda_a$ . We use the cyclic schedule from [36] to anneal $\lambda_{kl}$ from 0 to 1 to avoid excessive regularization of the KL term.
|
| 109 |
+
|
| 110 |
+
Counterfactual objectives Training with the above base objective alone can lead to model collapse where the attribute variable $a$ is ignored in the generation process, i.e., text sampled from $p_{\theta}(\pmb{x}|a,\pmb{z})$ is not effectively controlled by $a$ . This is because the training text $\pmb{x}$ has already contained the attribute information, allowing both the inference of $\pmb{z}$ and the subsequent reconstruction of $\pmb{x}$ not to depend on the attribute value $a$ . This issue highlights a key difference of our model compared to previous latent-confounder causal models in other domains (e.g., medication effect prediction), where the outcome is typically a simple binary variable (e.g., cured or not) that does not "leak" the treatment information (e.g., medication) [43, 48, 44]. Causal controllable text generation thus requires new solutions to encourage effective control.
|
| 111 |
+
|
| 112 |
+
Besides, a key ingredient for accurate causal inference is to achieve balance of confounders between treatment groups [27, 63, 52, 44]. That is, we want to match the confounder representation $z$ of the examples whose $a = 0$ and those of the examples whose $a = 1$ , in order to enhance the generalization performance for inferring counterfactual outcomes [27]. The concept is closely related to disentangled representation in machine learning which seeks to keep most dimensions of a representation invariant to the change of a particular dimension [41, 19].
|
| 113 |
+
|
| 114 |
+
The above two desiderata can be resolved with a suite of counterfactual objectives that are based on the counterfactual outcomes $\pmb{x}^{\prime}$ inferred in §3.3. We now describe those intuitive objectives, which are related to the attribute $a$ , confounder $z$ , and proxy $c$ , respectively. We also discuss how we are able to draw inspirations from previous literature of disentangled representation and text attribute transfer, thanks to their connections with causal inference as above.
|
| 115 |
+
|
| 116 |
+
The first objective concerns the attribute $a$ to correctly learn its influence on the outcome. Intuitively, given the counterfactual outcome $x'$ given the counterfactual attribute $a'$ , we want to make sure $x'$ truly entails $a'$ . This can be achieved by using a pretrained attribute classifier $f(x, a)$ that estimates the likelihood of text $x$ possessing attribute $a$ . More specifically, we train the model such that its predicted $x'$ possesses $a'$ with a high likelihood measured by the classifier:
|
| 117 |
+
|
| 118 |
+
$$
|
| 119 |
+
\mathcal {L} _ {c f - a} (\boldsymbol {\theta}, \phi) = \mathbb {E} _ {\boldsymbol {z} \sim q _ {\phi}, \boldsymbol {x} ^ {\prime} \sim p _ {\theta} \left(\boldsymbol {x} ^ {\prime} \mid a ^ {\prime}, \boldsymbol {z}\right)} [ f \left(\boldsymbol {x} ^ {\prime}, a ^ {\prime}\right) ]. \tag {4}
|
| 120 |
+
$$
|
| 121 |
+
|
| 122 |
+
As in [22, 79], we use Gumbel-softmax approximation [47, 25] to the discrete text $x'$ to enable gradient backpropagation for optimizing $(\theta, \phi)$ . Similar objective has been used in previous conditional generation of text [22, 45] and image [21, 35]. A crucial caveat is that, here the classifier itself
|
| 123 |
+
|
| 124 |
+
pretrained with the (attribute, text) data can also be biased due to confounding factors. Thus relying only on this objective as in the previous work is not sufficient for accurate unbiased attribute control, as shown in our experiments. To this end, we further devise the following counterfactual objectives.
|
| 125 |
+
|
| 126 |
+
The second objective focuses on balancing the confounder $z$ . Intuitively, by the definition of counterfactuals, $x'$ must have the same confounder representation as the original $x$ . We thus minimize the distance between the respective $z'$ and $z$ :
|
| 127 |
+
|
| 128 |
+
$$
|
| 129 |
+
\mathcal {L} _ {c f - z} \left(\boldsymbol {\theta}, \phi\right) = - \mathbb {E} _ {\boldsymbol {z}, \boldsymbol {z} ^ {\prime}} \left[ d \left(\boldsymbol {z} ^ {\prime}, \boldsymbol {z}\right) \right], \tag {5}
|
| 130 |
+
$$
|
| 131 |
+
|
| 132 |
+
where, with slight abuse of notation, $z$ is the mean (vector) of $q_{\phi}(z|x,a,c)$ , $z^{\prime}$ is the mean (vector) of $q_{\phi}(z'|x',a',c)$ on the counterfactual $x^{\prime}$ , and $d(\cdot ,\cdot)$ is a distance metric. Though the vectors $z$ and $z^{\prime}$ have continuous values, we draw inspiration from the recent disentangled representation work [41] and use a binary cross-entropy loss to match $z^{\prime}$ to $z$ , i.e.,
|
| 133 |
+
|
| 134 |
+
$$
|
| 135 |
+
d \left(\boldsymbol {z} ^ {\prime}, \boldsymbol {z}\right) = \operatorname {m e a n} \left(\bar {\boldsymbol {z}} \log \left(\sigma \left(\boldsymbol {z} ^ {\prime}\right)\right) + \left(1 - \bar {\boldsymbol {z}}\right) \log \left(1 - \sigma \left(\boldsymbol {z} ^ {\prime}\right)\right)\right), \tag {6}
|
| 136 |
+
$$
|
| 137 |
+
|
| 138 |
+
where $\bar{z} = (z - \min(z)) / (\max(z) - \min(z)))$ normalizes $z$ to $[0,1]$ , $\sigma(\cdot)$ is the logistic function applied to $z'$ element-wise, and mean( $\cdot$ ) takes the average distance across $z$ dimensions. The distance is shown to be more effective [41] than the common $L_2$ loss $\| z' - z\|^2$ as used in earlier work [22].
|
| 139 |
+
|
| 140 |
+
The third objective carries similar intuition as above, though uses the proxy $c$ when it is available. Specifically, we want $z'$ to be able to reconstruct $c$ (as is $z$ ):
|
| 141 |
+
|
| 142 |
+
$$
|
| 143 |
+
\mathcal {L} _ {c f - c} (\boldsymbol {\theta}, \phi) = \mathbb {E} _ {\boldsymbol {z} ^ {\prime}} \left[ \log p _ {\theta} \left(\boldsymbol {c} \mid \boldsymbol {z} ^ {\prime}\right) \right], \tag {7}
|
| 144 |
+
$$
|
| 145 |
+
|
| 146 |
+
where $\pmb{z}'$ is the mean (vector) of $q_{\phi}(\pmb{z}'|\pmb{x}', a', c)$ , same as in Eq.(5).
|
| 147 |
+
|
| 148 |
+
In sum, the overall objective for training the causal model is:
|
| 149 |
+
|
| 150 |
+
$$
|
| 151 |
+
\mathcal {L} (\boldsymbol {\theta}, \phi) = \mathcal {L} _ {v a e} + \gamma_ {a} \mathcal {L} _ {c f - a} + \gamma_ {z} \mathcal {L} _ {c f - z} + \gamma_ {c} \mathcal {L} _ {c f - c}, \tag {8}
|
| 152 |
+
$$
|
| 153 |
+
|
| 154 |
+
with balancing hyperparameters $\gamma_{a},\gamma_{z}$ , and $\gamma_c\geq 0$ . In practice, we found the model is not sensitive to the choices of those hyperparameters. We set each of them to either 0.5 or 1.0 based on validation.
|
| 155 |
+
|
| 156 |
+
# 4 Related Work
|
| 157 |
+
|
| 158 |
+
Causal modeling for generation There is an emerging interest in integrating causality with machine learning [62] in various problems. Several latest works have studied causal inference combined with deep generative models for images, to learn causal structures between attributes [78, 65, 51], synthesize novel images [32, 3], and augment unbiased classifier training [61]. The spatial structure of images can make it easier to learn causal mechanisms, e.g., the work [61] specified independent modules for image background and texture. In contrast, text with abstract concepts (e.g., sentiment, topics) exhibits less independent structure. Previous causal modeling for text usually focuses on language understanding [29, 74, 7, 75, 71, 13, 49]. Recent work has also studied text as outcome in causal inference [12] for data augmentation [85, 28, 82] or generating text in specific domains (e.g., court view [77]). We make the first study of causal modeling for the general problem of text generation under control and demonstrate the effectiveness for bias mitigation.
|
| 159 |
+
|
| 160 |
+
Controllable text generation Various approaches have been developed for attribute-conditional generation, by learning conditional language models (LMs) [30, 14, 81], guided inference [33, 9], or prompts [5, 72]. Recent work has focused on reducing gender bias in machine translation and generation [68, 66, 69, 11]. Other work studied more general unbiased generation with ML assuming access to unbiased classifiers [40, 23]. We use causal techniques to address a different and challenging setting where only limited confounding labels are observed. Unsupervised text attribute transfer has gained increasing attention [64, 22, 26], with the primary focus on learning to disentangle target attribute with other factors. We study the new challenge of attribute transfer in the presence of strong bias in the data, and show greatly improved performance.
|
| 161 |
+
|
| 162 |
+
# 5 Experiments
|
| 163 |
+
|
| 164 |
+
We study the challenging generation tasks with strong spurious correlations in training data. The causal framework substantially reduces bias and improves control accuracy.
|
| 165 |
+
|
| 166 |
+
We describe detailed model configurations in appendix. Briefly, the main model components, including the decoder $p_{\theta}(\pmb{x}|a,\pmb{z})$ , inference network $q_{\phi}(\pmb{z}|\pmb{x},a,\pmb{c})$ , and classifier $f(\pmb{x},a)$ (Eq.4) are all based on the GPT-2 (117M) architecture [59] with pretrained weights, respectively. In $q_{\phi}$ and $f$ , we use the GPT-2 final-step output feature as the representation of input sentence $\pmb{x}$ . We implement other components $(p_{\theta}(a|\pmb{z})$ and $p_{\theta}(\pmb{c}|\pmb{z}))$ as simple MLPs. The model is trained with AdamW optimizer [42] using an initial learning rate of 1e-6. All experiments were conducted on 8 Tesla V100 GPUs.
|
| 167 |
+
|
| 168 |
+
# 5.1 Attribute-Conditional Generation
|
| 169 |
+
|
| 170 |
+
We first evaluate the interventional inference for attribute-conditional generation (§3.2). We use two datasets where the target attribute has a correlation strength of over $90\%$ with the confounding factor, following the challenging settings of the latest work on visual bias [73, 17, 61, 65]. That is, the target attribute and the confounding factor of over $90\%$ examples are both positive or negative, while those of the rest $10\%$ examples are opposite. Differing from previous studies, we further assume the model can observe the confounding labels of a small subset of data, a more practical setting as in §3.1.
|
| 171 |
+
|
| 172 |
+
Datasets Our first dataset is derived from the YELP challenge<sup>2</sup> that contains customer reviews of different categories. Sentiment (1:positive vs. 0:negative) is the attribute we aim to control, and the category of review object (1:restaurant vs. 0:others) is the confounding factor. Specifically, we extract a subset of data where $90\%$ restaurant reviews are of positive sentiment, while $90\%$ reviews to other entities (e.g., shopping) are of negative sentiment (thus a $90\%$ correlation strength). We keep the category labels for less than $2\%$ of training data. The resulting data has $510\mathrm{K} / 6\mathrm{K}$ training/validation examples, wherein 10K training examples have observable confounding category labels<sup>3</sup>. For evaluation, we further create a balanced test set of 13K examples with correlation strength $50\%$ (i.e., no correlation). Following the previous controllable generation [22, 64], we focus on generating short text, by truncating the output text in the data to 20 tokens at maximum.
|
| 173 |
+
|
| 174 |
+
The second dataset is from the BIOS corpus [10] that contains online biographies with gender and occupation labels. We use gender (female/male in the corpus) as the attribute to control. Thus the goal is to generate biographical text of a given gender. For occupation which is the confounding factor, we subsample and merge the occupations into two groups, i.e., {nurse, dietitian, paralegal, ...} and {rapper, DJ, surgeon, ...} (see appendix for more details). The correlation strength of the resulting dataset is $95\%$ . For example, $95\%$ female biographies are about the occupations in group one. We randomly split the dataset into 43K training and 2K validation examples, and keep the binary occupation labels for only 3K randomly selected training examples (among which only $5\% \times 3\mathrm{K} = 150$ examples have opposite gender and occupation labels). As above, we further create a balanced test set of 2K examples for evaluation, and truncate the output text to no more than 20 tokens.
|
| 175 |
+
|
| 176 |
+
Baselines and setup We compare with the conditional language models that people would commonly train for the task. The first model, Conditional LM, conditions only on the target attribute and generates text accordingly. The second model, Conditional LM (full), makes full use of the attribute and confounding labels in hope of better de-correlating the two. Since the confounding labels are available only on a small subset of examples, we first train a classifier on the subset with data-reweighting (see appendix for details), and use it to predict confounding labels for the remaining examples. The language model is then trained on the resulting complete data, conditioning on both the attribute and the (real or estimated) confounding label. We also compare with latest attribute-conditional generation approaches, such as GeDi [33] where a language model conditioning on the confounding information $p_{\mathrm{gedi}}(\boldsymbol{x}|\boldsymbol{c})$ is used to reshape the generation distribution of the above Conditional LM. We include comparison with more baseline methods in the appendix.
For our approach, the available confounding labels serve as the proxy $c$. The attribute classifier $f$ used to train our model (Eq.4) is pretrained on the biased training data. On YELP, the resulting sentiment classifier has a mediocre accuracy of $83\%$ on the balanced test set; on BIOS, the (gender) classifier has an accuracy of $91\%$.

<table><tr><td></td><td>Methods</td><td>Control accuracy (↑)</td><td>Bias (↓)</td><td>Fluency (↓)</td><td>Diversity (↑)</td></tr><tr><td rowspan="5">YELP</td><td>Conditional LM</td><td>79.1</td><td>78.7</td><td>50.4</td><td>41.4</td></tr><tr><td>Conditional LM (full)</td><td>80.3</td><td>78.9</td><td>50.8</td><td>41.9</td></tr><tr><td>GeDi [33]</td><td>80.9</td><td>74.3</td><td>83.2</td><td>41.7</td></tr><tr><td>Ablation: Ours w/o cf-z/c</td><td>91.1</td><td>89.2</td><td>54.1</td><td>40.4</td></tr><tr><td>Ours</td><td>96.3</td><td>59.8</td><td>51.3</td><td>39.1</td></tr><tr><td rowspan="5">BIOS</td><td>Conditional LM</td><td>95.51</td><td>84.73</td><td>17.0</td><td>46.5</td></tr><tr><td>Conditional LM (full)</td><td>93.28</td><td>72.34</td><td>18.5</td><td>48.5</td></tr><tr><td>GeDi [33]</td><td>86.0</td><td>75.2</td><td>27.8</td><td>43.5</td></tr><tr><td>Ablation: Ours w/o cf-z/c</td><td>97.3</td><td>70.1</td><td>29.4</td><td>42.1</td></tr><tr><td>Ours</td><td>99.2</td><td>62.4</td><td>32.0</td><td>40.6</td></tr></table>

Table 1: Automatic evaluation of attribute-conditional generation on YELP and BIOS. Control accuracy is measured by the attribute classifier accuracy; Bias is measured by the confounding classifier accuracy; for Fluency we report perplexity, so a lower score indicates more fluent text; Diversity is measured by the Distinct-2 metric. For each evaluation aspect, we highlight the best result that shows significant improvements over the others.

<table><tr><td></td><td>Methods</td><td>Control accuracy (↑)</td><td>Bias (↓)</td><td>Fluency (↑)</td></tr><tr><td rowspan="2">YELP</td><td>Conditional LM (full)</td><td>80.0</td><td>73.0</td><td>3.90</td></tr><tr><td>Ours</td><td>97.0</td><td>56.0</td><td>3.85</td></tr><tr><td rowspan="2">BIOS</td><td>Conditional LM (full)</td><td>96.0</td><td>82.0</td><td>4.43</td></tr><tr><td>Ours</td><td>99.0</td><td>60.0</td><td>4.25</td></tr></table>

Table 2: Human evaluation of attribute-conditional generation on YELP and BIOS.

**Evaluation** We conduct both automatic and human evaluation. For the former, we follow common practice and evaluate the generations along the following aspects: (1) Control accuracy, for which we use an "evaluation attribute classifier" that takes the generated sentences as inputs and measures how accurately they express the input attributes. The evaluation attribute classifier is trained on a large unbiased set of examples from the original corpus and attains high test accuracy (87% on YELP and 95% on BIOS); note the difference from the above classifier $f$, which is trained only on biased training data. (2) Bias, which is measured by another classifier for the confounding factor. Intuitively, the better the predicted confounding labels match the input attributes, the more correlated the two factors are in the generation; a 50% match indicates no correlation. These classifiers are trained in the same way as the evaluation attribute classifiers, and achieve accuracies of 85% on YELP and 90% on BIOS. (3) Fluency, which is measured by applying GPT-2 language models (LMs) to the generated text and computing perplexity; the LMs obtain perplexities of 32.4 and 18.0 on the real text of YELP and BIOS, respectively. (4) Diversity, with the common Distinct-$n$ metric [37], which measures the ratio of unique $n$-grams to the total number of $n$-grams in the generation set. We evaluate 10K generated samples from each model.

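The Distinct-$n$ computation itself is straightforward; a minimal self-contained sketch:

```python
def distinct_n(generations, n=2):
    """Distinct-n [37]: #unique n-grams / #total n-grams over a set of
    tokenized generations (lists of token strings)."""
    unique, total = set(), 0
    for tokens in generations:
        ngrams = list(zip(*(tokens[i:] for i in range(n))))
        unique.update(ngrams)
        total += len(ngrams)
    return len(unique) / max(total, 1)

# Example: two generations sharing the bigram ("the", "food"):
# 5 unique bigrams out of 6 total -> 0.833
print(distinct_n([["the", "food", "is", "great"],
                  ["the", "food", "was", "ok"]], n=2))
```
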
For human evaluation, we ask human raters to annotate, for each generated text, the attribute label and the confounding factor label, from which we compute control accuracy and bias as above. We also have raters rate language fluency on a 5-point Likert scale. On each dataset, we compare Conditional LM (full) and our approach, with 100 sentences from each model annotated by 3 raters. The Pearson correlation coefficient of the human scores is 0.67, indicating strong inter-rater agreement.

**Results** Table 1 shows the automatic evaluation results on both YELP and BIOS. Our causal approach significantly improves over the association-based conditional models. For example, on YELP, our model achieves a $16\%$ absolute improvement in control accuracy while reducing the bias (spurious correlation with the confounder) by $19\%$. In contrast, the conditional LMs mostly inherit the bias from the training data. As an ablation study, we also evaluate a simplified variant of our full approach that omits the counterfactual objectives w.r.t. $z$ and $c$ (Eqs.5 and 8), which reduces to a training strategy similar to previous methods [e.g., 22]. The variant improves control accuracy over the conditional LMs but fails to effectively reduce the generation bias. The results show the crucial role of confounder balancing in bias reduction. On the BIOS dataset, our approach also obtains consistent improvements in both control accuracy and bias reduction.

Table 2 shows the human evaluation results on both datasets, which largely confirm the observations from the automatic evaluation.

<table><tr><td>Methods</td><td>Control accuracy (↑)</td><td>Bias (↓)</td><td>Preservation (↑)</td><td>Fluency (↓)</td></tr><tr><td>Hu et al. [22]</td><td>44.1</td><td>68.4</td><td>77.7</td><td>132.7</td></tr><tr><td>He et al. [20]</td><td>35.3</td><td>60.2</td><td>80.1</td><td>57.7</td></tr><tr><td>Ablation: Ours w/o cf-z/c</td><td>75.0</td><td>67.8</td><td>36.3</td><td>34.2</td></tr><tr><td>Ours</td><td>77.0</td><td>61.4</td><td>42.3</td><td>29.6</td></tr></table>

Table 3: Results of attribute transfer on the biased YELP data. We run the baselines [22, 20] with their public code; they fail to rewrite the text on most instances, leading to very low control accuracy and high preservation.

<table><tr><td rowspan="2">Methods</td><td rowspan="2">Control accuracy (↑)</td><td colspan="2">Preservation (↑)</td><td rowspan="2">Fluency (↓)</td></tr><tr><td>self-BLEU</td><td>ref-BLEU</td></tr><tr><td>Hu et al. [22]</td><td>86.7</td><td>58.4</td><td>-</td><td>177.7</td></tr><tr><td>Shen et al. [64]</td><td>73.9</td><td>20.7</td><td>7.8</td><td>72.0</td></tr><tr><td>He et al. [20]</td><td>87.9</td><td>48.4</td><td>18.7</td><td>31.7</td></tr><tr><td>Dai et al. [8]</td><td>87.7</td><td>54.9</td><td>20.3</td><td>73.0</td></tr><tr><td>Ablation: Ours w/o cf-z/c</td><td>87.1</td><td>57.2</td><td>24.3</td><td>46.6</td></tr><tr><td>Ours</td><td>91.9</td><td>57.3</td><td>25.5</td><td>47.1</td></tr></table>

Table 4: Results of text attribute transfer on the common unbiased YELP data.

# 5.2 Text Attribute Transfer

We next study text attribute transfer (§3.3) as the second core task of controllable generation. The proposed causal approach again achieves substantial improvements in terms of accurate control and bias reduction. For a broader comparison, we also apply our approach to an unbiased dataset widely studied in previous text attribute transfer research, where it shows superior performance.

**Datasets** We use the above biased YELP dataset (§5.1) to study attribute transfer: we aim to modify a sentence to possess the opposite sentiment (e.g., from negative to positive) while preserving all other characteristics. In particular, we want the new sentence to keep the category unchanged, which is difficult for previous association-based controllable models given the strong correlation between sentiment and category in the data. Since most previous attribute transfer studies have focused only on the unbiased setting, we additionally evaluate our approach on the popular unbiased YELP data (sentiment-labeled restaurant reviews only) [64] for comparison.

**Evaluation** We follow standard practice for evaluation. For the biased setting, we measure control accuracy, bias, and fluency as in §5.1. We also assess the common aspect of preservation by computing the BLEU score [53] between the generated and original sentences (i.e., self-BLEU); a higher score indicates better preservation of sentence properties. For the unbiased setting, we omit the bias evaluation and additionally compute another preservation metric, ref-BLEU, the BLEU score between the generation and human-written gold text on a subset of test examples [38]. We also conduct human evaluation, which reaches the same conclusions as the automatic evaluation regarding model performance; we report the results in the appendix due to space limitations.

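As a sketch, the preservation metrics can be computed with NLTK's corpus-level BLEU, treating each source sentence (for self-BLEU) or human-written reference (for ref-BLEU) as the single reference for its rewrite; the smoothing choice is our assumption for short, truncated outputs.

```python
from nltk.translate.bleu_score import SmoothingFunction, corpus_bleu

def self_bleu(sources, rewrites):
    """Preservation (self-BLEU): BLEU of each rewritten sentence against its
    own source. `sources` and `rewrites` are lists of token lists; smoothing
    avoids zero n-gram counts on short (<= 20 token) outputs."""
    references = [[src] for src in sources]  # one reference per hypothesis
    smooth = SmoothingFunction().method1
    return 100.0 * corpus_bleu(references, rewrites, smoothing_function=smooth)
```
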
**Results** Table 3 shows results on the biased YELP data, a substantially more challenging setting than the popular unbiased one (Table 4). We compare with two of the previous best-performing methods that have public code. Our approach again manages to reduce the bias while achieving decent transfer accuracy. The previous methods struggle to edit the text on many instances (e.g., generating the same sentences as the inputs), leading to low control accuracy. Ablation comparison with our simplified variant (Ours w/o cf-z/c) further validates the effect of the counterfactual objectives for confounder balancing (§3.4), as shown by the improved accuracy and mitigated bias of the full approach.

Finally, Table 4 shows the results on the common unbiased YELP sentiment data. Our approach generates fluent outputs with improved accuracy and preservation.

# 6 Conclusions and Future Work

We have presented a principled causal perspective for the two core tasks of controllable text generation. Based on the proposed structural causal model, attribute-conditional generation is modeled as interventional inference, and text attribute transfer performs counterfactual prediction. We connect rich techniques in causality, disentangled representation, and text generative modeling, and develop learning objectives for accurate control and confounder balancing. Focusing on the challenging setting with partially available confounding information, the experiments show our approach achieves accurate control and mitigates the strong correlations in the data.

The proposed causal framework opens up a range of new opportunities for further improving and enriching controllable text generation. For example, though this work has focused on a single control attribute and a single confounding factor, it would be interesting to generalize the approach to structured control over a richer set of text attributes, by modeling the underlying causal graph between attributes (as explored similarly in image generation [78, 65]). We are also interested in importing more tools from causality to enable new applications. For instance, the inverse propensity reweighting technique can potentially be used to debias pretrained language models $p_{\mathrm{pretrain}}(\pmb{x}|a)$, via the following known relation between the unbiased interventional conditional $p(\pmb{x}|do(a))$ and the biased standard conditional $p(\pmb{x}|a)$:

$$
p(\boldsymbol{x}|do(a)) = \sum_{\boldsymbol{z}} p(\boldsymbol{x}|a,\boldsymbol{z})\, p(\boldsymbol{z}) = \sum_{\boldsymbol{z}} p(\boldsymbol{x}|a)\, p(\boldsymbol{z}|\boldsymbol{x},a)\, \frac{p(a)}{p(a|\boldsymbol{z})}, \tag{9}
$$

where $p(a|\boldsymbol{z})$ is known as the propensity score [56], i.e., the propensity (probability) of $\boldsymbol{z}$ being assigned the particular treatment $a$. Plugging in $p_{\mathrm{pretrain}}(\pmb{x}|a)$ together with the parameterized estimates of $p_{\theta}(\pmb{z}|\pmb{x},a)$ and $p_{\theta}(a|\pmb{z})$ learned in §3, we would effectively convert the pretrained LM into the unbiased $p(\pmb{x}|do(a))$. Further, rich studies in the causality literature have proposed stabilized and enhanced variants of inverse propensity reweighting [e.g., see 80], all of which present interesting topics to explore for controllable generation in the future.

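As a minimal sketch of this conversion, assume a discrete confounder code $z$ with $K$ values and the learned components given as tensors; the function and argument names below are hypothetical illustrations of Eq. 9, not an implementation from the paper.

```python
import torch

def debiased_logprob(logp_x_given_a, q_z_given_xa, p_a_given_z, p_a):
    """Estimate log p(x | do(a)) from a biased pretrained LM via Eq. 9.

    logp_x_given_a: scalar tensor, log p_pretrain(x | a).
    q_z_given_xa:   [K] tensor, posterior p_theta(z | x, a).
    p_a_given_z:    [K] tensor, propensity p_theta(a | z).
    p_a:            scalar, marginal treatment probability p(a).
    """
    # Inverse propensity weights, one per discrete confounder value z.
    weights = q_z_given_xa * (p_a / p_a_given_z)
    return logp_x_given_a + torch.log(weights.sum())
```
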
**Ethical considerations** We would like to note that automatic text generation could be used maliciously to generate fake, toxic, or offensive content [34, 72, 2]. We hope this study of unbiased modeling can offer techniques that alleviate such potential issues.

# References
[1] J. D. Angrist and J.-S. Pischke. Mostly harmless econometrics: An empiricist's companion. Princeton University Press, 2008.

[2] E. M. Bender, T. Gebru, A. McMillan-Major, and S. Shmitchell. On the dangers of stochastic parrots: Can language models be too big? In Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, pages 610-623, 2021.

[3] M. Besserve, A. Mehrjou, R. Sun, and B. Schölkopf. Counterfactuals uncover the modular structure of deep generative models. In Eighth International Conference on Learning Representations (ICLR 2020), 2020.

[4] S. R. Bowman, L. Vilnis, O. Vinyals, A. Dai, R. Jozefowicz, and S. Bengio. Generating sentences from a continuous space. In Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning, pages 10-21, Berlin, Germany, Aug. 2016. Association for Computational Linguistics. doi: 10.18653/v1/K16-1002. URL https://www.aclweb.org/anthology/K16-1002.

[5] T. B. Brown, B. Mann, N. Ryder, M. Subbiah, J. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, et al. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020.

[6] A. Chattopadhyay, P. Manupriya, A. Sarkar, and V. N. Balasubramanian. Neural network attributions: A causal perspective. In International Conference on Machine Learning, pages 981-990. PMLR, 2019.

[7] W. Chen, J. Tian, L. Xiao, H. He, and Y. Jin. Exploring logically dependent multi-task learning with causal inference. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), 2020.

[8] N. Dai, J. Liang, X. Qiu, and X.-J. Huang. Style transformer: Unpaired text style transfer without disentangled latent representation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5997-6007, 2019.

[9] S. Dathathri, A. Madotto, J. Lan, J. Hung, E. Frank, P. Molino, J. Yosinski, and R. Liu. Plug and play language models: A simple approach to controlled text generation. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020. URL https://openreview.net/forum?id=H1edEyBKDS.

[10] M. De-Arteaga, A. Romanov, H. Wallach, J. Chayes, C. Borgs, A. Chouldechova, S. Geyik, K. Kenthapadi, and A. T. Kalai. Bias in bios: A case study of semantic representation bias in a high-stakes setting. In Proceedings of the Conference on Fairness, Accountability, and Transparency, pages 120-128, 2019.

[11] E. Dinan, A. Fan, A. Williams, J. Urbanek, D. Kiela, and J. Weston. Queens are powerful too: Mitigating gender bias in dialogue generation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8173-8188, 2020.

[12] N. Egami, C. J. Fong, J. Grimmer, M. E. Roberts, and B. M. Stewart. How to make causal inferences using texts. arXiv preprint arXiv:1802.02163, 2018.

[13] A. Feder, N. Oved, U. Shalit, and R. Reichart. CausaLM: Causal model explanation through counterfactual language models. arXiv preprint arXiv:2005.13407, 2020.

[14] J. Ficler and Y. Goldberg. Controlling linguistic style aspects in neural language generation. CoRR, abs/1707.02633, 2017. URL http://arxiv.org/abs/1707.02633.

[15] K. Gan, A. Li, Z. Lipton, and S. Tayur. Causal inference with selectively deconfounded data. In International Conference on Artificial Intelligence and Statistics, pages 2791-2799. PMLR, 2021.

[16] I. J. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. C. Courville, and Y. Bengio. Generative adversarial nets. In NIPS, 2014.

[17] Y. Goyal, A. Feder, U. Shalit, and B. Kim. Explaining classifiers with causal concept effect (CaCE). arXiv preprint arXiv:1907.07165, 2019.

[18] H. Guo, B. Tan, Z. Liu, E. P. Xing, and Z. Hu. Text generation with efficient (soft) Q-learning. arXiv preprint arXiv:2106.07704, 2021.

[19] N. Hassanpour and R. Greiner. Learning disentangled representations for counterfactual regression. In International Conference on Learning Representations, 2020.

[20] J. He, X. Wang, G. Neubig, and T. Berg-Kirkpatrick. A probabilistic formulation of unsupervised text style transfer. In International Conference on Learning Representations (ICLR), 2019.

[21] Z. He, W. Zuo, M. Kan, S. Shan, and X. Chen. AttGAN: Facial attribute editing by only changing what you want. IEEE Transactions on Image Processing, 28(11):5464-5478, 2019.

[22] Z. Hu, Z. Yang, X. Liang, R. Salakhutdinov, and E. Xing. Toward controlled generation of text. In International Conference on Machine Learning (ICML), 2017.

[23] P.-S. Huang, H. Zhang, R. Jiang, R. Stanforth, J. Welbl, J. Rae, V. Maini, D. Yogatama, and P. Kohli. Reducing sentiment bias in language models via counterfactual evaluation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings, pages 65-83, 2020.

[24] M. Ilse, J. Tomczak, P. Forré, et al. Selecting data augmentation for simulating interventions. In AAAI, 2021.

[25] E. Jang, S. Gu, and B. Poole. Categorical reparameterization with Gumbel-Softmax. arXiv preprint arXiv:1611.01144, 2016.

[26] D. Jin, Z. Jin, Z. Hu, O. Vechtomova, and R. Mihalcea. Deep learning for text style transfer: A survey. arXiv preprint arXiv:2011.00416, 2020.

[27] F. Johansson, U. Shalit, and D. Sontag. Learning representations for counterfactual inference. In International Conference on Machine Learning, pages 3020-3029. PMLR, 2016.

[28] D. Kaushik, E. Hovy, and Z. Lipton. Learning the difference that makes a difference with counterfactually-augmented data. In International Conference on Learning Representations, 2019.

[29] K. Keith, D. Jensen, and B. O'Connor. Text and causal inference: A review of using text to remove confounding from causal estimates. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5332-5344, 2020.

[30] N. S. Keskar, B. McCann, L. R. Varshney, C. Xiong, and R. Socher. CTRL: A conditional transformer language model for controllable generation. arXiv preprint arXiv:1909.05858, 2019.

[31] D. P. Kingma and M. Welling. Auto-encoding variational Bayes. arXiv preprint arXiv:1312.6114, 2013.

[32] M. Kocaoglu, C. Snyder, A. G. Dimakis, and S. Vishwanath. CausalGAN: Learning causal implicit generative models with adversarial training. In International Conference on Learning Representations, 2018.

[33] B. Krause, A. D. Gotmare, B. McCann, N. S. Keskar, S. Joty, R. Socher, and N. F. Rajani. GeDi: Generative discriminator guided sequence generation. arXiv preprint arXiv:2009.06367, 2020.

[34] S. Kreps, R. M. McCain, and M. Brundage. All the news that's fit to fabricate: AI-generated text as a tool of media misinformation. Journal of Experimental Political Science, pages 1-14, 2020.

[35] G. Lample, N. Zeghidour, N. Usunier, A. Bordes, L. Denoyer, and M. Ranzato. Fader networks: Manipulating images by sliding attributes. In I. Guyon, U. von Luxburg, S. Bengio, H. M. Wallach, R. Fergus, S. V. N. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, 4-9 December 2017, Long Beach, CA, USA, pages 5967-5976, 2017. URL http://papers.nips.cc/paper/7178-fader-networksmanipulating-images-by-sliding-attributes.

[36] C. Li, X. Gao, Y. Li, X. Li, B. Peng, Y. Zhang, and J. Gao. Optimus: Organizing sentences via pre-trained modeling of a latent space. arXiv preprint arXiv:2004.04092, 2020.

[37] J. Li, M. Galley, C. Brockett, J. Gao, and W. B. Dolan. A diversity-promoting objective function for neural conversation models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 110-119, 2016.

[38] J. Li, R. Jia, H. He, and P. Liang. Delete, retrieve, generate: A simple approach to sentiment and style transfer. In 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL HLT 2018, pages 1865-1874. Association for Computational Linguistics (ACL), 2018.

[39] S. Li, S. Yavuz, K. Hashimoto, J. Li, T. Niu, N. Rajani, X. Yan, Y. Zhou, and C. Xiong. CoCo: Controllable counterfactuals for evaluating dialogue state trackers. In ICLR, 2021.

[40] R. Liu, C. Jia, J. Wei, G. Xu, L. Wang, and S. Vosoughi. Mitigating political bias in language models through reinforced calibration. In Proceedings of the AAAI Conference on Artificial Intelligence, 2021.

[41] F. Locatello, M. Tschannen, S. Bauer, G. Rätsch, B. Schölkopf, and O. Bachem. Disentangling factors of variations using few labels. In International Conference on Learning Representations, 2019.

[42] I. Loshchilov and F. Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2018.

[43] C. Louizos, U. Shalit, J. M. Mooij, D. Sontag, R. Zemel, and M. Welling. Causal effect inference with deep latent-variable models. In Advances in neural information processing systems (NeurIPS), pages 6446-6456, 2017.

[44] D. Lu, C. Tao, J. Chen, F. Li, F. Guo, and L. Carin. Reconsidering generative objectives for counterfactual reasoning. In NeurIPS, 2020.

[45] F. Luo, P. Li, J. Zhou, P. Yang, B. Chang, Z. Sui, and X. Sun. A dual reinforcement learning framework for unsupervised text style transfer. In IJCAI, 2019.

[46] N. Madaan, I. Padhi, N. Panwar, and D. Saha. Generate your counterfactuals: Towards controlled counterfactual generation for text. In AAAI, 2021.

[47] C. J. Maddison, A. Mnih, and Y. W. Teh. The concrete distribution: A continuous relaxation of discrete random variables. In ICLR, 2017.

[48] D. Madras, E. Creager, T. Pitassi, and R. Zemel. Fairness through causal awareness: Learning causal latent-variable models for biased data. In Proceedings of the Conference on Fairness, Accountability, and Transparency, pages 349-358, 2019.

[49] S. Mani and G. F. Cooper. Causal discovery from medical textual data. In Proceedings of the AMIA Symposium, page 542. American Medical Informatics Association, 2000.

[50] M. R. Montgomery, M. Gragnolati, K. A. Burke, and E. Paredes. Measuring living standards with proxy variables. Demography, 37(2):155-174, 2000.

[51] R. Moraffah, B. Moraffah, M. Karami, A. Raglin, and H. Liu. CAN: A causal adversarial network for learning observational and interventional distributions. arXiv preprint arXiv:2008.11376, 2020.

[52] M. Ozery-Flato, P. Thodoroff, M. Ninio, M. Rosen-Zvi, and T. El-Hay. Adversarial balancing for causal inference. arXiv preprint arXiv:1810.07406, 2018.

[53] K. Papineni, S. Roukos, T. Ward, and W.-J. Zhu. BLEU: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, 2002.

[54] G. Parascandolo, N. Kilbertus, M. Rojas-Carulla, and B. Schölkopf. Learning independent causal mechanisms. In International Conference on Machine Learning, pages 4036-4044. PMLR, 2018.

[55] N. Pawlowski, D. Coelho de Castro, and B. Glocker. Deep structural causal models for tractable counterfactual inference. Advances in Neural Information Processing Systems, 33, 2020.

[56] J. Pearl. Causality. Cambridge University Press, 2009.

[57] M. O. Prates, P. H. Avelar, and L. C. Lamb. Assessing gender bias in machine translation: a case study with Google Translate. Neural Computing and Applications, pages 1-19, 2019.

[58] L. Qin, A. Bosselut, A. Holtzman, C. Bhagavatula, E. Clark, and Y. Choi. Counterfactual story reasoning and generation. In EMNLP, pages 5046-5056, 2019.

[59] A. Radford, J. Wu, R. Child, D. Luan, D. Amodei, and I. Sutskever. Language models are unsupervised multitask learners. OpenAI Blog, 1(8):9, 2019.

[60] E. Reiter and R. Dale. Building applied natural language generation systems. Natural Language Engineering, 3(1):57-87, 1997.

[61] A. Sauer and A. Geiger. Counterfactual generative networks. In ICLR, 2021.

[62] B. Schölkopf. Causality for machine learning. arXiv preprint arXiv:1911.10500, 2019.

[63] U. Shalit, F. D. Johansson, and D. Sontag. Estimating individual treatment effect: generalization bounds and algorithms. In International Conference on Machine Learning, pages 3076-3085. PMLR, 2017.

[64] T. Shen, T. Lei, R. Barzilay, and T. Jaakkola. Style transfer from non-parallel text by cross-alignment. In Advances in neural information processing systems (NeurIPS), pages 6830-6841, 2017.

[65] X. Shen, F. Liu, H. Dong, Q. Lian, Z. Chen, and T. Zhang. Disentangled generative causal representation learning. arXiv preprint arXiv:2010.02637, 2020.

[66] E. Sheng, K.-W. Chang, P. Natarajan, and N. Peng. The woman worked as a babysitter: On biases in language generation. In EMNLP, pages 3398-3403, 2019.

[67] E. Sheng, K.-W. Chang, P. Natarajan, and N. Peng. Towards controllable biases in language generation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings, pages 3239-3254, 2020.

[68] A. Stafanovičs, M. Pinnis, and T. Bergmanis. Mitigating gender bias in machine translation with target gender annotations. In Proceedings of the Fifth Conference on Machine Translation, pages 629-638, 2020.

[69] G. Stanovsky, N. A. Smith, and L. Zettlemoyer. Evaluating gender bias in machine translation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1679-1684, 2019.

[70] J. H. Stock, M. W. Watson, et al. Introduction to econometrics, volume 3. Pearson New York, 2012.

[71] C. Tan, L. Lee, and B. Pang. The effect of wording on message propagation: Topic- and author-controlled natural experiments on Twitter. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 175-185, 2014.

[72] E. Wallace, S. Feng, N. Kandpal, M. Gardner, and S. Singh. Universal adversarial triggers for attacking and analyzing NLP. In EMNLP, 2019.

[73] Z. Wang, K. Qinami, I. C. Karakozis, K. Genova, P. Nair, K. Hata, and O. Russakovsky. Towards fairness in visual recognition: Effective strategies for bias mitigation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8919-8928, 2020.

[74] N. Weber, R. Rudinger, and B. Van Durme. Causal inference of script knowledge. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 7583-7596, 2020.

[75] Z. Wood-Doughty, I. Shpitser, and M. Dredze. Challenges of using text classifiers for causal inference. In EMNLP, volume 2018, page 4586. NIH Public Access, 2018.

[76] T. Wu, M. T. Ribeiro, J. Heer, and D. S. Weld. Polyjuice: Automated, general-purpose counterfactual generation. arXiv preprint arXiv:2101.00288, 2021.

[77] Y. Wu, K. Kuang, Y. Zhang, X. Liu, C. Sun, J. Xiao, Y. Zhuang, L. Si, and F. Wu. De-biased court's view generation with causality. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 763-780, 2020.

[78] M. Yang, F. Liu, Z. Chen, X. Shen, J. Hao, and J. Wang. CausalVAE: Structured causal disentanglement in variational autoencoder. arXiv preprint arXiv:2004.08697, 2020.

[79] Z. Yang, Z. Hu, C. Dyer, E. P. Xing, and T. Berg-Kirkpatrick. Unsupervised text style transfer using language models as discriminators. In NeurIPS, pages 7298-7309, 2018.

[80] L. Yao, Z. Chu, S. Li, Y. Li, J. Gao, and A. Zhang. A survey on causal inference. arXiv preprint arXiv:2002.02770, 2020.

[81] R. Zellers, A. Holtzman, H. Rashkin, Y. Bisk, A. Farhadi, F. Roesner, and Y. Choi. Defending against neural fake news. In NeurIPS, 2020.

[82] X. Zeng, Y. Li, Y. Zhai, and Y. Zhang. Counterfactual generator: A weakly-supervised method for named entity recognition. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 7270-7280, 2020.

[83] D. Zhang, H. Zhang, J. Tang, X.-S. Hua, and Q. Sun. Causal intervention for weakly-supervised semantic segmentation. Advances in Neural Information Processing Systems, 33, 2020.

[84] J. Zhao, T. Wang, M. Yatskar, V. Ordonez, and K.-W. Chang. Men also like shopping: Reducing gender bias amplification using corpus-level constraints. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, 2017.

[85] Q. Zhu, W. Zhang, T. Liu, and W. Y. Wang. Counterfactual off-policy training for neural dialogue generation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 3438-3448, 2020.

[86] R. Zmigrod, S. J. Mielke, H. Wallach, and R. Cotterell. Counterfactual data augmentation for mitigating gender stereotypes in languages with rich morphology. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1651-1661, 2019.

acausallensforcontrollabletextgeneration/images.zip
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ccbad44aedebb78e08c85450b80e7998aeaa857618891709486636cde1baabb
+size 231446

acausallensforcontrollabletextgeneration/layout.json
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a492938b821a376af83b47bd0e9a8189a7569d64fb0e7ad3fdff268216d0f8be
+size 536879

acentrallimittheoremfordifferentiallyprivatequeryanswering/becf66b0-4033-4827-93d2-82b61ec71945_content_list.json
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e10f18646cba0f4ade76c04378e6016de3d8a6a11acb1fc7b9faf88fd5155d4
+size 82444

acentrallimittheoremfordifferentiallyprivatequeryanswering/becf66b0-4033-4827-93d2-82b61ec71945_model.json
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a9a28e29790789b0ef0531f851d079360f4605599f23efc76781640d5af1740
+size 96843