Add Batch 313e0cae-a089-465e-bd09-2a8d6ba8e643
This view is limited to 50 files because it contains too many changes.
- 360ocameraalignmentviasegmentation/04eb013a-e8b7-4fd7-8619-766ed5039936_content_list.json +3 -0
- 360ocameraalignmentviasegmentation/04eb013a-e8b7-4fd7-8619-766ed5039936_model.json +3 -0
- 360ocameraalignmentviasegmentation/04eb013a-e8b7-4fd7-8619-766ed5039936_origin.pdf +3 -0
- 360ocameraalignmentviasegmentation/full.md +248 -0
- 360ocameraalignmentviasegmentation/images.zip +3 -0
- 360ocameraalignmentviasegmentation/layout.json +3 -0
- 3dbirdreconstructionadatasetmodelandshaperecoveryfromasingleview/d0d4d273-2c35-49ff-8f48-69b8de014f9b_content_list.json +3 -0
- 3dbirdreconstructionadatasetmodelandshaperecoveryfromasingleview/d0d4d273-2c35-49ff-8f48-69b8de014f9b_model.json +3 -0
- 3dbirdreconstructionadatasetmodelandshaperecoveryfromasingleview/d0d4d273-2c35-49ff-8f48-69b8de014f9b_origin.pdf +3 -0
- 3dbirdreconstructionadatasetmodelandshaperecoveryfromasingleview/full.md +285 -0
- 3dbirdreconstructionadatasetmodelandshaperecoveryfromasingleview/images.zip +3 -0
- 3dbirdreconstructionadatasetmodelandshaperecoveryfromasingleview/layout.json +3 -0
- 3dcvfgeneratingjointcameraandlidarfeaturesusingcrossviewspatialfeaturefusionfor3dobjectdetection/2f4d5859-2846-4dac-99f0-b3eb017ee241_content_list.json +3 -0
- 3dcvfgeneratingjointcameraandlidarfeaturesusingcrossviewspatialfeaturefusionfor3dobjectdetection/2f4d5859-2846-4dac-99f0-b3eb017ee241_model.json +3 -0
- 3dcvfgeneratingjointcameraandlidarfeaturesusingcrossviewspatialfeaturefusionfor3dobjectdetection/2f4d5859-2846-4dac-99f0-b3eb017ee241_origin.pdf +3 -0
- 3dcvfgeneratingjointcameraandlidarfeaturesusingcrossviewspatialfeaturefusionfor3dobjectdetection/full.md +257 -0
- 3dcvfgeneratingjointcameraandlidarfeaturesusingcrossviewspatialfeaturefusionfor3dobjectdetection/images.zip +3 -0
- 3dcvfgeneratingjointcameraandlidarfeaturesusingcrossviewspatialfeaturefusionfor3dobjectdetection/layout.json +3 -0
- 3dfluidflowreconstructionusingcompactlightfieldpiv/35b6e762-1496-4360-ad6f-60bd707da407_content_list.json +3 -0
- 3dfluidflowreconstructionusingcompactlightfieldpiv/35b6e762-1496-4360-ad6f-60bd707da407_model.json +3 -0
- 3dfluidflowreconstructionusingcompactlightfieldpiv/35b6e762-1496-4360-ad6f-60bd707da407_origin.pdf +3 -0
- 3dfluidflowreconstructionusingcompactlightfieldpiv/full.md +349 -0
- 3dfluidflowreconstructionusingcompactlightfieldpiv/images.zip +3 -0
- 3dfluidflowreconstructionusingcompactlightfieldpiv/layout.json +3 -0
- 3dhumanshapeandposefromasinglelowresolutionimagewithselfsupervisedlearning/693358b7-35b3-44f8-9c22-c9099f946cfd_content_list.json +3 -0
- 3dhumanshapeandposefromasinglelowresolutionimagewithselfsupervisedlearning/693358b7-35b3-44f8-9c22-c9099f946cfd_model.json +3 -0
- 3dhumanshapeandposefromasinglelowresolutionimagewithselfsupervisedlearning/693358b7-35b3-44f8-9c22-c9099f946cfd_origin.pdf +3 -0
- 3dhumanshapeandposefromasinglelowresolutionimagewithselfsupervisedlearning/full.md +303 -0
- 3dhumanshapeandposefromasinglelowresolutionimagewithselfsupervisedlearning/images.zip +3 -0
- 3dhumanshapeandposefromasinglelowresolutionimagewithselfsupervisedlearning/layout.json +3 -0
- 3dhumanshapereconstructionfromapolarizationimage/125ff13a-b1e2-45fc-bffd-467e0bdbd0c5_content_list.json +3 -0
- 3dhumanshapereconstructionfromapolarizationimage/125ff13a-b1e2-45fc-bffd-467e0bdbd0c5_model.json +3 -0
- 3dhumanshapereconstructionfromapolarizationimage/125ff13a-b1e2-45fc-bffd-467e0bdbd0c5_origin.pdf +3 -0
- 3dhumanshapereconstructionfromapolarizationimage/full.md +259 -0
- 3dhumanshapereconstructionfromapolarizationimage/images.zip +3 -0
- 3dhumanshapereconstructionfromapolarizationimage/layout.json +3 -0
- 3drotationequivariantquaternionneuralnetworks/b24ef9dc-ab0a-4997-98c2-4809a97518ff_content_list.json +3 -0
- 3drotationequivariantquaternionneuralnetworks/b24ef9dc-ab0a-4997-98c2-4809a97518ff_model.json +3 -0
- 3drotationequivariantquaternionneuralnetworks/b24ef9dc-ab0a-4997-98c2-4809a97518ff_origin.pdf +3 -0
- 3drotationequivariantquaternionneuralnetworks/full.md +279 -0
- 3drotationequivariantquaternionneuralnetworks/images.zip +3 -0
- 3drotationequivariantquaternionneuralnetworks/layout.json +3 -0
- 3dscenereconstructionfromasingleviewport/b63fd647-1c21-44b0-bebc-65b270d2ca68_content_list.json +3 -0
- 3dscenereconstructionfromasingleviewport/b63fd647-1c21-44b0-bebc-65b270d2ca68_model.json +3 -0
- 3dscenereconstructionfromasingleviewport/b63fd647-1c21-44b0-bebc-65b270d2ca68_origin.pdf +3 -0
- 3dscenereconstructionfromasingleviewport/full.md +277 -0
- 3dscenereconstructionfromasingleviewport/images.zip +3 -0
- 3dscenereconstructionfromasingleviewport/layout.json +3 -0
- 3pointtmfastermeasurementofhighdimensionaltransmissionmatrices/23bed598-2c66-4536-b2f6-7daecde70142_content_list.json +3 -0
- 3pointtmfastermeasurementofhighdimensionaltransmissionmatrices/23bed598-2c66-4536-b2f6-7daecde70142_model.json +3 -0
360ocameraalignmentviasegmentation/04eb013a-e8b7-4fd7-8619-766ed5039936_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:025a8c754c605b39cd24e6b970c3030857e77246af715c925a0bd72eec586eb6
size 74997
360ocameraalignmentviasegmentation/04eb013a-e8b7-4fd7-8619-766ed5039936_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c6c88ba7126e01e8cfd04fe398d878f73c058a2dca562a904a4e5bac50f11452
size 89902
360ocameraalignmentviasegmentation/04eb013a-e8b7-4fd7-8619-766ed5039936_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6a2250f8f0292a3aa8582669cda29d3cf02151d5df8816f57ba52662172aa172
size 13894962
360ocameraalignmentviasegmentation/full.md
ADDED
@@ -0,0 +1,248 @@
# $360^{\circ}$ Camera Alignment via Segmentation

Benjamin Davidson, Mohsan S. Alvi, and João F. Henriques

Disperse.io {ben, mohsan, joao}@disperse.io

Abstract. Panoramic $360^{\circ}$ images taken under unconstrained conditions present a significant challenge to current state-of-the-art recognition pipelines, since the assumption of a mostly upright camera is no longer valid. In this work, we investigate how to solve this problem by fusing purely geometric cues, such as apparent vanishing points, with learned semantic cues, such as the expectation that some visual elements (e.g. doors) have a natural upright position. We train a deep neural network to leverage these cues to segment the image-space endpoints of an imagined "vertical axis", which is orthogonal to the ground plane of a scene, thus levelling the camera. We show that our segmentation-based strategy significantly increases performance, reducing errors by half, compared to the current state-of-the-art on two datasets of $360^{\circ}$ imagery. We also demonstrate the importance of $360^{\circ}$ camera levelling by analysing its impact on downstream tasks, finding that incorrect levelling severely degrades the performance of real-world computer vision pipelines.
# 1 Introduction

The ability of $360^{\circ}$ (or spherical) imaging to record an entire scene with a single capture makes it a powerful tool, both for machine perception and for rapidly documenting entire scenes. For example, $360^{\circ}$ imaging has been used to record crime scenes where it is vital to image the entire scene for evidence [32], to easily create Virtual Reality (VR) videos with minimal cost [24], and is perhaps most widely recognized in its role in creating Google Street View [12]. Arrays of cameras that can be composed into a full $360^{\circ}$ image or video are also important in mobile applications with critical safety requirements, such as self-driving cars [1]. With the availability of inexpensive $360^{\circ}$ capture devices, and the growth of VR headsets, there is an increased demand for techniques to automatically analyse and process spherical images.

The recent successes of computer vision, with deep learning playing a key role in state-of-the-art object detectors [22], segmentation [30], camera pose estimation [20] and many others, seem to indicate that the same techniques should be directly applicable to $360^{\circ}$ images. However, there are specific difficulties associated with this modality that need to be addressed. One common problem for spherical images is a misalignment between the camera frame's ground plane and the world frame's ground plane (see fig. 2).

This misalignment makes automatically processing spherical images more challenging than it needs to be. For example, training a spherical object detector

Fig. 1. Illustration of the problem of levelling spherical images. From left to right: a spherical image captured at a tilted angle relative to the sky (red arrow), with the horizon line shown in blue; its 2D representation (equirectangular image), with heavy distortions due to the rotation; the same image, undistorted by our system; the aligned spherical image in 3D.

on misaligned images would require the network to learn a representation which was invariant to rotations away from the vertical axis [15]. In contrast to this, if all images are level (upright), the representation could be sensitive to these rotations, simplifying the task to be learned [11].

The ground-plane alignment that we focus on estimates 2 degrees of freedom (DOF) (roll and pitch), and must be contrasted to general camera pose estimation, which estimates 6 DOF (translation and rotation in 3D) [20]. Ground-plane alignment can be performed with a single image, by using simple cues (e.g. vertical walls, ground or sky/ceiling positions). In contrast, 6-DOF camera pose estimation requires extra reference images [14, 20, 19], making it much less applicable.

Aligning spherical images to the ground is also an important pre-processing step for downstream tasks (we demonstrate this empirically in sec. 3). State-of-the-art object detectors and segmentation networks are trained and evaluated on upright equirectangular images [37, 38, 8], and do not work under arbitrary rotations. Similarly, human visual recognition also degrades quickly with extreme rotations [33], and there are classification problems that are impossible to solve under arbitrary rotations (the canonical example being the distinction between the digits 9 and 6). Ground-plane alignment can also make pose estimation more robust, as estimating the pose of a levelled image requires two fewer DOF [28].

At a high level, our method estimates the axis orthogonal to the ground (vertical axis) by segmenting the unit sphere (where each point corresponds to a different direction) into likely candidates. We leverage a state-of-the-art segmentation network [30], by exploiting the fact that the unit sphere can be mapped to a 2D image via the equirectangular transform (sec. 2.1). The network is trained to segment the sphere into those directions likely to correspond to the vertical axis.

In addition to the novel segmentation formulation of this problem, we propose to combine the strengths of both geometrical methods and learning-based methods. Geometrical methods, such as those based on detecting and accumulating votes for locations of vanishing points (VP) [10, 21, 37], are very accurate, but brittle under noise and uncertainty. Learning-based methods are less accurate but very robust. We combine both, by incorporating a residual stream

Fig. 2. Overview of the proposed method. We train a convolutional network to produce a segmentation of an equirectangular image, using vanishing point features as extra geometric information. The output segmentation encodes the endpoints of the vertical axis, which we use to orient the image upright.

that propagates information about VP likelihoods, and this way informs our segmentation network with precise geometrical information.

By leveraging the power of feature engineering with state-of-the-art segmentation techniques, our method is the most accurate to date. We compare our method with the two most recent automatic alignment methods, Deep360Up [18] and Coarse2Fine [27]. We demonstrate improved performance on the Sun360 dataset [35], as well as on a new dataset of construction images that we collected.
# 1.1 Related work

Ground plane alignment is related to pose estimation [14, 20, 19], as described in sec. 1. Another related line of work is rotation invariant (or equivariant) networks [34, 15], which aims to make models more robust and predictable w.r.t. rotations, and is complementary to our method. We aim instead to predict and undo the effect of a single global rotation, with a semantically-defined reference (the ground plane).

The classical solution to ground plane alignment has been to extract the straight line segments from an image, and use these to estimate a vanishing point in the direction of the vertical axis [10, 21, 37]. These methods rely on what are known as the Manhattan or Atlanta world assumptions [4, 25], which assert that the scene that has been captured will contain some orthogonal structure, given the tendency in human construction to build at right angles. It must be remarked that this assumption does not always hold in practice. One typical way to extract this orthogonal structure is to determine the direction in which all straight lines in an image are pointing, and have each line vote on vanishing point directions [37], in a manner similar to the Hough transform [7] (cf. sec. 2.3). The orthogonal directions of the scene can then be found by looking for the three orthogonal directions which together have the most votes. However, many scenes may not have this orthogonal structure, and we may not be able to extract many straight line segments from the image. Moreover, the maximal orthogonal set found by maximisation may not be the true orthogonal directions. Due to the many assumptions of this approach, it is very brittle in practice, despite the apparent strength of the vanishing point features it uses.

Deep learning solutions to ground plane alignment have been shown to be more robust than the classical vanishing point methods. The existing methods are either a variation of a deep convolutional regression network [18, 17] or a classification network [27]. In the most recent regression network, referred to as Deep360Up, the vertical direction (pointing upwards) is output directly from a DenseNet [16], which is trained using the logarithm of the hyperbolic cosine between the estimated and ground truth vertical directions [18]. The most accurate and recent deep approach has been to use a coarse-to-fine classification network [27]. This approach, referred to as Coarse2Fine, classifies the pitch and roll of an image as belonging to a $10^{\circ}$ bin (coarse), thus adjusting the image to be within $\pm 5^{\circ}$, and then classifying the adjusted image to be within a $1^{\circ}$ bin (fine). Another standard feature of such solutions is to generate training data from already levelled images (which we discuss in sec. 2.4). Though these methods have once again demonstrated the power of deep networks, we show in sec. 3 that the proposed segmentation approach is more accurate.

A related line of work is to propose network architectures that directly work with spherical images, for example for classification and detection [3], or for segmentation and depth prediction [31]. Our levelling method can alleviate any upright-world assumptions in these works, as well as standard networks, and is thus complementary.
# 2 Methods

Our approach can broadly be split into three stages: calculating the vanishing points, segmenting the image, and processing the segmentation into a single vertical direction. Before describing our method in detail, we provide some background on equirectangular images and some useful operations.
# 2.1 Background on equirectangular images

An equirectangular image is a planar representation of an image on the sphere, where height and width correspond respectively to latitude and longitude. The explicit transformation (denoted $p$) between pixel coordinates $(x,y)$ and spherical coordinates $(\lambda,\phi)$ is straightforward:

$$
p: \mathbb{R}^{2} \to \mathbb{S}^{2}, \quad p(x, y) = \left(\frac{\pi y}{h}, 2\pi - \frac{\pi x}{w}\right) = (\lambda, \phi) \tag{1}
$$

where $w$ and $h$ are the dimensions in pixels. Note that this is an invertible transformation and so we can move from the image to the sphere and vice-versa. Using $p$ we will frequently refer to an equirectangular image as being on the sphere, by which we mean the projection of the image to the sphere. Furthermore we

Fig. 3. An example equirectangular image, and its corresponding projection on the faces of a cube. Note the many curved lines in the equirectangular image, which become straight in the corresponding cube face.

can map spherical to Cartesian coordinates and vice versa using the spherical-to-Cartesian transformation: $f(\lambda, \phi) = (\cos(\phi)\sin(\lambda), \sin(\phi)\sin(\lambda), \cos(\lambda))$. We can use these transformations to rotate an equirectangular image $I_{\mathrm{src}}$ to create another image $I_{\mathrm{dst}}$ of different orientation, by rotating the sphere. Starting from a point $x_{\mathrm{dst}}$ in the pixel space of $I_{\mathrm{dst}}$ we project $x_{\mathrm{dst}}$ to the sphere $p(x_{\mathrm{dst}})$, and rotate the sphere with a rotation matrix $R \in SO(3)$. Note that $R$ represents an arbitrary rotation in 3D space, with an axis of rotation not necessarily corresponding to latitude or longitude. Doing so gives the following relationship between coordinate systems:

$$
y_{\mathrm{src}} = R\, f\left(p\left(x_{\mathrm{dst}}\right)\right) \tag{2}
$$

After this transformation we project back to image space: $x_{\mathrm{src}} = p^{-1}(f^{-1}(y_{\mathrm{src}}))$. The transformation of image coordinates $x_{\mathrm{dst}}$ to $x_{\mathrm{src}}$ allows us to re-sample an image $I_{\mathrm{src}}$ to create $I_{\mathrm{dst}}$, for example by bilinear interpolation [15]. As can be seen in eq. (2), we may rotate the image so that we have an equirectangular image of any orientation, which we will use to generate training data for our segmentation network.
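To make these coordinate conventions concrete, here is a minimal NumPy sketch of the pixel-to-sphere mapping $p$, the spherical-to-Cartesian mapping $f$, and the rotation-based resampling of eq. (2). This is not the authors' code: the function names are ours, we adopt the common convention that the image width spans the full $2\pi$ of longitude, and we use nearest-neighbour rather than bilinear interpolation for brevity.

```python
import numpy as np

def pix_to_sphere(x, y, w, h):
    # p: pixel coordinates -> (latitude, longitude); width assumed to span 2*pi.
    lam = np.pi * (y + 0.5) / h
    phi = 2.0 * np.pi * (1.0 - (x + 0.5) / w)
    return lam, phi

def sphere_to_pix(lam, phi, w, h):
    # Inverse of pix_to_sphere (p^-1).
    y = lam / np.pi * h - 0.5
    x = (1.0 - phi / (2.0 * np.pi)) * w - 0.5
    return x % w, np.clip(y, 0, h - 1)

def sphere_to_cart(lam, phi):
    # f(lam, phi) = (cos(phi) sin(lam), sin(phi) sin(lam), cos(lam)).
    return np.stack([np.cos(phi) * np.sin(lam),
                     np.sin(phi) * np.sin(lam),
                     np.cos(lam)], axis=-1)

def cart_to_sphere(v):
    # f^-1: unit vectors -> (latitude, longitude).
    v = v / np.linalg.norm(v, axis=-1, keepdims=True)
    lam = np.arccos(np.clip(v[..., 2], -1.0, 1.0))
    phi = np.arctan2(v[..., 1], v[..., 0]) % (2.0 * np.pi)
    return lam, phi

def rotate_equirect(img, R):
    # Resample img (h, w, c) under a sphere rotation R, following eq. (2):
    # each destination pixel x_dst samples the source at p^-1(f^-1(R f(p(x_dst)))).
    h, w = img.shape[:2]
    ys, xs = np.meshgrid(np.arange(h), np.arange(w), indexing="ij")
    lam, phi = pix_to_sphere(xs, ys, w, h)
    v_src = sphere_to_cart(lam, phi) @ R.T
    lam_s, phi_s = cart_to_sphere(v_src)
    x_src, y_src = sphere_to_pix(lam_s, phi_s, w, h)
    return img[y_src.astype(int), x_src.astype(int) % w]  # nearest-neighbour sampling
```

With the identity rotation the image is returned unchanged; a rotation about the x-axis by $\theta$ tilts the virtual camera by $\theta$, which is exactly how the synthetically rotated training data described in sec. 2.4 can be produced.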
Another subtle but important aspect about equirectangular images is how to extract straight line segments visible within the scene. Straight lines in the scene do not in general map to straight lines in an equirectangular image (see fig. 3). To recover straight lines from an equirectangular image, we need to convert it to one or more perspective images. We cover the full $360^{\circ}$ view with perspective views, corresponding to 6 cube faces (see fig. 3). Each one is produced by rendering the sphere (with the mapped texture) from 6 different points-of-view, at right angles. This "cube mapping" is commonly used in computer graphics to render far-away scenes [26]. This allows using unmodified line segment detectors.
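The cube-mapping step can be sketched as a gnomonic (perspective) projection of the sphere onto six $90^{\circ}$ faces. The snippet below is an illustrative NumPy version, not the paper's implementation; the face orientations and the nearest-neighbour sampling are assumptions made for brevity.

```python
import numpy as np

# World-frame (forward, up) vectors for the 6 cube faces: an assumed but common layout.
CUBE_FACES = [(( 1, 0, 0), (0, 0, 1)), ((-1, 0, 0), (0, 0, 1)),
              (( 0, 1, 0), (0, 0, 1)), (( 0, -1, 0), (0, 0, 1)),
              (( 0, 0, 1), (0, 1, 0)), (( 0, 0, -1), (0, 1, 0))]

def cube_face(equirect, forward, up, size=512):
    """Render one 90-degree-FOV perspective view of an equirectangular image,
    so that ordinary straight-line detectors can be applied to it."""
    h, w = equirect.shape[:2]
    f = np.asarray(forward, float); f /= np.linalg.norm(f)
    u = np.asarray(up, float)
    r = np.cross(u, f); r /= np.linalg.norm(r)
    u = np.cross(f, r)
    # Image-plane grid at focal length 1 (half-extent tan(45 deg) = 1).
    vv, uu = np.meshgrid(np.linspace(-1, 1, size), np.linspace(-1, 1, size), indexing="ij")
    rays = uu[..., None] * r - vv[..., None] * u + f           # world-frame viewing rays
    rays /= np.linalg.norm(rays, axis=-1, keepdims=True)
    lam = np.arccos(np.clip(rays[..., 2], -1.0, 1.0))          # latitude
    phi = np.arctan2(rays[..., 1], rays[..., 0]) % (2 * np.pi) # longitude
    ys = np.clip((lam / np.pi) * h, 0, h - 1).astype(int)
    xs = ((1.0 - phi / (2 * np.pi)) * w).astype(int) % w
    return equirect[ys, xs]

# faces = [cube_face(img, fwd, up) for fwd, up in CUBE_FACES]
```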
# 2.2 Segmentation framework
|
| 79 |
+
|
| 80 |
+
Our method is based on a convolutional neural network optimised for segmentation, with side-information about vanishing points as input to an attention module. The output of our network is a binary segmentation of the original equirectangular image, which by applying the pixel to spherical transform $p$ may be thought of as a segmentation of the sphere into background and likely
|
| 81 |
+
|
| 82 |
+
directions for the vertical axis. Specifically, we segment all points on the sphere which are within 5 degrees of the north or south pole, where the poles are taken relative to camera coordinates (see fig. 2 for an example segmentation). By embedding all useful inputs and outputs in a 2-dimensional space, we can leverage highly successful 2D segmentation networks, and allow predictions to be based on both geometric and semantic cues (i.e., vanishing points and poses of distinctive objects in images).
|
| 83 |
+
|
| 84 |
+
Network architecture. The base architecture that we use is the Gated-Shape CNN (GSCNN) [30]. The GSCNN is a fully convolutional network, designed to utilise side information about object boundaries to improve its performance on semantic segmentation tasks. It consists of a backbone feature extractor, in our case InceptionV3 [29], an ASPP (atrous spatial pyramid pooling) layer, and the shape stream. The shape stream in the original work accepts image gradients and intermediate backbone features as inputs, and outputs a single channel feature image. The output shape stream features are then combined with other backbone features in the ASPP layer to generate a dense feature map of the same resolution as the input image.
|
| 85 |
+
|
| 86 |
+
Our architecture modifies GSCNNs so that it would be more informative to call the shape stream the vanishing point stream, as we replace image gradients with the vanishing point image $V$ (see sec. 2.3). The reasoning behind this is that $V$ is a feature that is highly informative w.r.t. the vertical axis, and we would like to let the network exploit this source of information. Also, feeding $V$ to the network in this manner allows us to use a pre-trained backbone network, which would not be possible by just concatenating $V$ to the channels of the image. Using a GSCNN enabled us to introduce information relating to vanishing points, whilst also retaining the ability to use pre-trained backbones.
|
| 87 |
+
|
| 88 |
+
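As a rough illustration of how side information like $V$ can be fed to a segmentation network alongside a pre-trained backbone, here is a toy two-stream PyTorch module. It is only a sketch of the idea, not the GSCNN of [30]: a ResNet-18 stands in for the InceptionV3 backbone, the ASPP layer is omitted, and a simple sigmoid gate computed from $V$ modulates the backbone features.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision

class VPStreamSegNet(nn.Module):
    """Toy two-stream segmenter: a standard backbone for the RGB image plus a small
    'vanishing point stream' that gates the backbone features with the VP image V."""
    def __init__(self):
        super().__init__()
        backbone = torchvision.models.resnet18(weights=None)
        self.stem = nn.Sequential(*list(backbone.children())[:-2])   # (B, 512, H/32, W/32)
        self.vp_stream = nn.Sequential(
            nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(16, 1, 3, padding=1), nn.Sigmoid())             # attention map from V
        self.head = nn.Conv2d(512 + 1, 1, kernel_size=1)              # binary segmentation logit

    def forward(self, image, vp_image):
        feats = self.stem(image)                                      # backbone features
        gate = self.vp_stream(vp_image)                               # (B, 1, H, W)
        gate = F.interpolate(gate, size=feats.shape[-2:], mode="bilinear", align_corners=False)
        fused = torch.cat([feats * gate, gate], dim=1)                # gate features, keep VP evidence
        logits = self.head(fused)
        return F.interpolate(logits, size=image.shape[-2:], mode="bilinear", align_corners=False)

# net = VPStreamSegNet()
# logits = net(torch.randn(1, 3, 256, 512), torch.randn(1, 1, 256, 512))  # -> (1, 1, 256, 512)
```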
# 2.3 Vanishing point image

Vanishing points have proven to be a strong geometric cue for many computer vision tasks, including ground plane alignment [10]. In many scenes a horizon line is visible, or orthogonal structures such as the corners of buildings. These

Fig. 4. An equirectangular image and the corresponding vanishing point image (sec. 2.3). The 6 regions highlighted in red are areas which have received a large number of votes. Note that each highly-voted region corresponds to one face of the approximately cuboid room (the four walls, floor and ceiling).

Fig. 5. Illustration of how to calculate vanishing point features from line segments in a cube face. We can see that every point on the great circle formed by intersecting the plane and the sphere will be orthogonal to the normal vector. Therefore, every point in this circle receives a vote. In practice the 2D surface of the sphere is discretised (sec. 2.1), and every bin within some threshold distance of this circle receives a vote.
structures are useful for determining the vertical axis, and can be emphasised by calculating vanishing points (see fig. 4). Moreover, these features can be computed directly from images, with no learning required. This makes them excellent features for our purpose.

To build the vanishing point image in fig. 4 we extract all of the straight lines in the scene and use each line to vote on vanishing directions. The first step of this process is to project the equirectangular image to the 6 cube faces (fig. 3, right) and extract line segments from each face. To extract the line segments we use Canny edge detection combined with a probabilistic Hough transform [2, 9]. We then convert each line segment to a plane, defined by the line endpoints, and the origin of the sphere. Let $n$ be the normal vector to this plane. We use $n$ to vote for vanishing point locations, by voting for all directions on the sphere which are orthogonal to $n$. Geometrically this means all points on the great circle defined by the intersection of the plane and sphere receive a vote. In practice we split the sphere into $h \times w$ bins by projecting each pixel in an equirectangular image $I$ to the sphere and then voting via

$$
V^{n}_{h_0, w_0} = \begin{cases} 1 & \left| n \cdot f\left(p\left(I_{h_0, w_0}\right)\right) \right| < \lambda_{\text{vanishing}} \\ 0 & \text{otherwise} \end{cases} \tag{3}
$$

We calculate a normal vector $n$ for every line segment and accumulate votes by summing $V = \sum_{n} V^{n}$. Finally we normalise $V$ to be an intensity image with values in the range of [0, 255]. High values will correspond to probable vanishing points, which have many line segments pointing towards them, and will assist our network in finding the vertical axis (fig. 5).
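A small NumPy sketch of the voting scheme in eq. (3) is given below. It assumes that line segments have already been detected on the cube faces and that their endpoints have been lifted to the unit sphere, so each segment contributes a plane normal $n$; the bin resolution and the value of $\lambda_{\text{vanishing}}$ are illustrative choices, not values taken from the paper.

```python
import numpy as np

def sphere_dirs(h, w):
    # Unit direction f(p(x, y)) for every bin of an h-by-w equirectangular grid.
    lam = np.pi * (np.arange(h) + 0.5) / h                    # latitude per row
    phi = 2.0 * np.pi * (1.0 - (np.arange(w) + 0.5) / w)      # longitude per column
    lam, phi = np.meshgrid(lam, phi, indexing="ij")
    return np.stack([np.cos(phi) * np.sin(lam),
                     np.sin(phi) * np.sin(lam),
                     np.cos(lam)], axis=-1)                    # (h, w, 3)

def vanishing_point_image(normals, h=256, w=512, lam_vanishing=0.02):
    # Eq. (3): each plane normal n votes for every sphere bin whose direction is
    # nearly orthogonal to n, i.e. |n . f(p(x, y))| < lambda_vanishing.
    dirs = sphere_dirs(h, w)
    votes = np.zeros((h, w), dtype=np.float64)
    for n in normals:
        n = np.asarray(n, float)
        n = n / np.linalg.norm(n)
        votes += (np.abs(dirs @ n) < lam_vanishing).astype(np.float64)
    if votes.max() > 0:                                        # normalise to 8-bit intensities
        votes = votes / votes.max()
    return (255 * votes).astype(np.uint8)

# A segment with unit-sphere endpoints a, b together with the sphere origin defines a
# plane with normal n = np.cross(a, b); V = vanishing_point_image([np.cross(a, b)]).
```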
# 2.4 Training method

To train our network, we use a weighted generalised dice loss [5] on uniformly distributed points on the sphere. This is in contrast to the original GSCNN work, which utilises auxiliary and regularising losses [30] that have no direct analogue in our setting. We chose to use the generalised dice loss as this has been shown to perform well in situations where there are large class imbalances between foreground and background classes [5]. This is a concern in our setting, since vanishing points are sparse.

We do not compute the loss directly on the 2D segmentation image, as this would over-sample the polar regions, thus disproportionately weighing vertical directions near them. Instead, we select points that are uniformly distributed around the sphere, and project these into the equirectangular segmentation and ground truth. Finally, we interpolate the values of each projected point to construct $y$ and $\hat{y}$.
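The loss computation can be sketched as follows: sample directions uniformly on the sphere, look up the prediction and the ground truth at those directions, and evaluate a two-class generalised dice loss on the samples. The snippet uses NumPy and nearest-neighbour lookups for clarity; a training implementation would perform the same arithmetic on framework tensors with differentiable (bilinear) sampling, and the exact weighting of [5] may differ in detail.

```python
import numpy as np

def uniform_sphere_points(n, rng=None):
    # Uniform directions on S^2: longitude uniform, cosine of latitude uniform.
    rng = np.random.default_rng(0) if rng is None else rng
    phi = rng.uniform(0.0, 2.0 * np.pi, n)
    lam = np.arccos(rng.uniform(-1.0, 1.0, n))     # latitude measured from the pole
    return lam, phi

def sample_equirect(img, lam, phi):
    # Nearest-neighbour lookup of an (h, w) equirectangular map at spherical coords.
    h, w = img.shape
    ys = np.clip((lam / np.pi) * h, 0, h - 1).astype(int)
    xs = ((1.0 - phi / (2.0 * np.pi)) * w).astype(int) % w
    return img[ys, xs]

def generalised_dice_loss(pred, target, n_points=10000, eps=1e-6):
    # Soft generalised dice on sphere-uniform samples, so polar pixels are not
    # over-weighted the way a plain per-pixel loss on the 2D image would be.
    lam, phi = uniform_sphere_points(n_points)
    y_hat = sample_equirect(pred, lam, phi)        # predicted foreground probability in [0, 1]
    y = sample_equirect(target, lam, phi)          # binary ground truth
    num, den = 0.0, 0.0
    for y_c, p_c in ((y, y_hat), (1.0 - y, 1.0 - y_hat)):   # foreground, background classes
        w_c = 1.0 / (y_c.sum() ** 2 + eps)         # inverse-frequency class weight
        num += w_c * (p_c * y_c).sum()
        den += w_c * (p_c + y_c).sum()
    return 1.0 - 2.0 * num / (den + eps)
```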
Training data. The data fed to our network during training are equirectangular images (e.g. from the Sun360 dataset [36]), and ground truth equirectangular segmentations, which we generate from already levelled equirectangular images. The dataset we begin with consists only of levelled equirectangular images. For all of these images, we know that the vertical direction is $z = (0,0,1)$. By rotating a levelled image with a random rotation $R$ and using eq. (2), we know that the resulting vertical direction of the rotated image will be $R^{-1}z$. From this we can generate training pairs of image and vertical direction. Now, given a vertical direction, it is simple to construct a binary equirectangular segmentation. Let $u$ be the generic vertical direction for some image and $I$ an equirectangular image. After applying $f \circ p$ to all pixels in $I$, we can consider the $i,j^{\text{th}}$ pixel as sitting at $x_{i,j}$ on the sphere in $\mathbb{R}^3$. Our segmentation $s_{i,j}$ is 1 where $|u \cdot x_{i,j}| > \lambda_{\text{seg}}$ and 0 otherwise, which means that we consider pixels that project near to the vertical axis as foreground (1) and all others as background (0). Rotating level images whilst keeping track of the vertical axis allows us to construct many pairs of image and segmentation from a single levelled image.
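Generating the binary target for a known vertical direction $u$ then amounts to thresholding $|u \cdot x_{i,j}|$ over the pixel grid. A minimal NumPy sketch is below; since the paper does not state $\lambda_{\text{seg}}$ numerically, we assume $\lambda_{\text{seg}} = \cos(5^{\circ})$ to match the $5^{\circ}$ polar caps described in sec. 2.2.

```python
import numpy as np

def vertical_axis_mask(u, h=256, w=512, lam_seg=np.cos(np.deg2rad(5.0))):
    # Binary equirectangular target: pixel (i, j) is foreground when its direction
    # x_ij on the sphere satisfies |u . x_ij| > lambda_seg, i.e. it lies within
    # roughly 5 degrees of either endpoint of the vertical axis u.
    u = np.asarray(u, float)
    u = u / np.linalg.norm(u)
    lam = np.pi * (np.arange(h) + 0.5) / h
    phi = 2.0 * np.pi * (1.0 - (np.arange(w) + 0.5) / w)
    lam, phi = np.meshgrid(lam, phi, indexing="ij")
    x = np.stack([np.cos(phi) * np.sin(lam),
                  np.sin(phi) * np.sin(lam),
                  np.cos(lam)], axis=-1)              # x_ij = f(p(i, j))
    return (np.abs(x @ u) > lam_seg).astype(np.uint8)

# For a level image u = (0, 0, 1); for an image rotated by R, u = R^{-1} (0, 0, 1).
mask = vertical_axis_mask([0.0, 0.0, 1.0])
```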
To actually generate our dataset we compute $n_{\mathrm{rot}}$ rotations which will place the vertical axis uniformly around the sphere, and then apply a small offset rotation. Performing these almost uniform rotations avoids using the same $n_{\mathrm{rot}}$ rotations for every image, whilst ensuring that the directions completely cover the sphere (cf. Appendix A). Note there are infinitely many rotations placing the vertical direction at a specific point (by rolling around the vertical axis). We incorporate this rotation online during training, as the rotation can be represented via rolling the equirectangular image along its width axis.

Even when using high-quality interpolation methods, we cannot avoid rotational artifacts appearing in the rotated images, which can adversely impact generalization performance. This relatively subtle issue will be discussed in sec. 3.
# 2.5 Test-time prediction

Once we have an equirectangular segmentation, we can extract a vertical direction by selecting the most probable connected component and taking its centroid

Fig. 6. Accuracy evaluation on the test splits of 3 datasets. From left to right: Sun360 with the same artificial rotations as used by Deep360Up [18], Sun360 without rotation, and the construction dataset (sec. 3.3). We report the accuracy over different angular thresholds, for 4 methods: ours, vanishing points (VP, a purely geometric method), and 2 state-of-the-art deep learning methods (Deep360Up and Coarse2Fine). Our method significantly outperforms others on images without artificial rotations (center and right).
as the vertical direction. Given such a centroid $c$ and eq. (1) we recover the vertical direction via $p(c) \in \mathbb{S}^2$. While this is a simple computer vision operation, for completeness we describe it fully in Appendix A.

Test-time augmentation. The final stage of our approach is an optional test-time augmentation, which may rotate an image and rerun the segmentation. Let $u$ be a candidate vertical direction obtained after running a single forward pass of the network. If the image was already close to level, i.e. $u$ is close to $z = (0,0,1)$, then we rotate the image's pitch by $20^{\circ}$ and rerun the entire inference and post-processing steps to get a new $u'$. The reason for this is that, if the image is already close to level, the vanishing point features for the vertical axis are close to the points of most distortion: $\pm z$. Following this, we rotate $u'$ back $20^{\circ}$ and take the resulting vector as the vertical direction.
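The post-processing step can be sketched with SciPy's connected-component labelling: threshold the predicted probabilities, keep the component with the largest total probability, take its centroid, and map the centroid back to a direction on the sphere. This is our reading of the procedure rather than the authors' Appendix A code, and it omits the optional test-time augmentation.

```python
import numpy as np
from scipy import ndimage

def vertical_direction_from_mask(prob, threshold=0.5):
    # prob: (h, w) predicted foreground probabilities on the equirectangular grid.
    mask = prob > threshold
    labels, n = ndimage.label(mask)
    if n == 0:
        return np.array([0.0, 0.0, 1.0])             # fall back to "already level"
    # Most probable connected component = largest summed probability.
    best = max(range(1, n + 1), key=lambda k: prob[labels == k].sum())
    cy, cx = ndimage.center_of_mass(labels == best)   # centroid c in pixel space
    h, w = prob.shape
    lam = np.pi * (cy + 0.5) / h                      # eq. (1) applied to the centroid
    phi = 2.0 * np.pi * (1.0 - (cx + 0.5) / w)
    return np.array([np.cos(phi) * np.sin(lam),       # lift p(c) to a unit vector
                     np.sin(phi) * np.sin(lam),
                     np.cos(lam)])
```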
Testing data. We collected a test set of unlevelled images, where the vertical direction has been calculated manually. To calculate the vertical direction, two lines that are vertical in the world frame are manually identified, which allows us to construct a plane parallel to the ground plane, by computing the normals as in sec. 2.3. This plane parallel to the ground trivially gives us the vertical axis, as the axis orthogonal to the plane. By ensuring we use unrotated test images, we avoid data leakage due to rotational artifacts present in the images (see sec. 3).
# 3 Experiments

We trained and tested our methods on three datasets: the Sun360 dataset, a synthetic dataset of noise, and a dataset of construction images. Training on synthetically rotated Sun360 images can lead to rotational artifacts (sec. 3.2) being learned by the network, a common problem with similar synthetic training regimes [23, 6]. As all images in the Sun360 dataset are level, we could only evaluate our network's performance on levelled images without introducing rotational artifacts. To measure the extent to which the network relies on rotational artifacts, we created a dataset of rotated noise images. Lastly, to accurately estimate the network's performance on unlevelled images, without the aid of artifacts, we collected a dataset consisting of images which were not level, and for which the vertical direction was known.

In the following experiments, we compare our method with that of Deep360Up [18] and a baseline vanishing point (VP) method based on [38]. We made use of the publicly available Deep360Up implementation. When possible we also report the performance of the Coarse2Fine [27] approach, by testing on the same test set. On the Sun360 dataset we also show the importance of the vanishing point stream, by removing it from the network and observing a reduced performance.

Finally, we demonstrate the importance of levelling images for downstream tasks by training a segmentation network on levelled and unlevelled images. We make use of our implementation of the original GSCNN work as the segmentation network, and the dataset from [13].
# 3.1 Sun360 dataset

This dataset consists of 30,000 levelled images, and we use an 80-10-10 split for training, validation and test. As all images in this dataset are already level, we cannot test on rotated images without introducing rotational artifacts. To account for this, we evaluated the network on the original, level, images as well as rotated images. We report performance on 3 subsets of data: the unrotated level test set (referred to as Test Flat), the unrotated validation set, and a rotated validation set where all vertical directions are in the upper-hemisphere. To compare our method with both Deep360Up and Coarse2Fine we also report results on a synthetically rotated test set, referred to as Test Deep360Up, consisting of 17,825 images that were used to evaluate both methods in the original works.

The accuracy of our method as well as the brittleness of the classical vanishing point method is shown in table 1. Our approach is the most accurate on both the level and synthetically rotated test sets, when considering a threshold of at least $2^{\circ}$. The Coarse2Fine approach does achieve a higher accuracy than our method when considering a $1^{\circ}$ threshold, but then falls off to be the lowest of all the considered deep learning methods at larger thresholds. A possible explanation for this dropoff is that the Coarse2Fine approach solves the problem in two stages: first adjusting the image to be within $10^{\circ}$ of level, and then refining this adjusted image to be within $1^{\circ}$. Therefore, if the initial estimation is incorrect, the network can never recover the true vertical direction. In contrast, our method is completely end-to-end, and so we do not depend on the output of a previous stage, giving a more robust approximation. Here we also demonstrate the importance of the vanishing point stream, as removing it significantly reduces performance. The poor performance of the vanishing point method is explained

Table 1. Performance for different subsets of the Sun360 dataset (see text for details). We report the percentage of images for which the vertical axis is correctly estimated within a threshold of $x^{\circ}$.
<table><tr><td colspan="2"></td><td colspan="8">Percentage of estimated axes within x°</td></tr><tr><td>Dataset</td><td>Method</td><td>1°</td><td>2°</td><td>3°</td><td>4°</td><td>5°</td><td>7.5°</td><td>10°</td><td>12°</td></tr><tr><td rowspan="4">Val Rotated</td><td>Ours</td><td>25.1</td><td>60.5</td><td>80.5</td><td>90.3</td><td>94.7</td><td>97.5</td><td>98.1</td><td>98.4</td></tr><tr><td>Ours (no VP)</td><td>20.9</td><td>45.6</td><td>57.0</td><td>74.4</td><td>90.2</td><td>96.5</td><td>97.4</td><td>97.8</td></tr><tr><td>Deep360Up</td><td>4.9</td><td>17.6</td><td>34.8</td><td>54.7</td><td>70.1</td><td>91.0</td><td>95.7</td><td>97.5</td></tr><tr><td>VP</td><td>0.2</td><td>0.7</td><td>1.2</td><td>1.5</td><td>1.7</td><td>2.2</td><td>2.5</td><td>2.8</td></tr><tr><td rowspan="4">Val Flat</td><td>Ours</td><td>34.7</td><td>78.8</td><td>92.4</td><td>96.4</td><td>97.9</td><td>99.0</td><td>99.3</td><td>99.4</td></tr><tr><td>Ours (no VP)</td><td>0.3</td><td>1.0</td><td>2.9</td><td>67.0</td><td>97.0</td><td>98.9</td><td>99.3</td><td>99.4</td></tr><tr><td>Deep360Up</td><td>9.8</td><td>20.5</td><td>33.3</td><td>45.5</td><td>58.8</td><td>81.4</td><td>93.2</td><td>97.1</td></tr><tr><td>VP</td><td>0.5</td><td>2.8</td><td>5.1</td><td>7.3</td><td>9.4</td><td>12.9</td><td>16.0</td><td>17.6</td></tr><tr><td rowspan="5">Test Deep360</td><td>Ours</td><td>19.7</td><td>53.6</td><td>75.5</td><td>87.2</td><td>92.6</td><td>97.1</td><td>98.2</td><td>98.4</td></tr><tr><td>Ours (no VP)</td><td>7.5</td><td>23.5</td><td>40.3</td><td>55.9</td><td>68.2</td><td>87.1</td><td>94.8</td><td>97.4</td></tr><tr><td>Deep360Up</td><td>7.1</td><td>24.5</td><td>43.9</td><td>60.7</td><td>74.2</td><td>91.9</td><td>96.6</td><td>97.9</td></tr><tr><td>Coarse2Fine</td><td>30.9</td><td>51.7</td><td>65.9</td><td>74.1</td><td>79.1</td><td>NA</td><td>NA</td><td>91.0</td></tr><tr><td>VP</td><td>0.3</td><td>0.9</td><td>1.6</td><td>2.1</td><td>2.5</td><td>3.3</td><td>3.8</td><td>4.2</td></tr><tr><td rowspan="4">Test Flat</td><td>Ours</td><td>34.0</td><td>78.4</td><td>92.4</td><td>96.2</td><td>97.8</td><td>98.8</td><td>99.3</td><td>99.4</td></tr><tr><td>Ours (no VP)</td><td>0.4</td><td>1.3</td><td>3.1</td><td>63.9</td><td>96.5</td><td>98.6</td><td>99.0</td><td>99.2</td></tr><tr><td>Deep360Up</td><td>10.2</td><td>22.5</td><td>35.3</td><td>48.2</td><td>60.1</td><td>82.3</td><td>93.4</td><td>97.3</td></tr><tr><td>VP</td><td>0.3</td><td>2.5</td><td>6.1</td><td>8.8</td><td>11.0</td><td>14.1</td><td>16.7</td><td>18.5</td></tr></table>
by the nature of the Sun360 dataset, which consists of mostly natural scenes (e.g. forests), and therefore does not satisfy the Manhattan world assumption.
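For reference, the evaluation metric used in fig. 6 and tables 1-2 reduces to the angular error between estimated and ground-truth vertical directions, and the fraction of images whose error falls below each threshold. A small NumPy sketch (our own code, assuming $N \times 3$ arrays of unit-ish vectors):

```python
import numpy as np

def angular_errors(u_est, u_true):
    # Angle in degrees between estimated and ground-truth vertical directions (per row).
    u_est = u_est / np.linalg.norm(u_est, axis=1, keepdims=True)
    u_true = u_true / np.linalg.norm(u_true, axis=1, keepdims=True)
    cos = np.clip(np.sum(u_est * u_true, axis=1), -1.0, 1.0)
    return np.degrees(np.arccos(cos))

def accuracy_within(errors_deg, thresholds=(1, 2, 3, 4, 5, 7.5, 10, 12)):
    # Percentage of images whose error is within each threshold, as in tables 1 and 2.
    return {t: 100.0 * np.mean(errors_deg <= t) for t in thresholds}
```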
# 3.2 Noise dataset

As we synthetically rotate images during training, it was crucial to ensure the network was not "cheating", i.e. simply using visual artifacts induced by synthetic rotations to solve the problem, and not learning high-level cues that generalize to images with real rotations. Deep networks are very efficient at finding the simplest solution to a problem, and the existence of shortcuts is a prevalent problem in unsupervised learning, for example taking advantage of boundary effects [23, sec. 4.2] or chromatic aberrations of lenses [6, sec. 3.1].

We demonstrate empirically that, in fact, a network can invert a rotation on pure noise successfully. To do this, we generated images of random (white) noise, rotated them, and used them to train both our method and the Deep360Up method. We found that in both cases the network could learn to undo the transformation. This highlights the need for an unrotated test set to be sure of the network's performance at test time. Note that we generated a new random noise image at each training and validation step, meaning that this was not a result of over-fitting, as every image the network saw was different. For Deep360Up, we observed the average angular error in this case to be around 5 degrees, and for our method we saw the generalised dice loss fall to 0.04. Both indicate that the network was able to significantly beat chance using only rotational artifacts.

Fig. 7. Visualisation of automatically levelled validation images: proposed method (top), Deep360Up (middle), and the misaligned counterparts (bottom row).
# 3.3 Construction dataset

To ensure our network was actually solving the problem at hand, we collected a dataset of images from construction sites where we had the raw capture, and the vertical axis of the raw capture. This dataset consists of 10,054 images where we use a 90-10 split for training and validation, and 1006 images for testing. The imbalance in the number of images for training and validation compared to testing images is due to the nature of the data collection process: the training and validation images were already rotated to be level; in contrast, the testing data was gathered manually and consisted of the original capture, which in many cases was not level. This permitted us to test our approach on unlevelled images that did not contain rotational artifacts. A total of 9365 distinct locations were captured from 16 construction sites, with no overlap in locations between the training and testing data. $48.7\%$ of images were within $3^{\circ}$ of level, and $90\%$ were within $12^{\circ}$ of level; see fig. 8 for typical example scenes from this dataset. Again, our method was considerably more accurate than existing state-of-the-art techniques. Table 2 shows that our approach is the most accurate on all datasets, achieving $98\%$ of estimates within 5 degrees for the test set.

Note that the performance of the vanishing point method on the construction data is significantly better than when applied to the Sun360 data. This can be explained by the construction dataset consisting of rooms that satisfy the Manhattan world assumption, in contrast to the Sun360 dataset.
# 3.4 Downstream segmentation task

To illustrate the importance of levelling images for downstream tasks we trained several segmentation models using the dataset in [13], which consists of 666 images from the Sun360 dataset, for which the authors have added segmentation labels for 15 classes. As all images in the Sun360 dataset are already level, we created a rotated segmentation dataset by randomly rotating each image so

Table 2. Performance for different subsets of the construction dataset (see text for details). We report the percentage of images for which the vertical axis is correctly estimated within a threshold of $x^{\circ}$.
<table><tr><td colspan="2"></td><td colspan="8">Percentage of estimated axes within x°</td></tr><tr><td>Dataset</td><td>Method</td><td>1°</td><td>2°</td><td>3°</td><td>4°</td><td>5°</td><td>7.5°</td><td>10°</td><td>12°</td></tr><tr><td rowspan="3">Val. rotated</td><td>Ours</td><td>23.1</td><td>59.0</td><td>79.2</td><td>88.2</td><td>93.2</td><td>96.4</td><td>97.2</td><td>97.5</td></tr><tr><td>Deep360Up</td><td>3.0</td><td>12.1</td><td>27.4</td><td>42.7</td><td>58.3</td><td>82.5</td><td>91.6</td><td>95.7</td></tr><tr><td>VP</td><td>4.2</td><td>13.4</td><td>22.3</td><td>28.9</td><td>33.3</td><td>38.9</td><td>41.6</td><td>43.0</td></tr><tr><td rowspan="3">Val. flat</td><td>Ours</td><td>25.3</td><td>66.3</td><td>87.4</td><td>93.5</td><td>96.0</td><td>97.9</td><td>98.3</td><td>98.7</td></tr><tr><td>Deep360Up</td><td>12.4</td><td>25.3</td><td>39.6</td><td>50.8</td><td>61.5</td><td>82.4</td><td>93.5</td><td>96.7</td></tr><tr><td>VP</td><td>2.8</td><td>11.6</td><td>25.7</td><td>39.1</td><td>47.4</td><td>60.8</td><td>68.6</td><td>73.2</td></tr><tr><td rowspan="3">Test</td><td>Ours</td><td>26.9</td><td>67.3</td><td>88.6</td><td>95.0</td><td>97.5</td><td>99.4</td><td>99.7</td><td>99.7</td></tr><tr><td>Deep360Up</td><td>9.0</td><td>29.3</td><td>49.2</td><td>62.1</td><td>73.0</td><td>88.5</td><td>94.2</td><td>96.2</td></tr><tr><td>VP</td><td>4.9</td><td>15.1</td><td>27.9</td><td>38.1</td><td>46.7</td><td>62.4</td><td>70.5</td><td>74.9</td></tr></table>
Table 3. Downstream task performance (mean IOU, in percentages) on different subsets of data (sec. 3.4). For each model we highlight the worst performance in bold.
<table><tr><td rowspan="2" colspan="2"></td><td colspan="3">Evaluation Dataset</td></tr><tr><td>Original</td><td>Levelled</td><td>Rotated</td></tr><tr><td rowspan="3">Training Dataset</td><td>Original</td><td>40.1</td><td>40.0</td><td>26.7</td></tr><tr><td>Levelled</td><td>42.5</td><td>42.0</td><td>31.4</td></tr><tr><td>Rotated</td><td>43.0</td><td>42.7</td><td>39.1</td></tr></table>
that its vertical direction was at most $45^{\circ}$ away from level. We also constructed a levelled dataset by applying our method to the rotated images, using the estimated rotations to level the images and their annotations. In total we used these 3 datasets: original, rotated, and levelled, to train 3 segmentation models.

The segmentation models consist of our own implementation of GSCNNs. Our training regime followed the original work [30] except that we trained for 100 epochs. After training each model, we then evaluated the mean IOU on the original, rotated, and levelled validation sets, consisting of 100 images.

Table 3 shows that all models performed worst on the rotated dataset, even the model trained specifically on rotated images. This drop in performance is particularly striking for models trained on levelled images, with drops of $13.3\%$ and $10.6\%$ for the models trained on the original and levelled datasets, respectively. This highlights a significant problem for many $360^{\circ}$ processing methods, which have been trained and evaluated on levelled images. These methods may not generalise well at test time where images may not be level. Our method solves this problem as can be seen in table 3, where the automatically levelled images achieve close to the same performance as the original, levelled dataset.
# 4 Conclusion

In this paper, we presented the most accurate auto-alignment method to date, developed by combining state-of-the-art segmentation methods with classical

Fig. 8. Top row: Qualitative results for the downstream task of semantic segmentation (sec. 3.4). Middle row: Ground truth. The model performs well on the original images (left), but significantly worse on rotated images (center). Levelling with our method (right) recovers the performance. This highlights the importance of levelling for realistic downstream tasks. Bottom row: Example scenes from the construction dataset.

vanishing point features. We have demonstrated that care needs to be taken when generating training data to avoid data leakage. Moreover, we have demonstrated that casting the vertical axis estimation problem as a segmentation problem results in improved performance, whilst using standard segmentation techniques.

One issue with our approach is that we make the assumption that the vertical direction is already in the upper hemisphere. Though this is a reasonable assumption given how images are captured (where such misalignment is rarely an issue), and the availability of onboard sensors to roughly align an image, we could remedy this problem by instead segmenting the image into three classes: up, down and background. Doing so would allow us to calculate a vertical axis as before, but then use the up or down label to vote for the up direction. Future work could also try directly regressing the location of the vertical direction following the segmentation. We leave this for future work as it would require a considerable modification of the proposed framework.
# References
|
| 208 |
+
|
| 209 |
+
1. Caesar, H., Bankiti, V., Lang, A.H., Vora, S., Liong, V.E., Xu, Q., Krishnan, A., Pan, Y., Baldan, G., Beijbom, O.: nuscenes: A multimodal dataset for autonomous driving. arXiv preprint arXiv:1903.11027 (2019)
|
| 210 |
+
2. Canny, J.: A computational approach to edge detection. IEEE Transactions on Pattern Analysis and Machine Intelligence PAMI-8(6), 679-698 (Nov 1986). https://doi.org/10.1109/TPAMI.1986.4767851
|
| 211 |
+
3. Coors, B., Paul Condurache, A., Geiger, A.: Spherenet: Learning spherical representations for detection and classification in omnidirectional images. In: The European Conference on Computer Vision (ECCV) (September 2018)
|
| 212 |
+
4. Coughlan, J.M., Yuille, A.L.: The manhattan world assumption: Regularities in scene statistics which enable bayesian inference. In: Leen, T.K., Dietterich, T.G., Tresp, V. (eds.) Advances in Neural Information Processing Systems 13, pp. 845-851. MIT Press (2001), http://papers.nips.cc/paper/1804-the-manhattan-world-assumption-regularities-in-scene-statistics-which-enable-bayesian-inference.pdf
|
| 213 |
+
5. Davidson, B., Kalitzeos, A., Carroll, J., Dubra, A., Ourselin, S., Michaelides, M., Bergeles, C.: Automatic cone photoreceptor localisation in healthy and stargardt afflicted retinas using deep learning. Scientific Reports 8(1), 7911 (2018). https://doi.org/10.1038/s41598-018-26350-3, https://doi.org/10.1038/s41598-018-26350-3
|
| 214 |
+
6. Doersch, C., Gupta, A., Efros, A.A.: Unsupervised visual representation learning by context prediction. In: The IEEE International Conference on Computer Vision (ICCV) (December 2015)
|
| 215 |
+
7. Duda, R.O., Hart, P.E.: Use of the hough transformation to detect lines and curves in pictures. Commun. ACM 15(1), 11-15 (Jan 1972). https://doi.org/10.1145/361237.361242, https://doi.org/10.1145/361237.361242
|
| 216 |
+
8. Fernandez-Labrador, C., Fácil, J.M., Pérez-Yus, A., Demonceaux, C., Guerrero, J.J.: Panoroom: From the sphere to the 3d layout. CoRR abs/1808.09879 (2018), http://arxiv.org/abs/1808.09879
|
| 217 |
+
9. Galamhos, C., Matas, J., Kittler, J.: Progressive probabilistic though transform for line detection. In: Proceedings. 1999 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (Cat. No PR00149). vol. 1, pp. 554-560 Vol. 1 (June 1999). https://doi.org/10.1109/CVPR.1999.786993
|
| 218 |
+
0. Gallagher, A.C.: Using vanishing points to correct camera rotation in images. In: The 2nd Canadian Conference on Computer and Robot Vision (CRV'05). pp. 460-467 (May 2005). https://doi.org/10.1109/CRV.2005.84
|
| 219 |
+
1. Gidaris, S., Singh, P., Komodakis, N.: Unsupervised representation learning by predicting image rotations. In: ICLR 2018 (2018)
|
| 220 |
+
2. Google: Google Street View product page. https://www.google.com/streetview/ (2007), [Online, accessed March 2020]
|
| 221 |
+
3. Guerrero-Viu, J., Fernandez-Labrador, C., Demonceaux, C., Guerrero, J.J.: What's in my Room? Object Recognition on Indoor Panoramic Images. arXiv e-prints arXiv:1910.06138 (Oct 2019)
|
| 222 |
+
4. Hartley, R., Zisserman, A.: Multiple View Geometry in Computer Vision. Cambridge University Press, USA, 2 edn. (2003)
|
| 223 |
+
5. Henriques, J.F., Vedaldi, A.: Warped convolutions: Efficient invariance to spatial transformations. In: Proceedings of the 34th International Conference on Machine Learning-Volume 70. pp. 1461-1469. JMLR.org (2017)
|
| 224 |
+
|
| 225 |
+
16. Huang, G., Liu, Z., v. d. Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 2261-2269 (July 2017). https://doi.org/10.1109/CVPR.2017.243
|
| 226 |
+
17. Jeon, J., Jung, J., Lee, S.: Deep upright adjustment of 360 panoramas using multiple roll estimations. In: Jawahar, C., Li, H., Mori, G., Schindler, K. (eds.) Computer Vision - ACCV 2018. pp. 199-214. Springer International Publishing, Cham (2019)
|
| 227 |
+
18. Jung, R., Lee, A.S.J., Ashtari, A., Bazin, J.: Deep360up: A deep learning-based approach for automatic vr image upright adjustment. In: 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR). pp. 1-8 (March 2019). https://doi.org/10.1109/VR.2019.8798326
|
| 228 |
+
19. Kendall, A., Grimes, M., Cipolla, R.: Posenet: A convolutional network for real-time 6-dof camera relocalization. In: Proceedings of the IEEE international conference on computer vision. pp. 2938-2946 (2015)
|
| 229 |
+
20. Lee, M., Fowlkes, C.C.: Cemnet: Self-supervised learning for accurate continuous ego-motion estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops. pp. 0-0 (2019)
|
| 230 |
+
21. Lezama, J., v. Gioi, R.G., Randall, G., Morel, J.: Finding vanishing points via point alignments in image primal and dual domains. In: 2014 IEEE Conference on Computer Vision and Pattern Recognition. pp. 509-515 (June 2014). https://doi.org/10.1109/CVPR.2014.72
|
| 231 |
+
22. Liu, W., Anguelov, D., Erhan, D., Szegedy, C., Reed, S., Fu, C.Y., Berg, A.C.: SSD: Single shot multibox detector. In: European conference on computer vision. pp. 21-37. Springer (2016)
|
| 232 |
+
23. Noroozi, M., Favaro, P.: Unsupervised learning of visual representations by solving jigsaw puzzles. In: European Conference on Computer Vision. pp. 69-84. Springer (2016)
|
| 233 |
+
24. O'Sullivan, B., Alam, F., Matava, C.: Creating low-cost 360-degree virtual reality videos for hospitals: A technical paper on the dos and don'ts. Journal of medical Internet research 20(7), e239-e239 (Jul 2018). https://doi.org/10.2196/jmir.9596, https://www.ncbi.nlm.nih.gov/pubmed/30012545, 30012545[pmid]
|
| 234 |
+
25. Schindler, G., Dellaert, F.: Atlanta world: an expectation maximization framework for simultaneous low-level edge grouping and camera calibration in complex man-made environments. In: Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004. vol. 1, pp. I-I (June 2004). https://doi.org/10.1109/CVPR.2004.1315033
|
| 235 |
+
26. Sellers, G., Wright, R.S., Haemel, N.: OpenGL Superbible: Comprehensive Tutorial and Reference. Addison-Wesley Professional, 7th edn. (2015)
|
| 236 |
+
27. Shan, Y., Li, S.: Discrete spherical image representation for CNN-based inclination estimation. IEEE Access 8, 2008-2022 (2020). https://doi.org/10.1109/ACCESS.2019.2962133
|
| 237 |
+
28. Sweeney, C., Flynn, J., Nuernberger, B., Turk, M., Höllerer, T.: Efficient computation of absolute pose for gravity-aware augmented reality. In: 2015 IEEE International Symposium on Mixed and Augmented Reality. pp. 19-24 (Sep 2015). https://doi.org/10.1109/ISMAR.2015.20
|
| 238 |
+
29. Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR). IEEE (jun 2016). https://doi.org/10.1109/cvpr.2016.308,
|
| 239 |
+
|
| 240 |
+
30. Takikawa, T., Acuna, D., Jampani, V., Fidler, S.: Gated-SCNN: Gated shape CNNs for semantic segmentation. In: The IEEE International Conference on Computer Vision (ICCV) (October 2019)
|
| 241 |
+
31. Tateno, K., Navab, N., Tombari, F.: Distortion-aware convolutional filters for dense prediction in panoramic images. In: The European Conference on Computer Vision (ECCV) (September 2018)
|
| 242 |
+
32. Trombka, J.I., Schweitzer, J., Selavka, C., Dale, M., Gahn, N., Floyd, S., Marie, J., Hobson, M., Zeosky, J., Martin, K., McClannahan, T., Solomon, P., Gottschang, E.: Crime scene investigations using portable, nondestructive space exploration technology. Forensic Science International 129(1), 1 - 9 (2002). https://doi.org/https://doi.org/10.1016/S0379-0738(02)00079-8, http://www.sciencedirect.com/science/article/pii/S0379073802000798
|
| 243 |
+
33. Wallraven, C., Schwaninger, A., Schuhmacher, S., Bülthoff, H.: View-based recognition of faces in man and machine: Re-visiting inter-extra-ortho. vol. 2525, pp. 651-660 (11 2002). https://doi.org/10.1007/3-540-36181-2_65
|
| 244 |
+
34. Weiler, M., Hamprecht, F.A., Storath, M.: Learning steerable filters for rotation equivariant CNNs. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 849-858 (2018)
|
| 245 |
+
35. Xiao, J., Ehinger, K.A., Oliva, A., Torralba, A.: Recognizing scene viewpoint using panoramic place representation. In: 2012 IEEE Conference on Computer Vision and Pattern Recognition. pp. 2695-2702 (June 2012). https://doi.org/10.1109/CVPR.2012.6247991
|
| 246 |
+
36. Xiao, J., Ehinger, K., Oliva, A., Torralba, A.: Recognizing scene viewpoint using panoramic place representation. In: Proceedings of 25th IEEE Conference on Computer Vision and Pattern Recognition. pp. 0-0 (2012)
|
| 247 |
+
37. Xu, J., Stenger, B., Kerola, T., Tung, T.: Pano2cad: Room layout from a single panorama image. In: 2017 IEEE Winter Conference on Applications of Computer Vision (WACV). pp. 354-362 (March 2017). https://doi.org/10.1109/WACV.2017.46
|
| 248 |
+
38. Zhang, Y., Song, S., Tan, P., Xiao, J.: Panocontext: A whole-room 3d context model for panoramic scene understanding. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) Computer Vision - ECCV 2014. pp. 668-686. Springer International Publishing, Cham (2014)
|
360ocameraalignmentviasegmentation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:23a64d0580ade16ed7edd0965375dacc153c624b672ce0443fffa34119cd65dd
|
| 3 |
+
size 514395
|
360ocameraalignmentviasegmentation/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5ceb64759dfc2316520196fcc16e8d60e89f553f80c70067d449580d013ccede
|
| 3 |
+
size 354779
|
3dbirdreconstructionadatasetmodelandshaperecoveryfromasingleview/d0d4d273-2c35-49ff-8f48-69b8de014f9b_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c635578703e3e6a64763d9ec79a5cf1e3c2263d9e725713193c6ceb0d5711913
|
| 3 |
+
size 77094
|
3dbirdreconstructionadatasetmodelandshaperecoveryfromasingleview/d0d4d273-2c35-49ff-8f48-69b8de014f9b_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:90150eb4fbfa4ca0a50647d61583645a085c942a6364cf8d7ffd030a28b5dbd4
|
| 3 |
+
size 92773
|
3dbirdreconstructionadatasetmodelandshaperecoveryfromasingleview/d0d4d273-2c35-49ff-8f48-69b8de014f9b_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:741e7bf267d4cc75dc4c51d473826fc6d95bd18d6a7ecb7202319abc11628175
|
| 3 |
+
size 21331202
|
3dbirdreconstructionadatasetmodelandshaperecoveryfromasingleview/full.md
ADDED
|
@@ -0,0 +1,285 @@
| 1 |
+
# 3D Bird Reconstruction: a Dataset, Model, and Shape Recovery from a Single View
|
| 2 |
+
|
| 3 |
+
Marc Badger $^{1,2[0000-0002-6411-706X]}$ , Yufu Wang $^{1,2[0000-0001-9907-8382]}$ , Adarsh Modh $^{1,2[0000-0003-1597-2753]}$ , Ammon Perkes $^{1,2[0000-0001-8932-8309]}$ , Nikos Kolotouros $^{1,2[0000-0003-4885-4876]}$ , Bernd G. Pfrommer $^{1,2[0000-0002-3852-3240]}$ , Marc F. Schmidt $^{1,3[0000-0001-7496-0889]}$ , and Kostas Daniilidis $^{1,2[0000-0003-0498-0758]}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup> University of Pennsylvania, Philadelphia PA 19104, USA
|
| 6 |
+
<sup>2</sup> {mbadger, yufu, adarshm, nkolot, pfrommer, kostas}@seas.upenn.edu
|
| 7 |
+
<sup>3</sup> {aperkes, marcschm}@sas.upenn.edu
|
| 8 |
+
|
| 9 |
+
Abstract. Automated capture of animal pose is transforming how we study neuroscience and social behavior. Movements carry important social cues, but current methods are not able to robustly estimate pose and shape of animals, particularly for social animals such as birds, which are often occluded by each other and objects in the environment. To address this problem, we first introduce a model and multi-view optimization approach, which we use to capture the unique shape and pose space displayed by live birds. We then introduce a pipeline and experiments for keypoint, mask, pose, and shape regression that recovers accurate avian postures from single views. Finally, we provide extensive multi-view keypoint and mask annotations collected from a group of 15 social birds housed together in an outdoor aviary. The project website with videos, results, code, mesh model, and the Penn Aviary Dataset can be found at https://marcbadger.github.io/avian-mesh.
|
| 10 |
+
|
| 11 |
+
Keywords: pose estimation, shape estimation, birds, animals, dataset
|
| 12 |
+
|
| 13 |
+
# 1 Introduction
|
| 14 |
+
|
| 15 |
+
Why computational ethology? Accurate measurement of behavior is vital to disciplines ranging from neuroscience and biomechanics to human health and agriculture. Through automated measurement, computational ethology aims to capture complex variation in posture, orientation, and position of multiple individuals over time as they interact with each other and their environment [1]. Pose trajectories contain rich, unbiased information from which we can extract more abstract features that are relevant to brain function, social interactions, biomechanics, and health. Studying neural functions in the context of natural social behavior is a critical step toward a deeper understanding of how the brain integrates perception, cognition, and learning and memory to produce behavior. Pose trajectories reveal how animals maneuver to negotiate cluttered environments, how animals make decisions while foraging or searching for mates, and how the collective behavior of a group arises from individual decisions. Automated capture
|
| 16 |
+
|
| 17 |
+
of difficult-to-observe behaviors is transforming diverse applications by streamlining the process of extracting quantitative physiological, behavioral, and social data from images and video.
|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
|
| 21 |
+

|
| 22 |
+
Fig. 1. Visual signals convey important social cues in birds. Motions such as pecking (top left) and wingstrokes (bottom left) drive social behavior in both males and females. Complex wing folding and large changes in body volume when feathers are puffed (upper right) make shape recovery (lower right) a difficult task. Images from [38] and [2].
Why does bird posture matter? Why cowbirds? Understanding how the collective behavior of social groups arises from individual interactions is important for studying the evolution of sociality and neural mechanisms behind social behaviors. Although vocalizations are a clear channel for communication in birds, surprisingly, changes in posture, orientation, and position also play an important role in communication. One of the best-studied groups from both behavioral and neuroscience perspectives is the brown-headed cowbird (Molothrus ater). In cowbirds, females influence the behavior of males through a number of visual mechanisms including "wingstrokes", which involve changes in both pose and shape over time [38] (Figure 1). Interactions between birds are usually recorded by observing a focal individual's interactions in person in the field. Although insightful, such manual observations contain observer bias, miss interactions between non-focal individuals, and cannot be performed continuously for long periods. Qualitative observations also miss important variation in posture that would be revealed by a quantitative approach. For example, Figure 1 shows changes in pose and shape that can serve as social cues in cowbirds. The ability to estimate the pose of multiple interacting individuals would transform the study of animal communication [1], as it is beginning to do for humans [13, 14, 29]. Estimating the pose and shape of birds in a social context, however, presents several challenges.
|
| 41 |
+
|
| 42 |
+
Why is estimating bird pose and shape challenging? Recovering shape and pose of birds in the wild is challenging for the following four reasons:
|
| 43 |
+
|
| 44 |
+
1. Changes in pose and shape are difficult to model in birds.
|
| 45 |
+
2. No pose or shape priors are available.
|
| 46 |
+
3. Many birds are only visible from a single unoccluded view.
|
| 47 |
+
4. Appearance variation in natural settings makes detection difficult.
|
| 48 |
+
|
| 49 |
+

|
| 50 |
+
Fig. 2. Appearance variation across bird identity (top vs bottom) and across viewpoint, time of day, and season (1st column vs. columns 2-4 respectively). The red box within the left image of each panel shows the location of the enlarged crop (right image).
|
| 51 |
+
|
| 52 |
+
Shape is particularly difficult to model because birds have highly mobile feathers that allow dramatic changes in both shape (e.g. tail fanning) and perceived body volume (e.g. feather puffing in Figure 1). Furthermore, when the wings are held next to the body, they are folded in a complex way in which much of the wing surface becomes sandwiched between the top of the wing and the body. These "internal" surfaces cannot be recovered from scans of figurines with folded wings, and figurines with wings in intermediate poses are not available. In addition to modeling challenges, cowbirds interact in a complex environment containing extreme variation in illumination and may be heavily occluded either by vegetation or by other birds in the group.
|
| 53 |
+
|
| 54 |
+
Animal posture is often described using joint angles derived from semantic keypoints, joints, or other anatomical locations. This approach is attractive because keypoints are easy to identify and can readily be localized by deep-learning-based software packages such as DeepLabCut [26], DeepPoseKit [10], and LEAP [32]. Under heavy occlusion, however, even multi-view setups frequently do not observe relevant keypoints from more than one view. One solution is to lift the pose from 2D to 3D, but unlike for humans, researchers do not yet have strong species-specific priors for tackling this problem. We overcome the limitations of directly using 2D keypoints and skeletons by fitting a 3D parameterized mesh model with priors learned from a multi-view dataset.
|
| 55 |
+
|
| 56 |
+
Dataset. With the aim of creating a robust system for estimating the shape and pose of multiple interacting birds over months-long timescales, we recorded the behavior of 15 cowbirds housed together in an outdoor aviary over the course of a single three-month mating season. Our carefully calibrated multi-view dataset contains large variation in (i) bird pose, orientation, and position/depth, (ii) viewpoint across eight cameras, and (iii) appearance across different lighting conditions (time of day and weather) and seasons (Figure 2). Cowbirds have a nearly textureless appearance and birds move freely and explore all three dimensions of their cage, producing a large range of subject depth with respect to the camera. Importantly, both perched and flying birds adopt postures covering a large range of motion in both orientation and pose.
|
| 57 |
+
|
| 58 |
+
We annotated silhouette and keypoints for 1000 instances and matched these annotations across views. Although $90\%$ of annotated birds were visible from 3
|
| 59 |
+
|
| 60 |
+

|
| 61 |
+
Fig. 3. The dataset and model. We provide multi-view segmentation masks for over 6300 bird instances, keypoints for 1000 bird instances, the first articulated 3D mesh model of a bird, and a full pipeline for recovering the shape and pose of birds from single views.
|
| 62 |
+
|
| 63 |
+
or more cameras, about half of the annotated instances were occluded to some degree. Only $62\%$ of birds had more than one unoccluded view, highlighting the need for a single-view approach.
|
| 64 |
+
|
| 65 |
+
After collecting keypoint and silhouette ground truth from multiple views, we fit our avian mesh model using a multi-view optimization-based approach to learn a shape space and pose priors. We then use the model and priors to train a neural network to regress pose parameters directly from keypoint and silhouette data. These pose parameters can be used to initialize a single-view optimization procedure to further refine body pose and shape (Figure 4). We use our dataset for learning instance segmentation and keypoint localization, and for estimating bird pose and shape, but our dataset could also be used in the future for learning Re-ID tasks.
|
| 66 |
+
|
| 67 |
+
In summary, our contributions are focused around the four challenges mentioned previously:
|
| 68 |
+
|
| 69 |
+
1. We develop the first parameterized avian mesh model that is capable of capturing the unique pose and shape changes displayed by birds.
|
| 70 |
+
2. We fit our mesh model to available multi-view keypoint and silhouette data using an optimization-based approach to obtain an accurate shape space and pose prior.
|
| 71 |
+
3. We develop a neural network based pipeline for recovering the shape and pose of birds from a single view.
|
| 72 |
+
4. We present a challenging multi-view dataset for studying social behavior in birds. The dataset contains extreme variation in subject appearance and depth and many subjects are fully or partially occluded in all but one view.
|
| 73 |
+
|
| 74 |
+

|
| 75 |
+
Fig. 4. We estimate the 3D pose and shape of birds from a single view. Given a detection and associated bounding box, we predict body keypoints and a mask. We then predict the parameters of an articulated avian mesh model, which provides a good initial estimate for optional further optimization.
|
| 76 |
+
|
| 77 |
+
# 2 Related work
|
| 78 |
+
|
| 79 |
+
Human pose and shape estimation. Recent advances in human pose estimation have capitalized on i) powerful 2D joint detectors, ii) 3D pose priors, and iii) low-dimensional articulated 3D shape models of the human body. SMPL [25], the most popular formulation, first deforms a template mesh using shape and pose parameters learned from over 1000 registered body scans of people [4] and then uses linear blend skinning (LBS) to transform mesh vertices given a set of joint angles. In SMPLify, Bogo et al. [3] estimate 3D human pose and shape from single images by fitting SMPL to 2D keypoints. Huang et al. [12] extend SMPLify [3] to the multi-view setting and show a positive effect of silhouette supervision in addition to keypoints. Pavlakos et al. [31] estimate pose and shape directly from predicted keypoints and silhouettes in an end-to-end framework. Recent approaches regress pose and shape directly from images and use adversaries with access to a 3D pose dataset [15], Graph-CNN architectures [21], texture consistency [30], and model-fitting within the training loop [20]. All of the above methods base their approach on parameterized mesh models indicating their critical importance for bridging between observation in 2D and estimation in 3D. In contrast to previous works that rely on 3D scans and SMPL-like models to develop meshes and shape spaces for novel domains such as hands [34], faces [22], and four-legged animals [42], we learn our avian mesh model directly from video data of live birds.
|
| 80 |
+
|
| 81 |
+
Animal pose and shape estimation. Within biology, most work focuses on isolated animals with no background clutter and few occlusions. Mathis et al. [26] and Pereira et al. [32] recently provided tools for training convolutional neural networks for keypoint localization. Graving et al. [10] localize keypoints on three datasets of fruit flies [32], desert locusts [10], and Grevy's zebras [10]. Günel et al. [11] use a Stacked Hourglass network [27] for 2D keypoint localization in flies and perform pictorial structures and belief propagation message passing [7] to reconstruct 3D pose from 2D detections. Liu and Belhumeur [24] use HOG descriptors and linear SVMs to localize bird parts in the more challenging CUB-200-2011 dataset [37]. All of these works are based on the detection and direct triangulation of 2D keypoints. A fundamental challenge, however, is that
|
| 82 |
+
|
| 83 |
+
any particular keypoint may not be visible from more than one view. Models that constrain the relative position of keypoints, such as the parameterized mesh model we present here, overcome this issue.
|
| 84 |
+
|
| 85 |
+
Two previous works use articulated graphics models to estimate the pose of flying animals. Fontaine et al. [8] construct a 3D mesh model of a fruit fly and estimate the fly's trajectory and pose over time by fitting the model to three orthogonal views. Breslav [5] create a two-DOF 3D graphics model of a bat and use a Markov Random Field to estimate the 3D pose of bats flying in the wild captured with a multi-view thermal camera setup.
|
| 86 |
+
|
| 87 |
+
Animal shape estimation is a difficult task. Cashman and Fitzgibbon [6] estimate the shape of dolphins. Ntouskos et al. [28] fit shape primitives to silhouettes of four-legged animals. Vicente and Agapito [36] obtain and deform a template mesh using silhouettes from two reference images. Kanazawa et al. [16] learn how animals deform from images by creating an animal-specific model of local stiffness. Kanazawa et al. [17] predict shape, pose, and texture of birds in CUB-200 by deforming a spherical mesh, but do not model pose and thus the locations of wingtips on the mesh are often topologically adjacent to the tail rather than near the shoulders. Zuffi et al. [42] create a realistic, parameterized 3D model (SMAL) from scans of toys by aligning a four-legged template to the scans. They capture shape using PCA coefficients of the aligned meshes and learn a pose prior from a short walking video. Zuffi, Kanazawa, and Black [41] fit the SMAL model to several images of the same animal and then refine the shape to better fit the image data, resulting in capture of both shape and texture (SMALR). Zuffi et al. [40] estimate 3D pose, shape, and texture of zebras in the wild by integrating the SMAL model into an end-to-end network regression pipeline. Their key insight was to first use SMALR to pose an existing horse model and capture a rough texture of the target species. A common feature of these approaches is that they create or leverage a parameterized mesh model. The SMAL model was only trained on four-legged animals so the shape space learned by the model is insufficient for modeling birds, which differ markedly in both limb shape and joint angles. To overcome the lack of a statistical model for birds, we add one additional degree of freedom to each joint and obtain a pose and shape space from multi-view fits to live birds.
|
| 88 |
+
|
| 89 |
+
Datasets for animal pose estimation. Large-scale object recognition datasets contain many species of animals including dogs, cats, birds, horses, sheep, and more. MS COCO [23] contains 3362 images with bird mask annotations, but no keypoint or pose annotations. The CUB-200 dataset [37] contains 11,788 masks and keypoint instances of birds in the wild. A fruit fly dataset [32] contains 1500 images with centered dorsal views of single flies walking in an arena with a plain white background containing no variation or distractors. The desert locust (800 images) and Grévy's zebras (900 images) include other individuals in the frame, but views are dorsal-only, centered, and narrowly cropped around a focal individual. In contrast our multi-view dataset contains both masks and keypoints of multiple, overlapping subjects and has large variation in relative viewpoint and complex changes in background and lighting.
|
| 90 |
+
|
| 91 |
+
# 3 Approach
|
| 92 |
+
|
| 93 |
+
We adopt a bootstrapped, four-step approach to developing a full pipeline for 3D bird reconstruction from single images (Figure 5). First, we develop a parameterized avian mesh and use a multi-view optimization procedure to fit the model to annotations in our dataset. Because they use information from multiple views, these fits are generally good and do not suffer from ambiguities that can plague pose estimation from single views. It is enticing to deploy this multi-view optimization approach towards our end-goal of estimating the pose and shape of all birds over time, but it is slow (initialization is usually far from the target) and requires multiple views in order to produce realistic poses. Nearly $40\%$ of the birds in our dataset were visible from one or fewer unoccluded views, however, indicating the need for a single-view approach. Second, from the multi-view fits, we extract distributions of shape and pose for birds in the aviary, which we use to create a synthetic dataset on which we train neural networks that regress pose and shape parameters from keypoints and silhouettes in a single view. Third, we train a second network to predict an instance segmentation and keypoints given a detection and corresponding bounding box. Finally, we connect the keypoint and segmentation network to the pose regression network. The full pipeline provides a pose and shape estimate from a single-view image, which can be used to initialize further optimization (Figure 4).
|
| 94 |
+
|
| 95 |
+

|
| 96 |
+
Fig. 5. Overall approach for recovering bird pose and shape from a single view. See Figure 4 for a detailed view of the final pipeline.
|
| 97 |
+
|
| 98 |
+
Bird detection in full images. We detect bird instances using a Mask R-CNN pretrained on COCO instance segmentation. We removed weights for non-bird classes (leaving bird and background) and then fine-tuned all layers on our dataset for 15 epochs in PyTorch.
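As an illustration of this detection step, the sketch below fine-tunes a COCO-pretrained torchvision Mask R-CNN for a two-class (background/bird) problem. The dataset loader and the optimizer settings are assumptions for illustration, not the authors' exact configuration.

```python
# Sketch: fine-tune a COCO-pretrained Mask R-CNN for {background, bird}.
# The DataLoader and optimizer settings are assumed, not the authors' exact setup.
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor

def build_bird_maskrcnn(num_classes=2):
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    # Replace the box head so it predicts only background vs. bird.
    in_feat = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_feat, num_classes)
    # Replace the mask head accordingly.
    in_feat_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_feat_mask, 256, num_classes)
    return model

def finetune(model, loader, epochs=15, lr=0.005):
    # `loader` yields (images, targets) in the torchvision detection format.
    optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)  # assumed settings
    model.train()
    for _ in range(epochs):
        for images, targets in loader:
            losses = model(images, targets)       # dict of detection/mask losses
            loss = sum(losses.values())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    return model
```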
|
| 99 |
+
|
| 100 |
+
Keypoints and silhouette prediction. We train a convolutional neural network to predict keypoints and a silhouette given a detection and corresponding bounding box. We modify the structure of High-Resolution Net (HRNet) [35], which is state-of-the-art for keypoint localization in humans, so that it outputs masks in addition to keypoints. Our modified HRNet achieves 0.46 PCK@05, 0.64 PCK@10, and 0.78 IoU on our dataset.
|
| 101 |
+
|
| 102 |
+
Skinned linear articulated bird model. To define an initial mesh, joint locations, and skinning weights, we used an animated 3D mesh of a bird model
|
| 103 |
+
|
| 104 |
+
downloaded from the CGTrader Marketplace website. The model originally contained 18k vertices and 13k faces, but we removed vertices associated with body feathers, eyes, and other fine details to obtain a mesh with 3932 vertices, 5684 faces, and 25 skeletal joints (including a root joint, which is used for camera pose). We use the skinning weights defined in the original file. In addition to skeletal joints, we define 16 mesh keypoints that correspond to the annotated semantic keypoints in our dataset. We obtain keypoint locations by identifying up to four mesh vertices associated with each keypoint and averaging their 3D locations.
|
| 105 |
+
|
| 106 |
+
To pose the model, we specify a function $M(\alpha, \theta, \gamma, \sigma)$ of bone length parameters $\alpha \in \mathbb{R}^J$ for $J$ joints, pose parameters $\theta \in \mathbb{R}^{3J}$ specifying relative rotation of the joints (and the rotation of the root relative to the global coordinate system) in axis-angle parameterization, global translation inside the aviary $\gamma$ , and scale $\sigma$ , that returns a mesh $\mathcal{M} \in \mathbb{R}^{N \times 3}$ , with $N = 3932$ vertices. Unlike SMPL [25] and SMAL [42] models, we do not have access to 3D ground truth variation in shape, which prevents the use of shape coefficients drawn from a learned PCA shape space. We mitigate this limitation by including an additional degree of freedom per joint, $\alpha_i$ , that models the distance between parent and child joints, thereby capturing variation in the relative length proportions of the body and limb segments. When birds perch, their wings fold in on themselves and we found that this large deformation is not well modeled by LBS of a single bird mesh model (it is also difficult to capture and register in 3D scans). To overcome this limitation, we use two template poses with identical mesh topology, bones, skinning weights, and keypoints, but with different initial postures: one for birds with their wings outstretched and another for birds with their wings folded (Figure 6). Finally, we also include an overall scale parameter to allow for consistent 3D multi-view estimation among cameras.
|
| 107 |
+
|
| 108 |
+

|
| 109 |
+
Fig. 6. Our model is capable of capturing both perched and flying poses.
|
| 110 |
+
|
| 111 |
+
To form the mesh into a given pose, we modify the approach used in SMPL [25] and SMPLify [3] to allow variable bone lengths. Starting with a template mesh $\mathcal{M}^T$ in a canonical pose with joint locations $\mathcal{J} \in \mathbb{R}^{J \times 3}$ , we first calculate
|
| 112 |
+
|
| 113 |
+
the position of each joint $i$ relative to its parent as
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
\mathcal{J}_i^{o} = \mathcal{J}_i - \mathcal{J}_{\mathrm{parent}(i)}. \tag{1}
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
We then multiply this vector by $\alpha_{i}$ to adjust the distance between the two joints and form a new skeletal shape $\mathcal{J}'$ , still in the canonical pose, with joint locations
|
| 120 |
+
|
| 121 |
+
$$
|
| 122 |
+
\mathcal{J}_i^{\prime} = \alpha_i \mathcal{J}_i^{o} + \sum_{j \in A(i)} \alpha_j \mathcal{J}_j^{o}, \tag{2}
|
| 123 |
+
$$
|
| 124 |
+
|
| 125 |
+
where $A(i)$ is the ordered set of joint ancestors of joint $i$ (i.e., all joints encountered moving along the kinematic tree from joint $i$ to the root). Finally, $\mathcal{J}' = J(\alpha)$ is transformed into the final pose using the global rigid transformation $R_{\theta}(\cdot)$ defined by the pose and root orientation parameters $\theta$, and an LBS function $W(\cdot; \mathcal{M}^T)$ is applied. The final mesh vertices are
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
\mathcal{M} = M(\alpha, \theta, \gamma, \sigma) \stackrel{\mathrm{def}}{=} \sigma\, W\!\left(R_{\theta}(J(\alpha)); \mathcal{M}^{T}\right) + \gamma. \tag{3}
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
The positions of 3D keypoints are calculated as $P(M(\alpha, \theta, \gamma))$ , where $P(\mathcal{M}) : \mathbb{R}^{N \times 3} \mapsto \mathbb{R}^{K \times 3}$ and $K$ is the number of keypoints. In practice $P$ is simply the average of four selected mesh vertices for each semantic keypoint.
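To make Equations 1-3 concrete, here is a minimal NumPy sketch of the bone-length rescaling along the kinematic tree and of the keypoint function $P$. The `parent` array and `keypoint_vertex_ids` are hypothetical inputs, and the skinning/posing step itself is omitted.

```python
# Sketch of Eqs. (1)-(2): rescale joint offsets along the kinematic tree, and of P(.):
# average selected mesh vertices per semantic keypoint. Names are placeholders.
import numpy as np

def rescale_joints(J, parent, alpha):
    """J: (num_joints, 3) template joints; parent[i]: parent index (-1 for root);
    alpha: (num_joints,) bone-length multipliers. Returns J' of Eq. (2)."""
    J_offset = J.copy()
    for i in range(len(J)):
        if parent[i] >= 0:
            J_offset[i] = J[i] - J[parent[i]]      # Eq. (1): offset from parent joint
    J_new = np.zeros_like(J)
    for i in range(len(J)):                        # assumes parents precede children
        if parent[i] < 0:
            J_new[i] = J[i]                        # root joint kept at template location
        else:
            # Eq. (2): accumulate rescaled offsets from the root down to joint i
            J_new[i] = J_new[parent[i]] + alpha[i] * J_offset[i]
    return J_new

def keypoints_from_mesh(vertices, keypoint_vertex_ids):
    """P(M): average up to four mesh vertices per semantic keypoint."""
    return np.stack([vertices[ids].mean(axis=0) for ids in keypoint_vertex_ids])
```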
|
| 132 |
+
|
| 133 |
+
Optimization. To fit our bird model to detected keypoints, we introduce a fitting procedure similar to SMPLify, an optimization-based approach originally described by Bogo et al. [3]. Unlike SMPLify, we capture among-individual variation using bone length parameters rather than body shape parameters, and we fit to semantic keypoints rather than joint locations. We minimize an objective function with a keypoint reprojection error term and a silhouette error term for each camera $i$, two pose priors, and a prior on the relative 3D distances between joints. Specifically, we minimize:
|
| 134 |
+
|
| 135 |
+
$$
|
| 136 |
+
E(\alpha, \theta, \gamma) = \sum_{\mathrm{cam}\, i} \left[ E_{kp}^{(i)}(\cdot;\cdot) + E_{msk}^{(i)}(\cdot;\cdot) \right] + \lambda_{\theta} E_{\theta}(\theta) + \lambda_{p} E_{p}(\theta) + \lambda_{b} E_{b}(\alpha) \tag{4}
|
| 137 |
+
$$
|
| 138 |
+
|
| 139 |
+
with
|
| 140 |
+
|
| 141 |
+
$$
|
| 142 |
+
E_{kp}^{(i)}(\alpha, \theta, \gamma; K_i, R_i, t_i, \mathcal{P}_i) = \sum_{\mathrm{kpt}\, k} w_k\, \rho\!\left( \left\| \Pi_{K_i, R_i, t_i}\!\left(P(M(\alpha, \theta, \gamma))\right)_k - \mathcal{P}_{i,k} \right\|_2 \right) \tag{5}
|
| 143 |
+
$$
|
| 144 |
+
|
| 145 |
+
and
|
| 146 |
+
|
| 147 |
+
$$
|
| 148 |
+
E_{msk}^{(i)}(\alpha, \theta, \gamma; K_i, R_i, t_i, \mathcal{S}_i) = \lambda_{msk} \left\| \mathcal{R}_{K_i, R_i, t_i}(M(\alpha, \theta, \gamma)) - \mathcal{S}_i \right\|_2. \tag{6}
|
| 149 |
+
$$
|
| 150 |
+
|
| 151 |
+
Equation 5 is a weighted reprojection penalty (using the robust Geman-McClure function $\rho$ [9]) between keypoints $\mathcal{P}_i$ and the projected mesh keypoints $\Pi_{K_i,R_i,t_i}(P(M(\alpha ,\theta ,\gamma)))$ for pinhole projection function $\varPi(x)=K[R|t]x$ . The bone lengths, $\alpha$ , are the distances between parent and child joints, $\theta$ are the pose parameters, $\gamma$ is the translation in the global reference frame, $K_{i}$ , $R_{i}$ , and $t_i$ are the intrinsics, rotation, and translation, respectively, used in perspective
|
| 152 |
+
|
| 153 |
+
projection for camera $i$ , and $\mathcal{P}_i$ are the detected or annotated 2D keypoint locations in the image. Equation 6 penalizes differences between an annotated mask $S_i$ and a rendered silhouette $\mathcal{R}_{K_i,R_i,t_i}(M(\alpha,\theta,\gamma))$ obtained using Neural Mesh Renderer [18]. $E_{\theta}(\theta) = |\theta - \theta_o|$ is a pose prior that penalizes the $L_1$ distance from the canonical pose $\theta_o$ . $E_p(\theta) = \max(0,\theta - \theta_{\max}) + \max(0,\theta_{\min} - \theta)$ linearly penalizes joint angles outside defined limits $\theta_{\min}$ and $\theta_{\max}$ and $E_b(\alpha) = \max(0,\alpha - \alpha_{\max}) + \max(0,\alpha_{\min} - \alpha)$ penalizes bone lengths outside limits $\alpha_{\min}$ and $\alpha_{\max}$ . In the single-view setting, the pose prior $(E_{\theta})$ and joint angle $(E_p)$ and bone length $(E_b)$ limit losses are disabled and we use the Mahalanobis distance to the distribution of multi-view pose and shape estimates instead. We minimize the objective in 4 using Adam [19] in PyTorch.
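A minimal PyTorch sketch of the multi-view fitting loop implied by Equations 4-6 follows. The `bird_model`, `project`, and `render_silhouette` callables and the loss weights are stand-ins (assumptions); the joint-limit and bone-limit penalties are omitted for brevity.

```python
# Sketch: minimize Eq. (4) over (alpha, theta, gamma) with Adam.
# bird_model(alpha, theta, gamma) -> (vertices, keypoints_3d); project() stands in for
# Pi_{K,R,t}; render_silhouette() stands in for a differentiable renderer.
import torch

def geman_mcclure(x, sigma=100.0):
    # Robust penalty rho(x) = x^2 / (x^2 + sigma^2); sigma is an assumed scale.
    return x ** 2 / (x ** 2 + sigma ** 2)

def fit_multiview(bird_model, cams, kpts_2d, kpt_conf, masks, theta_0,
                  project, render_silhouette, lam_msk=1.0, lam_theta=1.0, n_iters=500):
    alpha = torch.ones(bird_model.num_joints, requires_grad=True)   # bone lengths
    theta = theta_0.clone().requires_grad_(True)                    # axis-angle pose
    gamma = torch.zeros(3, requires_grad=True)                      # global translation
    opt = torch.optim.Adam([alpha, theta, gamma], lr=0.01)          # assumed learning rate
    for _ in range(n_iters):
        verts, kpts_3d = bird_model(alpha, theta, gamma)
        loss = torch.zeros(())
        for cam, p2d, w, mask in zip(cams, kpts_2d, kpt_conf, masks):
            proj = project(kpts_3d, cam.K, cam.R, cam.t)                        # Eq. (5)
            loss = loss + (w * geman_mcclure((proj - p2d).norm(dim=-1))).sum()
            sil = render_silhouette(verts, cam)                                 # Eq. (6)
            loss = loss + lam_msk * torch.norm(sil - mask)
        loss = loss + lam_theta * (theta - theta_0).abs().sum()                 # pose prior
        opt.zero_grad()
        loss.backward()
        opt.step()
    return alpha.detach(), theta.detach(), gamma.detach()
```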
|
| 154 |
+
|
| 155 |
+
Synthetic data and pose and shape regression. After performing multi-view optimization on 140 3D bird instances in our annotated dataset, we fit a multivariate Gaussian to the estimated pose parameters (pose, viewpoint, and translation). We then sample 100 random points from this distribution for each bird instance, project the corresponding model's visible keypoints onto the camera, and render the silhouette, generating 14,000 synthetic instances for training. We keep the bone lengths of the original 140 instances, but add random noise to the bone lengths for each sample.
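This generation step amounts to fitting a Gaussian to the stacked pose/viewpoint/translation vectors and sampling from it; a NumPy sketch follows, where `pose_vectors`, `bone_lengths`, and the bone-length noise scale are assumed placeholders.

```python
# Sketch: sample synthetic pose/shape instances from a Gaussian fitted to the
# multi-view estimates. Inputs and the noise scale below are assumptions.
import numpy as np

def make_synthetic(pose_vectors, bone_lengths, per_instance=100, noise=0.02, seed=0):
    """pose_vectors: (140, D) pose + viewpoint + translation; bone_lengths: (140, 24)."""
    rng = np.random.default_rng(seed)
    fits = np.asarray(pose_vectors)
    mu, cov = fits.mean(axis=0), np.cov(fits, rowvar=False)
    samples = []
    for alpha in np.asarray(bone_lengths):
        poses = rng.multivariate_normal(mu, cov, size=per_instance)
        for pose in poses:
            noisy_alpha = alpha + rng.normal(0.0, noise, size=alpha.shape)
            samples.append((pose, noisy_alpha))   # later projected/rendered to 2D targets
    return samples                                # 140 * 100 = 14,000 instances
```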
|
| 156 |
+
|
| 157 |
+
We train pose and shape regression networks on the 14,000 synthetic single-view instances, supervised by the ground truth pose and shape parameters. For the pose regression network, inputs are 2D joint locations and targets are 3D rotations, which are first transformed to the representation proposed by Zhou et al. [39] before computing the $L^2$ loss. The pose regression network is an MLP with two fully connected layers, with the final layer outputting $25 \times 6 + 3$ parameters (a 6D rotation per joint plus global translation). The shape regression network takes in a mask and contains one $5 \times 5$ convolutional layer followed by four $3 \times 3$ convolutional layers and a fully connected layer with 24 outputs, corresponding to the 24 bone lengths. Each convolutional layer is followed by batch normalization and max-pooling layers. Training was performed for 20 epochs using Adam.
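A minimal version of the pose regressor and the 6D-rotation mapping of Zhou et al. [39] might look like the sketch below; the hidden width is an assumption, and only the output size ($25 \times 6 + 3$) follows the text.

```python
# Sketch: MLP mapping 2D keypoints to per-joint 6D rotations plus translation,
# and the 6D -> rotation-matrix mapping of Zhou et al. [39]. Hidden size is assumed.
import torch
import torch.nn as nn
import torch.nn.functional as F

class PoseRegressor(nn.Module):
    def __init__(self, num_kpts=16, num_joints=25, hidden=1024):
        super().__init__()
        self.fc1 = nn.Linear(num_kpts * 2, hidden)
        self.fc2 = nn.Linear(hidden, num_joints * 6 + 3)   # 6D rotations + translation

    def forward(self, kpts_2d):            # kpts_2d: (B, num_kpts, 2)
        x = F.relu(self.fc1(kpts_2d.flatten(1)))
        return self.fc2(x)

def rot6d_to_matrix(x):
    """Gram-Schmidt mapping from the 6D representation to rotation matrices."""
    a1, a2 = x[..., :3], x[..., 3:]
    b1 = F.normalize(a1, dim=-1)
    b2 = F.normalize(a2 - (b1 * a2).sum(-1, keepdim=True) * b1, dim=-1)
    b3 = torch.cross(b1, b2, dim=-1)
    return torch.stack([b1, b2, b3], dim=-1)   # columns b1, b2, b3
```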
|
| 158 |
+
|
| 159 |
+
# 4 The cowbird dataset
|
| 160 |
+
|
| 161 |
+
Image acquisition and aviary details. We captured video of 15 individual cowbirds (Molothrus ater) in an outdoor aviary from March to June using eight synchronized cameras recording $1920 \times 1200$ images at $40\mathrm{Hz}$ . The aviary is 2.5 meters in height and width and is 6 meters long. Cameras were positioned in the corners and oriented so that their combined fields of view provided maximal coverage of the aviary volume by at least four cameras. Intrinsic parameters were estimated for each camera using a standard checkerboard and the camera calibration package in ROS. Extrinsic parameters for camera orientation and translation were estimated online via the TagSLAM package [33] using arrays of fiducial markers permanently attached to the aviary walls.
|
| 162 |
+
|
| 163 |
+
Dataset annotation and statistics. From the above recordings, we exported sets of synchronous frames from 125 "moments" (1000 images) drawn
|
| 164 |
+
|
| 165 |
+
from 10 days uniformly distributed over the recording period (an average of 12.5 uniformly distributed moments each day). On all images, we exhaustively annotated instance segmentation masks for all visible birds, producing over 6355 masks and bounding boxes. On a subset of 18 moments across six of the 10 days we also annotated the locations of 12 semantic keypoints on a total of 1031 masks (Figure 3). We annotated the bill tip, right and left eyes, neck, nape, right and left wrists, right and left wing tips, right and left feet, and the tail tip. Statistics on the visibility of keypoints (Table S7) and a comparison with other animal datasets (Tables S4, S5) are in the supplementary material.
|
| 166 |
+
|
| 167 |
+
We manually associated keypoint annotations within each moment across camera views to create 3D instance ID tags. From the 3D instance ID tags, $64\%$ , $26\%$ , and $10\%$ of birds were fully or partially visible from four or more cameras, three cameras, and two or fewer cameras, respectively (Supplementary Table S6). The average width $\times$ height of bird masks was $68 \times 75$ pixels (or $\approx$ $5\%$ of image width; the 5th and 95th percentiles of bird max dimensions were $17 \times 19$ and $239 \times 271$ pixels, respectively). We provide four types of test/train splits: by moment, by day, by time of day (morning vs. afternoon), and by season (March and April vs May and June). Birds wore colored bands on their legs that, when visible, could provide the true ID of the bird, but we leave the potential application of this dataset to the Re-ID task for future work.
|
| 168 |
+
|
| 169 |
+
# 5 Experiments
|
| 170 |
+
|
| 171 |
+
Detection. We first evaluate the performance of Mask R-CNN on instance segmentation of birds using our dataset. We show excellent generalization (AP = 0.52) when predicting masks on unseen days in the test set (Figure 7). Further analyses and performance on additional splits of the dataset (e.g. split by time of day or season) are provided in Supplementary Table S1.
|
| 172 |
+
|
| 173 |
+

|
| 174 |
+
Fig. 7. Instance detections made by a fine-tuned Mask R-CNN network over a large range of lighting conditions and views. Best viewed in color.
|
| 175 |
+
|
| 176 |
+
Multi-view optimization. We fit our articulated avian mesh model to annotations corresponding to each 3D bird instance in our keypoint dataset. We fit using all keypoint labels from all available views. We present qualitative results in Figure 8. Our fitting procedure produced many plausible fits but also many failure cases, shown in the bottom row of Figure 8. From the multi-view
|
| 177 |
+
|
| 178 |
+
fits, we obtained a pose and shape space for the mesh model, which we display in the supplementary video. We perform an ablation experiment to investigate
|
| 179 |
+
|
| 180 |
+

|
| 181 |
+
Fig. 8. Multi-view optimization-based fits of the bird mesh to keypoint and mask annotations in our dataset (upper section). Failure cases are shown in the lower section.
|
| 182 |
+
|
| 183 |
+
the effects of pose priors and joint and bone limits on performance in the single-view setting. For each ablation, we remove the corresponding term from the objective and report its effect on the accuracy of projected mesh keypoints and silhouettes in Supplementary Table S3. We measure keypoint accuracy relative to ground truth keypoints using PCK at two thresholds calculated based on the largest dimension of the bounding box, and we measure the accuracy of the projected silhouettes using IoU with ground truth masks. We budget 500 iterations for fitting each instance for all settings. The PCK increased as we removed the pose prior and bone limit (but not pose limit) terms from our objective. This increase indicates the model is achieving a better fit to the keypoints, potentially at the cost of producing an unrealistic fit, as might be indicated by the simultaneous decrease in IoU as priors are removed.
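For reference, the two measures used in this ablation can be computed as follows; the threshold fractions (0.05 and 0.10 for PCK@05 and PCK@10) follow the text, while the rest is a straightforward implementation rather than the authors' exact evaluation script.

```python
# Sketch: PCK thresholded by the largest bounding-box dimension, and mask IoU.
import numpy as np

def pck(pred_kpts, gt_kpts, visible, bbox, frac=0.05):
    """pred/gt: (K, 2) pixel coords; visible: (K,) bool; bbox: (x, y, w, h)."""
    thresh = frac * max(bbox[2], bbox[3])       # fraction of the largest box dimension
    dists = np.linalg.norm(pred_kpts - gt_kpts, axis=1)
    return (dists[visible] < thresh).mean()

def mask_iou(pred_mask, gt_mask):
    """pred/gt: boolean arrays of the same shape."""
    inter = np.logical_and(pred_mask, gt_mask).sum()
    union = np.logical_or(pred_mask, gt_mask).sum()
    return inter / union if union > 0 else 0.0
```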
|
| 184 |
+
|
| 185 |
+
Do silhouettes improve multi-view optimization? We compared fits of the model with and without the silhouette term (Equation 6) in the objective. The silhouette term improves IoU while only slightly affecting keypoint error (Table 1). More importantly, the silhouette term allows the model to better capture changes in shape produced during feather puffing (Figure 1).
Fig. 9. Regression-based recovery of bird pose and shape from a single view. Each panel shows the input image and refined mesh (see Figure 4).
Table 1. Ablation study of the silhouette term in the multi-view optimization setting. PCK@05 and PCK@10 denote percent correct keypoints within $5\%$ and $10\%$ of bounding box width, respectively. Silhouettes improve IoU with minimal effect on keypoint error.
|
| 213 |
+
|
| 214 |
+
| | weight ratio (kpt:mask) | PCK@05 | PCK@10 | IoU |
|---|---|---|---|---|
| keypoints only | N/A | 0.356 | 0.631 | 0.540 |
| keypoints + mask | 10:1 | 0.355 | 0.637 | 0.560 |
| keypoints + mask | 1:1 | 0.328 | 0.618 | 0.624 |
|
| 215 |
+
|
| 216 |
+
3D shape and pose recovery from a single view. Our single-view pipeline produces poses that are consistent across views (Table 2, Supplementary Figure S1). To overcome scale ambiguity, we fix pose and shape and then find the Procrustes transformation (translation, rotation, and scaling) that minimizes keypoint reprojection error in each additional view. We also perform experiments to evaluate the individual components of our full pipeline (Table 3). We first compare pose regression alone (i.e. not optimizing after regression), single-view optimization alone (i.e. not initialized by pose regression network), and the full pipeline. Although the regression network alone is less "accurate" than single-view optimization (Table 3), the pose regression network produces good estimates of global pose, which allows optimization to proceed much faster. Additional examples are shown in Figure 9. Finally, we demonstrate that our model and bone length formulation generalize to similar bird species in the CUB-200 dataset (Supplementary Figure S3).
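The cross-view evaluation relies on fitting a similarity (Procrustes) transform of the fixed mesh that minimizes keypoint reprojection error in another view; below is a small gradient-based sketch of that step, reusing the hypothetical `project` and `rot6d_to_matrix` helpers from the earlier sketches, with assumed optimizer settings.

```python
# Sketch: fit scale, rotation, and translation of fixed 3D keypoints to minimize
# 2D reprojection error in an additional camera view (Procrustes-style alignment).
import torch

def fit_similarity(kpts_3d, kpts_2d, cam, project, n_iters=200):
    s = torch.ones(1, requires_grad=True)                               # scale
    r6 = torch.tensor([1., 0., 0., 0., 1., 0.], requires_grad=True)     # identity rotation (6D)
    t = torch.zeros(3, requires_grad=True)                              # translation
    opt = torch.optim.Adam([s, r6, t], lr=0.01)                         # assumed settings
    for _ in range(n_iters):
        R = rot6d_to_matrix(r6)
        pts = s * kpts_3d @ R.T + t
        loss = (project(pts, cam.K, cam.R, cam.t) - kpts_2d).norm(dim=-1).sum()
        opt.zero_grad()
        loss.backward()
        opt.step()
    return s.detach(), rot6d_to_matrix(r6).detach(), t.detach()
```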
|
| 217 |
+
|
| 218 |
+
Failure cases. Occasional failures resulted in unnatural poses, which are shown in Supplementary Figure S2. To evaluate the cause of these failures, two annotators inspected the same random sample of 500 crops and rated their confidence in each bird's pose (confident, semi-confident, not-confident). They then rated the predicted keypoints as good or bad for all crops. Finally, they viewed the mesh fits and rated each as a success or failure. We found that $84\%$ of confident, $35\%$ of semi-confident, and $12\%$ of not-confident crops were fit
|
| 219 |
+
|
| 220 |
+
Table 2. Cross-view PCK and IoU of projected meshes from the single-view pipeline. Values are averaged across all views except the view used to obtain the mesh. Ground truth pipeline input means the keypoint and mask network predictions (Figure 4) are replaced by ground truth annotations.
|
| 221 |
+
|
| 222 |
+
| Pipeline input | PCK@05 | PCK@10 | IoU |
|---|---|---|---|
| predictions | 0.313 | 0.632 | 0.589 |
| ground truth | 0.332 | 0.635 | 0.586 |
|
| 223 |
+
|
| 224 |
+
Table 3. Same-view evaluation of the single-view pipeline and ablations. Regression and optimization are performed using keypoint and mask predictions and evaluated against ground truth. Additional results are presented in Supplementary Table S2.
|
| 225 |
+
|
| 226 |
+
| | PCK@05 | PCK@10 | IoU |
|---|---|---|---|
| regression | 0.104 | 0.318 | 0.483 |
| optimization | 0.331 | 0.575 | 0.641 |
| reg. + opt. | 0.364 | 0.619 | 0.671 |
|
| 227 |
+
|
| 228 |
+
successfully. Bad keypoint detection was responsible for $60\%$ of failures. Even good fits are not perfect, particularly in the tail and feet. Adding more degrees of freedom to the model, such as tail fanning, and annotating additional keypoints on the toes would improve these areas.
|
| 229 |
+
|
| 230 |
+
# 6 Conclusions
|
| 231 |
+
|
| 232 |
+
We present an articulated 3D model that captures changes in pose and shape that have been difficult to model in birds. We provide a novel multi-view dataset with both instance masks and keypoints that contains challenging occlusions and variation in viewpoint and lighting. Our single-view pipeline recovers cross-view-consistent avian pose and shape, and enables robust pose estimation of birds interacting in a social context. We aim to deploy our pipeline in the aviary to better understand how individual interactions drive the formation of avian social networks.
|
| 233 |
+
|
| 234 |
+
An interesting feature of birds is that variation in a single individual's shape across time can be much larger than overall shape variation among individuals (e.g. due to feather fluffing shown in Figure 1). In the future, it will be interesting to apply our pipeline to video data and additional species to develop a more nuanced model of how shape varies across time, individuals, and species.
|
| 235 |
+
|
| 236 |
+
Capturing 3D pose is critical to understanding human and animal health and behavior. Pose data produced by our pipeline will be useful for addressing how flying animals maneuver, negotiate cluttered environments, and make decisions while foraging or searching for mates, and how the collective behavior of a group arises from individual decisions.
|
| 237 |
+
|
| 238 |
+
Acknowledgements. We thank the diligent annotators in the Schmidt Lab, Kenneth Chaney for compute resources, and Stephen Phillips for helpful discussions. We gratefully acknowledge support through the following grants: NSF-IOS-1557499, NSF-IIS-1703319, NSF MRI 1626008, NSF TRIPODS 1934960.
|
| 239 |
+
|
| 240 |
+
# References
|
| 241 |
+
|
| 242 |
+
1. Anderson, D.J., Perona, P.: Toward a science of computational ethology. Neuron 84(1), 18-31 (2014). https://doi.org/10.1016/j.neuron.2014.09.005, http://www.sciencedirect.com/science/article/pii/S0896627314007934
|
| 243 |
+
2. Baillie, K.U., Spitzer, S., Crucius, D.: 'Smartaviary' poised to break new ground in behavioral research (2019), https://penntoday.upenn.edu/news/smart-aviary-poised-break-new-ground-behavioral-research
|
| 244 |
+
3. Bogo, F., Kanazawa, A., Lassner, C., Gehler, P., Romero, J., Black, M.J.: Keep it SMPL: Automatic estimation of 3D human pose and shape from a single image. In: Computer Vision - ECCV 2016. pp. 561-578. Lecture Notes in Computer Science, Springer International Publishing (Oct 2016)
|
| 245 |
+
4. Bogo, F., Romero, J., Loper, M., Black, M.J.: FAUST: Dataset and evaluation for 3D mesh registration. In: Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR). IEEE, Piscataway, NJ, USA (Jun 2014)
|
| 246 |
+
5. Breslav, M.: 3D pose estimation of flying animals in multi-view video datasets. Ph.D. thesis, Boston University (2016)
|
| 247 |
+
6. Cashman, T., Fitzgibbon, A.: What shape are dolphins? Building 3D morphable models from 2D images. IEEE Transactions on Pattern Analysis and Machine Intelligence 35, 232 (January 2013), https://www.microsoft.com/en-us/research/publication/shape-dolphins-building-3d-morphable-models-2d-images/
|
| 248 |
+
7. Felzenszwalb, P.F., Huttenlocher, D.P.: Pictorial structures for object recognition. Int. J. Comput. Vision 61(1), 55-79 (Jan 2005). https://doi.org/10.1023/B:VISI.0000042934.15159.49
|
| 249 |
+
8. Fontaine, E.I., Zabala, F., Dickinson, M.H., Burdick, J.W.: Wing and body motion during flight initiation in drosophila revealed by automated visual tracking. Journal of Experimental Biology 212(9), 1307-1323 (2009). https://doi.org/10.1242/jeb.025379, https://jeb.biologists.org/content/212/9/1307
|
| 250 |
+
9. Geman, S., McClure, D.: Statistical methods for tomographic image reconstruction. Bulletin of the International Statistical Institute LII(4), 5-21 (1987)
|
| 251 |
+
10. Graving, J.M., Chae, D., Naik, H., Li, L., Koger, B., Costelloe, B.R., Couzin, I.D.: DeepPoseKit, a software toolkit for fast and robust animal pose estimation using deep learning. eLife 8, e47994 (2019)
|
| 252 |
+
11. Günel, S., Rhodin, H., Morales, D., Campagnolo, J., Ramdya, P., Fua, P.: DeepFly3D, a deep learning-based approach for 3D limb and appendage tracking in tethered, adult Drosophila. eLife 8, e48571 (2019)
|
| 253 |
+
12. Huang, Y., Bogo, F., Lassner, C., Kanazawa, A., Gehler, P.V., Romero, J., Akhter, I., Black, M.J.: Towards accurate marker-less human shape and pose estimation over time. In: 2017 International Conference on 3D Vision (3DV). pp. 421-430 (2017)
|
| 254 |
+
13. Joo, H., Liu, H., Tan, L., Gui, L., Nabbe, B., Matthews, I., Kanade, T., Nobuhara, S., Sheikh, Y.: Panoptic studio: A massively multiview system for social motion capture. In: 2015 IEEE International Conference on Computer Vision (ICCV). pp. 3334-3342 (2015)
|
| 255 |
+
14. Joo, H., Simon, T., Cikara, M., Sheikh, Y.: Towards social artificial intelligence: Nonverbal social signal prediction in a triadic interaction. In: 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10865-10875 (2019)
|
| 256 |
+
|
| 257 |
+
15. Kanazawa, A., Black, M.J., Jacobs, D.W., Malik, J.: End-to-end recovery of human shape and pose. In: Computer Vision and Pattern Recognition (CVPR) (2018)
|
| 258 |
+
16. Kanazawa, A., Kovalsky, S., Basri, R., Jacobs, D.: Learning 3D deformation of animals from 2D images. Computer Graphics Forum 35(2), 365-374 (2016). https://doi.org/10.1111/cgf.12838, https://onlinelibrary.wiley.com/doi/abs/10.1111/cgf.12838
|
| 259 |
+
17. Kanazawa, A., Tulsiani, S., Efros, A.A., Malik, J.: Learning category-specific mesh reconstruction from image collections. In: ECCV (2018)
|
| 260 |
+
18. Kato, H., Ushiku, Y., Harada, T.: Neural 3D mesh renderer. In: 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3907-3916 (2018)
|
| 261 |
+
19. Kingma, D.P., Ba, J.L.: Adam: A method for stochastic optimization. arXiv (2014)
|
| 262 |
+
20. Kolotouros, N., Pavlakos, G., Black, M., Daniilidis, K.: Learning to reconstruct 3D human pose and shape via model-fitting in the loop. In: 2019 IEEE/CVF International Conference on Computer Vision (ICCV). pp. 2252-2261 (2019)
|
| 263 |
+
21. Kolotouros, N., Pavlakos, G., Daniilidis, K.: Convolutional mesh regression for single-image human shape reconstruction. In: 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4496-4505 (2019)
|
| 264 |
+
22. Li, T., Bolkart, T., Black, M.J., Li, H., Romero, J.: Learning a model of facial shape and expression from 4D scans. ACM Trans. Graph. 36(6) (Nov 2017). https://doi.org/10.1145/3130800.3130813, https://doi.org/10.1145/3130800.3130813
|
| 265 |
+
23. Lin, T.Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Dollár, P., Zitnick, C.L.: Microsoft COCO: Common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) Computer Vision - ECCV 2014. pp. 740-755. Springer International Publishing, Cham (2014)
|
| 266 |
+
24. Liu, J., Belhumeur, P.N.: Bird part localization using exemplar-based models with enforced pose and subcategory consistency. In: 2013 IEEE International Conference on Computer Vision. pp. 2520-2527 (2013)
|
| 267 |
+
25. Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: SMPL: A skinned multi-person linear model. ACM Trans. Graphics (Proc. SIGGRAPH Asia) 34(6), 248:1-248:16 (Oct 2015)
|
| 268 |
+
26. Mathis, A., Mamidanna, P., Cury, K.M., Abe, T., Murthy, V.N., Mathis, M.W., Bethge, M.: DeepLabCut: markerless pose estimation of user-defined body parts with deep learning. Nature Neuroscience 21(9), 1281-1289 (2018)
|
| 269 |
+
27. Newell, A., Yang, K., Deng, J.: Stacked hourglass networks for human pose estimation. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) Computer Vision - ECCV 2016. pp. 483-499. Springer International Publishing, Cham (2016)
|
| 270 |
+
28. Ntouskos, V., Sanzari, M., Cafaro, B., Nardi, F., Natola, F., Pirri, F., Ruiz, M.: Component-wise modeling of articulated objects. In: 2015 IEEE International Conference on Computer Vision (ICCV). pp. 2327-2335 (2015)
|
| 271 |
+
29. Pavlakos, G., Choutas, V., Ghorbani, N., Bolkart, T., Osman, A.A., Tzionas, D., Black, M.J.: Expressive body capture: 3D hands, face, and body from a single image. In: 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10967-10977 (2019)
|
| 272 |
+
30. Pavlakos, G., Kolotouros, N., Daniilidis, K.: Texturepose: Supervising human mesh estimation with texture consistency. In: 2019 IEEE/CVF International Conference on Computer Vision (ICCV). pp. 803-812 (2019)
|
| 273 |
+
31. Pavlakos, G., Zhu, L., Zhou, X., Daniilidis, K.: Learning to estimate 3D human pose and shape from a single color image. In: 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 459-468 (2018)
|
| 274 |
+
|
| 275 |
+
32. Pereira, T.D., Aldarondo, D.E., Willmore, L., Kislin, M., Wang, S.S.H., Murthy, M., Shaevitz, J.W.: Fast animal pose estimation using deep neural networks. Nature Methods 16, 117-125 (2019)
|
| 276 |
+
33. Pfrommer, B., Daniilidis, K.: Tagslam: Robust slam with fiducial markers. arXiv (2019)
|
| 277 |
+
34. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. ACM Trans. Graph. 36(6) (Nov 2017). https://doi.org/10.1145/3130800.3130883, https://doi.org/10.1145/3130800.3130883
|
| 278 |
+
35. Sun, K., Xiao, B., Liu, D., Wang, J.: Deep high-resolution representation learning for human pose estimation. In: 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 5686-5696 (2019)
|
| 279 |
+
36. Vicente, S., Agapito, L.: Balloon shapes: Reconstructing and deforming objects with volume from images. In: 2013 International Conference on 3D Vision - 3DV 2013. pp. 223-230 (2013)
|
| 280 |
+
37. Wah, C., Branson, S., Welinder, P., Perona, P., Belongie, S.: The Caltech-UCSD Birds-200-2011 Dataset. Tech. Rep. CNS-TR-2011-001, California Institute of Technology (2011)
|
| 281 |
+
38. West, M.J., King, A.P.: Female visual displays affect the development of male song in the cowbird. Nature 334, 244-246 (1988)
|
| 282 |
+
39. Zhou, Y., Barnes, C., Lu, J., Yang, J., Li, H.: On the continuity of rotation representations in neural networks. In: 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 5738-5746 (2019)
|
| 283 |
+
40. Zuffi, S., Kanazawa, A., Berger-Wolf, T., Black, M.: Three-D safari: Learning to estimate zebra pose, shape, and texture from images "in the wild". In: 2019 IEEE/CVF International Conference on Computer Vision (ICCV). pp. 5358-5367 (2019)
|
| 284 |
+
41. Zuffi, S., Kanazawa, A., Black, M.J.: Lions and tigers and bears: Capturing non-rigid, 3D, articulated shape from images. In: 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3955-3963 (2018)
|
| 285 |
+
42. Zuffi, S., Kanazawa, A., Jacobs, D.W., Black, M.J.: 3D menagerie: Modeling the 3D shape and pose of animals. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 5524-5532 (2017)
|
3dbirdreconstructionadatasetmodelandshaperecoveryfromasingleview/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d9fa4eadfd65be37c722a94022d23ababee5ef606f811157b5dfe1ab9cb95981
|
| 3 |
+
size 529825
|
3dbirdreconstructionadatasetmodelandshaperecoveryfromasingleview/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9064f12cb21a91584582a795b107f6e54c39aecaab1bf24abafcf204f87bea1c
|
| 3 |
+
size 384767
|
3dcvfgeneratingjointcameraandlidarfeaturesusingcrossviewspatialfeaturefusionfor3dobjectdetection/2f4d5859-2846-4dac-99f0-b3eb017ee241_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:15aa095f8fae92ba72ea0c3b2b34f82cd6dc55c50cdbc3d1f07adf2acaf85129
|
| 3 |
+
size 72368
|
3dcvfgeneratingjointcameraandlidarfeaturesusingcrossviewspatialfeaturefusionfor3dobjectdetection/2f4d5859-2846-4dac-99f0-b3eb017ee241_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b9d1653cc828fd94226aa745e32ea00c27bb7695f206778f022f31b311f5c583
|
| 3 |
+
size 86288
|
3dcvfgeneratingjointcameraandlidarfeaturesusingcrossviewspatialfeaturefusionfor3dobjectdetection/2f4d5859-2846-4dac-99f0-b3eb017ee241_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e337a2339600cd08bd99c03ef27cf239c21b3bdcd655f75c142c948e0db2925e
|
| 3 |
+
size 843004
|
3dcvfgeneratingjointcameraandlidarfeaturesusingcrossviewspatialfeaturefusionfor3dobjectdetection/full.md
ADDED
|
@@ -0,0 +1,257 @@
| 1 |
+
# 3D-CVF: Generating Joint Camera and LiDAR Features Using Cross-View Spatial Feature Fusion for 3D Object Detection
|
| 2 |
+
|
| 3 |
+
Jin Hyeok Yoo*, Yecheol Kim*, Jisong Kim, and Jun Won Choi*
|
| 4 |
+
|
| 5 |
+
Department of Electrical Engineering, Hanyang University {jhyoo,yckim,jskim}@spa.hanyang.ac.kr junwchoi@hanyang.ac.kr
|
| 6 |
+
|
| 7 |
+
Abstract. In this paper, we propose a new deep architecture for fusing camera and LiDAR sensors for 3D object detection. Because the camera and LiDAR sensor signals have different characteristics and distributions, fusing these two modalities is expected to improve both the accuracy and robustness of 3D object detection. One of the challenges presented by the fusion of cameras and LiDAR is that the spatial feature maps obtained from each modality are represented by significantly different views in the camera and world coordinates; hence, it is not an easy task to combine two heterogeneous feature maps without loss of information. To address this problem, we propose a method called 3D-CVF that combines the camera and LiDAR features using the cross-view spatial feature fusion strategy. First, the method employs auto-calibrated projection, to transform the 2D camera features to a smooth spatial feature map with the highest correspondence to the LiDAR features in the bird's eye view (BEV) domain. Then, a gated feature fusion network is applied to use the spatial attention maps to mix the camera and LiDAR features appropriately according to the region. Next, camera-LiDAR feature fusion is also achieved in the subsequent proposal refinement stage. The low-level LiDAR features and camera features are separately pooled using region of interest (RoI)-based feature pooling and fused with the joint camera-LiDAR features for enhanced proposal refinement. Our evaluation, conducted on the KITTI and nuScenes 3D object detection datasets, demonstrates that the camera-LiDAR fusion offers significant performance gain over the LiDAR-only baseline and that the proposed 3D-CVF achieves state-of-the-art performance in the KITTI benchmark.
|
| 8 |
+
|
| 9 |
+
Keywords: 3D Object Detection, Sensor Fusion, Intelligent Vehicle, Camera Sensor, LiDAR Sensor, Bird's Eye View
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
Object detection has been considered one of the most challenging computer vision problems. Recently, the emergence of convolutional neural networks
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
(a)
|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
(b)
|
| 20 |
+
Fig. 1. Visualization of the projected camera feature map: (a), (b), and (c) show visualizations of the six camera feature maps projected in the bird's eye view (BEV) domain. Without our auto-calibrated projection, some artifacts in the feature map are visible in (a). The auto-calibrated projection generates the smooth and dense transformed feature map shown in (b). However, the feature map in (b) fails to localize the region of the objects. After applying the adaptive gated fusion network, we can finally resolve the region of objects as shown in the feature map (c).
|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
(c)
|
| 24 |
+
|
| 25 |
+
(CNN) has enabled unprecedented progress in object detection techniques owing to its ability to extract the abstract high-level features from the 2D image. Thus far, numerous object detection methods have been developed for 2D object detection [16,20,21]. Recently, these studies have been extended to the 3D object detection task [29,2,8,24,3,17,31,28,12,9,13,30,26], where the locations of the objects should be identified in 3D world coordinates. 3D object detection is particularly useful for autonomous driving applications because diverse types of dynamic objects, such as surrounding vehicles, pedestrians, and cyclists, must be identified in the 3D environment.
|
| 26 |
+
|
| 27 |
+
In general, achieving good accuracy in 3D object detection using only a camera sensor is not an easy task owing to the lack of depth information. Thus, other ranging sensors such as LiDAR, Radar, and RGB-D camera sensors are widely used as alternative signal sources for 3D object detection. Thus far, various 3D object detectors employing LiDAR sensors have been proposed, including MV3D [2], PIXOR [29], ContFuse [13], PointRCNN [22], F-ConvNet [26], STD [30], VoxelNet [31], SECOND [28], MMF [12], PointPillars [9], and Part-A² [23]. Although the performance of LiDAR-only 3D object detectors has improved significantly of late, LiDAR point clouds still provide only limited information on the objects, such as their fine-grained shape, colors, and textures. Hence, using camera and LiDAR data together is expected to yield more accurate and more robust detection results. Various camera and LiDAR fusion strategies have been proposed for 3D object detection. Well-known camera-LiDAR fusion methods include AVOD [8], MV3D [2], MMF [12], RoarNet [24], F-PointNet [17], and ContFuse [13].
|
| 28 |
+
|
| 29 |
+
In fact, the problem of fusing camera and LiDAR sensors is challenging as the features obtained from the camera image and LiDAR point cloud are represented in different points of view (i.e., camera-view versus 3D world view).
|
| 30 |
+
|
| 31 |
+
When the camera feature is projected into 3D world coordinates, some useful spatial information about the objects might be lost since this transformation is a one-to-many mapping. Furthermore, there might be some inconsistency between the projected coordinate and LiDAR 3D coordinate. Indeed, it has been difficult for the camera-LiDAR fusion-based methods to beat the LiDAR-only methods in terms of performance. This motivates us to find an effective way to fuse two feature maps in different views without losing important information for 3D object detection.
|
| 32 |
+
|
| 33 |
+
In this paper, we propose a new 3D object detection method, named $3D$ -cross view fusion (3D-CVF), which can fuse the spatial feature maps separately extracted from the camera and LiDAR data, effectively. As shown in Fig. 2, we are interested in fusing the LiDAR sensor and the $N$ multi-view cameras deployed to cover a wider field of view. Information fusion between the camera and LiDAR is achieved over two object detection stages. In the first stage, we aim to generate the strong joint camera-LiDAR features. The auto-calibrated feature projection maps the camera-view features to smooth and dense BEV feature maps using the interpolated projection capable of correcting the spatial offsets. Fig. 1 (a) and (b) compare the feature maps obtained without auto-calibrated projection versus with the auto-calibrated projection, respectively. Note that the auto-calibrated projection yields a smooth camera feature map in the BEV domain as shown in Fig. 1 (b). We also note from Fig. 1 (b) that since the camera feature mapping is a one-to-many mapping, we cannot localize the objects on the transformed camera feature map. To resolve objects in the BEV domain, we employ the adaptive gated fusion network that determines where and what should be brought from two sources using attention mechanism. Fig. 1 (c) shows the appropriately-localized activation for the objects obtained by applying the adaptive gated fusion network. Camera-LiDAR information fusion is also achieved at the second proposal refinement stage. Once the region proposals are found based on the joint camera-LiDAR feature map obtained in the first stage, 3D region of interest (RoI)-based pooling is applied to fuse low-level LiDAR and camera features with the joint camera-LiDAR feature map. The LiDAR and camera features corresponding to the 3D RoI boxes are pooled and encoded by PointNet encoder. Aggregation of the encoded features with the joint camera-LiDAR features lead to improved proposal refinement.
|
| 34 |
+
|
| 35 |
+
We have evaluated our 3D-CVF method on the publicly available KITTI [4] and nuScenes [1] datasets. We confirm that by combining the above two sensor fusion strategies, the proposed method offers up to $1.57\%$ and $2.74\%$ mAP gains over the baseline without sensor fusion on the KITTI and nuScenes datasets, respectively. Also, we show that the proposed 3D-CVF achieves detection accuracy comparable to the state of the art on the KITTI 3D object detection benchmark.
|
| 36 |
+
|
| 37 |
+
The contributions of our work are summarized as follows:
|
| 38 |
+
|
| 39 |
+
- We propose a new 3D object detection architecture that effectively combines information provided by both camera and LiDAR sensors in two detection stages. In the first stage, the strong joint camera-LiDAR feature is generated by applying the auto-calibrated projection and the gated attention. In the second proposal refinement stage, 3D RoI-based feature aggregation is performed to achieve further improvements through sensor fusion.
|
| 42 |
+
|
| 43 |
+
- We investigate the benefit of the sensor fusion achieved by the 3D-CVF. Our experiments demonstrate that the performance gain achieved by the sensor fusion on the nuScenes dataset is higher than that on the KITTI dataset. Because the resolution of the LiDAR used in nuScenes is lower than that in KITTI, this shows that the camera sensor compensates for the low resolution of the LiDAR data. Also, we observe that the performance gain achieved by the sensor fusion is much higher for distant objects than for nearby objects, which further supports this conclusion.
|
| 44 |
+
|
| 45 |
+
# 2 Related Work
|
| 46 |
+
|
| 47 |
+
# 2.1 LiDAR-Only 3D Object Detection
|
| 48 |
+
|
| 49 |
+
LiDAR-based 3D object detectors must encode the point clouds since they have unordered and irregular structures. MV3D [2] and PIXOR [29] projected 3D point clouds onto a discrete grid structure in 2D planes and extracted the features from the resulting multi-view 2D images. PointRCNN [22] and STD [30] used PointNet [18,19] to yield a global feature representing the geometric structure of the entire point set. Voxel-based point encoding methods used 3D voxels to organize the unordered point clouds and encoded the points in each voxel using a point encoding network [31]. Various voxel-based 3D object detectors have been proposed, including SECOND [28], PointPillars [9], and Part-$A^2$ [23].
|
| 50 |
+
|
| 51 |
+
# 2.2 LiDAR and Camera Fusion-based 3D Object Detection
|
| 52 |
+
|
| 53 |
+
To exploit the advantages of the camera and LiDAR sensors, various camera and LiDAR fusion methods have been proposed for 3D object detection. The approaches proposed in [17,24,27,26] detected the objects in the two sequential steps, where 1) the region proposals were generated based on the camera image, and then 2) the LiDAR points in the region of interest were processed to detect the objects. However, the performance of these methods is limited by the accuracy of the camera-based detector. MV3D [2] proposed the two-stage detector, where 3D proposals are found from the LiDAR point clouds projected in BEV, and 3D object detection is performed by fusing the multi-view features obtained by RoI pooling. AVOD [8] fused the LiDAR BEV and camera front-view features at the intermediate convolutional layer to propose 3D bounding boxes. ContFuse [13] proposed the effective fusion architecture that transforms the front camera-view features into those in BEV through some interpolation network. MMF [12] learned to fuse both camera and LiDAR data through multi-task loss associated with 2D and 3D object detection, ground estimation, and depth completion.
|
| 54 |
+
|
| 55 |
+
While various sensor fusion networks have been proposed, they do not easily outperform LiDAR-only based detectors. This might be due to the difficulty of
|
| 56 |
+
|
| 57 |
+

|
| 58 |
+
Fig. 2. Overall structure of 3D-CVF: After point clouds and each camera-view image are separately processed by each backbone network, the camera-view features are transformed to the features in BEV using the auto-calibrated feature projection. Then, the camera and LiDAR features are fused using the gated feature fusion network. The detection outputs are predicted after refining the proposals using 3D RoI-based fusion network. The format of 3D convolutional layers used in the figure follows “ $k_x$ x $k_y$ x $k_z$ (channel size)” where $k_x$ , $k_y$ and $k_z$ denote the kernel sizes in each axis.
|
| 59 |
+
|
| 60 |
+
combining the camera and LiDAR features represented in different view domains. In the next sections, we present an effective way to overcome this challenge.
|
| 61 |
+
|
| 62 |
+
# 3 Proposed 3D Object Detector
|
| 63 |
+
|
| 64 |
+
In this section, we present the details of the proposed architecture.
|
| 65 |
+
|
| 66 |
+
# 3.1 Overall architecture
|
| 67 |
+
|
| 68 |
+
The overall architecture of the proposed method is illustrated in Fig. 2. It consists of five modules: 1) the LiDAR pipeline, 2) the camera pipeline, 3) cross-view spatial feature mapping, 4) the gated camera-LiDAR feature fusion network, and 5) the proposal generation and refinement network. Each of them is described in the following.
|
| 69 |
+
|
| 70 |
+
LiDAR Pipeline: LiDAR points are first organized based on the LiDAR voxel structure. The LiDAR points in each voxel are encoded by the point encoding network [31], which generates the fixed-length embedding vector. These encoded LiDAR voxels are processed by six 3D sparse convolution [28] layers with stride two, which produces the LiDAR feature map of 128 channels in the BEV domain. After sparse convolutional layers are applied, the width and height
|
| 71 |
+
|
| 72 |
+
of the resulting LiDAR feature map are reduced by a factor of eight compared to those of the LiDAR voxel structure.
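To make the voxel grouping step concrete, below is a minimal sketch of how LiDAR points could be organized into the voxel grid described above, assuming a simple mean-of-points encoder in place of the full point encoding network of [31]; the function name and the exact range/voxel-size defaults are illustrative.

```python
import torch

def voxelize(points, voxel_size=(0.05, 0.05, 0.1), pc_range=(0.0, -40.0, -3.0, 70.4, 40.0, 1.0)):
    """Group LiDAR points into voxels and encode each voxel by the mean of its points.
    points: (N, 4) tensor of (x, y, z, intensity).
    Returns per-voxel features (M, 4) and integer voxel coordinates (M, 3)."""
    xyz_min = torch.tensor(pc_range[:3], dtype=torch.float32)
    xyz_max = torch.tensor(pc_range[3:], dtype=torch.float32)
    vsize = torch.tensor(voxel_size, dtype=torch.float32)
    # Keep only the points inside the detection range.
    mask = ((points[:, :3] >= xyz_min) & (points[:, :3] < xyz_max)).all(dim=1)
    pts = points[mask]
    # Integer voxel index of every remaining point.
    coords = ((pts[:, :3] - xyz_min) / vsize).long()
    # Collapse duplicate indices and average the points falling into each voxel.
    uniq, inv = torch.unique(coords, dim=0, return_inverse=True)
    feats = torch.zeros(len(uniq), pts.shape[1]).index_add_(0, inv, pts)
    counts = torch.zeros(len(uniq)).index_add_(0, inv, torch.ones(len(pts)))
    return feats / counts.unsqueeze(1), uniq
```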
|
| 73 |
+
|
| 74 |
+
RGB Pipeline: In parallel to the LiDAR pipeline, the camera RGB images are processed by the CNN backbone network. We use the pre-trained ResNet-18 [6] followed by feature pyramid network (FPN) [14] to generate the camera feature map of 256 channels represented in camera-view. The width and height of the camera feature maps are reduced by a factor of eight compared to those of the input RGB images.
|
| 75 |
+
|
| 76 |
+
Cross-View Feature Mapping: The cross-view feature (CVF) mapping generates the camera feature maps projected in BEV. The auto-calibrated projection converts the camera feature maps in camera-view to those in BEV. Then, the projected feature map is enhanced by the additional convolutional layers and delivered to the gated camera-LiDAR feature fusion block.
|
| 77 |
+
|
| 78 |
+
Gated Camera-LiDAR Feature Fusion: The adaptive gated fusion network is used to combine the camera feature maps and the LiDAR feature map. The spatial attention maps are applied to both feature maps to adjust the contributions from each modality depending on their importance. The adaptive gated fusion network produces the joint camera-LiDAR feature map, which is delivered to the 3D RoI fusion-based refinement block.
|
| 79 |
+
|
| 80 |
+
3D RoI Fusion-based Refinement: After the region proposals are generated based on the joint camera-LiDAR feature map, the RoI pooling is applied for proposal refinement. Since the joint camera-LiDAR feature map does not contain sufficient spatial information, both the multi-scale LiDAR features and camera features are extracted using 3D RoI-based pooling. These features are separately encoded by the PointNet encoder and fused with the joint camera-LiDAR feature map by a 3D RoI-based fusion network. The fused feature is finally used to produce the final detection results.
|
| 81 |
+
|
| 82 |
+
# 3.2 Cross-View Feature Mapping
|
| 83 |
+
|
| 84 |
+
Dense Camera Voxel Structure: The camera voxel structure is used for the feature mapping. To generate spatially dense features, we construct a camera voxel structure whose width and height are twice those of the LiDAR voxel structure along the $(x,y)$ axes. This leads to a voxel structure with higher spatial resolution: in our design, the camera voxel structure has four times as many voxels as the LiDAR voxel structure.
|
| 85 |
+
|
| 86 |
+
Auto-Calibrated Projection Method: The auto-calibrated projection technique is devised to 1) transform the camera-view feature into the BEV feature and 2) find the best correspondence between them to maximize the effect of information fusion. The structure of the auto-calibrated projection method is depicted in Fig. 3. First, the center of each voxel is projected to $(\hat{x},\hat{y})$ in the camera-view plane using the world-to-camera-view projection matrix and $(\hat{x},\hat{y})$ is adjusted by the calibration offset $(\varDelta x,\varDelta y)$ . Then, the neighbor camera feature pixels near to the calibrated position $(\hat{x} +\varDelta x,\hat{y} +\varDelta y)$ are combined with the weights determined by interpolation methods. That is, the combined pixel
|
| 87 |
+
|
| 88 |
+

|
| 89 |
+
Fig. 3. Illustration of the proposed auto-calibrated projection: To represent the camera feature in BEV, the center coordinate of a voxel is projected onto the point $(\hat{x},\hat{y})$ with calibration offset $(\Delta x,\Delta y)$ in the camera-view plane. The neighboring four feature pixels are combined using linear interpolation and assigned to the corresponding voxel.
|
| 90 |
+
|
| 91 |
+
vector $\mathbf{u}$ is given by
|
| 92 |
+
|
| 93 |
+
$$
|
| 94 |
+
\mathbf{u} = \sum_{m=1}^{2} \sum_{n=1}^{2} w_{m,n} \mathbf{f}_{m,n}, \tag{1}
|
| 95 |
+
$$
|
| 96 |
+
|
| 97 |
+
where the set $\{\mathbf{f}_{m,n}\}$ corresponds to four adjacent feature pixels closest to $(\hat{x} + \Delta x, \hat{y} + \Delta y)$ , and $w_{m,n}$ is the weight obtained by the interpolation methods. In bilinear interpolation, $w_{m,n}$ is obtained using Euclidean distance as follows
|
| 98 |
+
|
| 99 |
+
$$
|
| 100 |
+
w_{m,n} \propto \left\| (x_{m}, y_{m}) - (\hat{x} + \Delta x, \hat{y} + \Delta y) \right\|^{-1}, \tag{2}
|
| 101 |
+
$$
|
| 102 |
+
|
| 103 |
+
where $w_{m,n}$ is normalized such that $\sum_{m=1}^{2} \sum_{n=1}^{2} w_{m,n} = 1$ . Then, the combined feature $\mathbf{u}$ is assigned to the corresponding voxel. Note that different calibration offsets $(\Delta x, \Delta y)$ are assigned to different regions in 3D space. These calibration offset parameters can be jointly optimized along with other network weights. The auto-calibrated projection provides spatially smooth camera feature maps that best match with the LiDAR feature map in the BEV domain.
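As a rough illustration of Eqs. (1)-(2), the sketch below projects voxel centers into the camera-view feature map, shifts them by a learnable calibration offset $(\Delta x, \Delta y)$, and blends the four nearest feature pixels with normalized inverse-distance weights. The `project_to_image` callable (the world-to-camera-view projection) and the per-region storage of the offsets are assumptions, not the paper's exact implementation.

```python
import torch
import torch.nn as nn

class AutoCalibratedProjection(nn.Module):
    """Sketch of Eqs. (1)-(2): project voxel centers to the camera-view plane,
    apply a learnable calibration offset, and combine the four nearest camera
    feature pixels with normalized inverse-distance weights."""

    def __init__(self, num_regions):
        super().__init__()
        # One learnable (dx, dy) calibration offset per spatial region (assumption).
        self.offsets = nn.Parameter(torch.zeros(num_regions, 2))

    def forward(self, cam_feat, voxel_centers, region_ids, project_to_image):
        # cam_feat: (C, H, W) camera-view feature map
        # voxel_centers: (M, 3) voxel centers in world coordinates
        # project_to_image: maps world points to (M, 2) pixel coordinates (assumed given)
        C, H, W = cam_feat.shape
        uv = project_to_image(voxel_centers) + self.offsets[region_ids]  # calibrated position
        x0, y0 = uv[:, 0].floor().long(), uv[:, 1].floor().long()
        out, wsum = 0.0, 0.0
        for dx in (0, 1):
            for dy in (0, 1):
                px = (x0 + dx).clamp(0, W - 1)
                py = (y0 + dy).clamp(0, H - 1)
                # Inverse Euclidean distance to this neighbouring pixel (Eq. (2)).
                d = torch.linalg.norm(uv - torch.stack([px, py], dim=1).float(), dim=1)
                w = 1.0 / (d + 1e-6)
                out = out + w.unsqueeze(1) * cam_feat[:, py, px].t()  # weighted pixel (Eq. (1))
                wsum = wsum + w
        return out / wsum.unsqueeze(1)  # (M, C): one interpolated camera feature per voxel
```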
|
| 104 |
+
|
| 105 |
+
# 3.3 Gated Camera-LiDAR Feature Fusion
|
| 106 |
+
|
| 107 |
+
Adaptive Gated Fusion Network: To extract essential features from both camera and LiDAR sensors, we apply an adaptive gated fusion network that selectively combines the feature maps depending on the relevance to the
|
| 108 |
+
|
| 109 |
+

|
| 110 |
+
Fig. 4. Adaptive gated fusion network: The adaptive gated fusion network generates the attention maps by applying $3 \times 3$ convolutional layers followed by a sigmoid function to the concatenated inputs. These attention maps are multiplied to both camera and LiDAR features through the element-wise product operation.
|
| 111 |
+
|
| 112 |
+
object detection task [7]. The proposed gated fusion structure is depicted in Fig. 4. The camera and LiDAR features are gated using the attention maps as follows
|
| 113 |
+
|
| 114 |
+
$$
|
| 115 |
+
\mathbf{F}_{g.C} = \mathbf{F}_{C} \times \underbrace{\sigma\left(\operatorname{Conv}_{C}\left(\mathbf{F}_{C} \oplus \mathbf{F}_{L}\right)\right)}_{\text{Camera Attention Map}} \tag{3}
|
| 116 |
+
$$
|
| 117 |
+
|
| 118 |
+
$$
|
| 119 |
+
\mathbf{F}_{g.L} = \mathbf{F}_{L} \times \underbrace{\sigma\left(\operatorname{Conv}_{L}\left(\mathbf{F}_{C} \oplus \mathbf{F}_{L}\right)\right)}_{\text{LiDAR Attention Map}} \tag{4}
|
| 120 |
+
$$
|
| 121 |
+
|
| 122 |
+
where $\mathbf{F}_C$ and $\mathbf{F}_L$ represent the camera feature and LiDAR feature, respectively, $\mathbf{F}_{g.C}$ and $\mathbf{F}_{g.L}$ are the corresponding gated features, $\times$ is the element-wise product operation, and $\oplus$ is the channel-wise concatenation operation. Note that the elements of the attention maps indicate the relative importance of the camera and LiDAR features. After the attention maps are applied, the final joint feature $\mathbf{F}_{joint}$ is obtained by concatenating $\mathbf{F}_{g.C}$ and $\mathbf{F}_{g.L}$ channel-wise. (see Fig. 2.)
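A minimal PyTorch-style sketch of Eqs. (3)-(4) is given below; the number of attention channels (here, one gate value per feature channel) is an assumption, since the paper only specifies a $3 \times 3$ convolution followed by a sigmoid.

```python
import torch
import torch.nn as nn

class AdaptiveGatedFusion(nn.Module):
    """Gated camera-LiDAR fusion following Eqs. (3)-(4): each modality is scaled by a
    spatial attention map predicted from the concatenated features, and the two gated
    maps are concatenated channel-wise to form the joint camera-LiDAR feature."""

    def __init__(self, cam_ch, lidar_ch):
        super().__init__()
        self.att_cam = nn.Conv2d(cam_ch + lidar_ch, cam_ch, kernel_size=3, padding=1)
        self.att_lidar = nn.Conv2d(cam_ch + lidar_ch, lidar_ch, kernel_size=3, padding=1)

    def forward(self, f_cam, f_lidar):
        x = torch.cat([f_cam, f_lidar], dim=1)                 # channel-wise concatenation (⊕)
        g_cam = f_cam * torch.sigmoid(self.att_cam(x))         # camera attention map
        g_lidar = f_lidar * torch.sigmoid(self.att_lidar(x))   # LiDAR attention map
        return torch.cat([g_cam, g_lidar], dim=1)              # joint camera-LiDAR feature
```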
|
| 123 |
+
|
| 124 |
+
# 3.4 3D-RoI Fusion-based Refinement
|
| 125 |
+
|
| 126 |
+
Region Proposal Generation: The initial detection results are obtained by the region proposal network (RPN). Initial regression results and objectness scores are predicted by applying the detection sub-network to the joint camera-LiDAR feature. Since the initial detection results have a large number of proposal boxes associated with objectness scores, the boxes with high objectness scores remain through NMS post-processing with the IoU threshold 0.7.
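The proposal selection step can be sketched as follows; for simplicity this example runs axis-aligned NMS via `torchvision.ops.nms`, whereas the actual detector operates on rotated BEV boxes, and the top-$k$ values are illustrative.

```python
import torch
from torchvision.ops import nms

def select_proposals(boxes_bev, scores, iou_thresh=0.7, pre_topk=1024, post_topk=128):
    """Keep high-scoring, non-overlapping region proposals.
    boxes_bev: (N, 4) axis-aligned BEV boxes (x1, y1, x2, y2); a rotated-IoU NMS
    would replace torchvision's nms when using the model's rotated boxes."""
    order = scores.argsort(descending=True)[:pre_topk]   # rank by objectness score
    keep = nms(boxes_bev[order], scores[order], iou_thresh)
    return order[keep[:post_topk]]                       # indices of surviving proposals
```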
|
| 127 |
+
|
| 128 |
+
3D RoI-based Feature Fusion: The predicted box regression values are translated to the global coordinates using the rotated 3D RoI alignment [12]. The low-level LiDAR and camera features are pooled using 3D RoI-based pooling and combined with the joint camera-LiDAR features. These low-level features retain the detailed spatial information on objects (particularly in $z$ axis) so that it can provide useful information for refining the region proposals. Specifically, six multi-scale LiDAR features corresponding to the 3D RoI boxes are pooled by 3D RoI-based pooling. These low-level LiDAR features are individually encoded by PointNet encoders for each scale and concatenated into a $1 \times 1$ feature vector.
|
| 129 |
+
|
| 130 |
+

|
| 131 |
+
Fig. 5. Illustration of the proposed RoI grid-based pooling of camera features: The RoI grid-based camera feature is generated by pooling the camera features according to the grid points in a 3D RoI box and encoding them using PointNet encoder.
|
| 132 |
+
|
| 133 |
+
Simultaneously, the multi-view camera features are also transformed into a $1 \times 1$ feature vector. Since the camera-view features are represented in a different domain from the 3D RoI boxes, we devise the RoI grid-based pooling. As shown in Fig. 5, consider the $r \times r \times r$ equally spaced coordinates in the 3D RoI box. These points are projected to the camera view-domain and the camera feature pixels corresponding to these points are encoded by the PointNet encoders. Concatenation of these encoded multi-view camera features forms another $1 \times 1$ feature vector. The final feature used for proposal refinement is obtained by concatenating these two $1 \times 1$ feature vectors with the RoI aligned joint camera-LiDAR features.
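The RoI grid-based camera pooling can be sketched as follows, assuming an axis-aligned RoI box and a given `project_to_image` world-to-camera projection; the grid resolution $r$ and the MLP widths are illustrative, and the PointNet encoder is reduced to a shared MLP followed by max pooling.

```python
import torch
import torch.nn as nn

class RoIGridCameraPooling(nn.Module):
    """Sample r x r x r grid points inside a 3D RoI box, project them to the camera
    view, gather the corresponding feature pixels, and encode them PointNet-style."""

    def __init__(self, cam_ch, out_ch, r=6):
        super().__init__()
        self.r = r
        self.mlp = nn.Sequential(nn.Linear(cam_ch, out_ch), nn.ReLU(),
                                 nn.Linear(out_ch, out_ch))

    def grid_points(self, box):
        # box: (cx, cy, cz, l, w, h); axis-aligned for simplicity (the paper's boxes are rotated).
        steps = torch.linspace(-0.5, 0.5, self.r)
        gx, gy, gz = torch.meshgrid(steps, steps, steps, indexing="ij")
        offsets = torch.stack([gx, gy, gz], dim=-1).reshape(-1, 3)   # (r^3, 3) relative offsets
        return box[:3] + offsets * box[3:6]

    def forward(self, cam_feat, box, project_to_image):
        # cam_feat: (C, H, W); project_to_image maps world points to pixel coords (assumed given).
        pts = self.grid_points(box)                       # (r^3, 3) grid points inside the RoI
        uv = project_to_image(pts).round().long()
        u = uv[:, 0].clamp(0, cam_feat.shape[2] - 1)
        v = uv[:, 1].clamp(0, cam_feat.shape[1] - 1)
        feats = cam_feat[:, v, u].t()                     # (r^3, C) sampled camera features
        return self.mlp(feats).max(dim=0).values          # one pooled camera RoI feature vector
```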
|
| 134 |
+
|
| 135 |
+
# 3.5 Training Loss Function
|
| 136 |
+
|
| 137 |
+
Our 3D-CVF is trained via two-stage training process. In the first stage, we train the network pipeline up to RPN using the RPN loss, $L_{rpn} = \beta_1 L_{cls} + \beta_2 (L_{reg|\theta} + L_{reg|loc})$ , where $\beta_1$ and $\beta_2$ are set to 1.0 and 2.0, respectively, and $L_{reg|loc}$ and $L_{reg|\theta}$ are given by the Smoothed-L1 loss [5] and modified Smoothed-L1 loss [28], respectively. Note that we follow suggestions from [28] in parameterizing 3D ground truth boxes and 3D anchors. Note also that $L_{cls}$ denotes the focal loss [15]
|
| 138 |
+
|
| 139 |
+
$$
|
| 140 |
+
L_{cls} = \frac{1}{N_{box}} \sum_{i=1}^{N_{box}} -\alpha (1 - p_{i})^{\gamma} \log\left(p_{i}\right), \tag{5}
|
| 141 |
+
$$
|
| 142 |
+
|
| 143 |
+
where $N_{box}$ denotes the total number of boxes, $p_i$ is the objectness score for the $i$-th box, and we set $\alpha = 0.25$ and $\gamma = 2$. In the next stage, the entire network is trained using the RPN loss $L_{rpn}$ plus the refinement loss $L_{ref}$. The refinement loss $L_{ref}$ is given by
|
| 144 |
+
|
| 145 |
+
$$
|
| 146 |
+
L_{ref} = \beta_{1} L_{iou} + \beta_{2} \left(L_{reg|\theta} + L_{reg|loc}\right), \tag{6}
|
| 147 |
+
$$
|
| 148 |
+
|
| 149 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Modality</td><td rowspan="2">Runtime (ms)</td><td colspan="3">3D AP (%)</td></tr><tr><td>AP_Easy</td><td>AP(Mod.)</td><td>AP_Hard</td></tr><tr><td>VoxelNet [31]</td><td>LiDAR</td><td>220</td><td>77.47</td><td>65.11</td><td>57.73</td></tr><tr><td>SECOND [28]</td><td>LiDAR</td><td>50</td><td>83.13</td><td>73.66</td><td>66.20</td></tr><tr><td>PointPillars [9]</td><td>LiDAR</td><td>16.2</td><td>79.05</td><td>74.99</td><td>68.30</td></tr><tr><td>PointRCNN [22]</td><td>LiDAR</td><td>100</td><td>85.94</td><td>75.76</td><td>68.32</td></tr><tr><td>Fast PointRCNN [3]</td><td>LiDAR</td><td>65</td><td>85.29</td><td>77.40</td><td>70.24</td></tr><tr><td>Patches [10]</td><td>LiDAR</td><td>150</td><td>88.67</td><td>77.20</td><td>71.82</td></tr><tr><td>Part A² [23]</td><td>LiDAR</td><td>80</td><td>87.81</td><td>78.49</td><td>73.51</td></tr><tr><td>STD [30]</td><td>LiDAR</td><td>80</td><td>87.95</td><td>79.71</td><td>75.09</td></tr><tr><td>MV3D [12]</td><td>LiDAR+RGB</td><td>240</td><td>71.09</td><td>62.35</td><td>55.12</td></tr><tr><td>AVOD [8]</td><td>LiDAR+RGB</td><td>80</td><td>73.59</td><td>65.78</td><td>58.38</td></tr><tr><td>F-PointNet [17]</td><td>LiDAR+RGB</td><td>170</td><td>81.20</td><td>70.39</td><td>62.19</td></tr><tr><td>AVOD-FPN [8]</td><td>LiDAR+RGB</td><td>100</td><td>81.94</td><td>71.88</td><td>66.38</td></tr><tr><td>UberATG-ContFuse [13]</td><td>LiDAR+RGB</td><td>60</td><td>82.54</td><td>66.22</td><td>64.04</td></tr><tr><td>RoarNet [24]</td><td>LiDAR+RGB</td><td>100</td><td>83.95</td><td>75.79</td><td>67.88</td></tr><tr><td>UberATG-MMF [12]</td><td>LiDAR+RGB</td><td>80</td><td>88.40</td><td>77.43</td><td>70.22</td></tr><tr><td>Our 3D-CVF</td><td>LiDAR+RGB</td><td>75</td><td>89.20</td><td>80.05</td><td>73.11</td></tr></table>
|
| 150 |
+
|
| 151 |
+
Table 1. Performance on the KITTI test benchmark for the Car category: The model is trained on the KITTI training set and evaluated on the KITTI test set. "AP_Easy", "AP_Mod.", and "AP_Hard" denote the average precision for the "easy", "moderate", and "hard" difficulty levels.
|
| 152 |
+
|
| 153 |
+
where $L_{iou}$ denotes the confidence score refinement loss that follows the definition of 3D IoU loss in [11]. Further details of training procedure are provided in the next section.
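A compact sketch of the loss terms is given below: the focal classification loss of Eq. (5) and the weighted sum used in $L_{rpn}$, with plain smooth-L1 losses standing in for the (modified) smooth-L1 regression terms of [5, 28]; the exact box/angle target encoding is omitted.

```python
import torch
import torch.nn.functional as F

def focal_loss(p, alpha=0.25, gamma=2.0):
    """Eq. (5): mean of -alpha * (1 - p_i)^gamma * log(p_i) over the matched boxes,
    where p holds the predicted objectness score of each box."""
    return (-alpha * (1.0 - p) ** gamma * torch.log(p.clamp(min=1e-6))).mean()

def rpn_loss(p, reg_pred, reg_target, angle_pred, angle_target, beta1=1.0, beta2=2.0):
    """L_rpn = beta1 * L_cls + beta2 * (L_reg|theta + L_reg|loc).
    The angle term uses smooth-L1 on sin(pred - target) as a stand-in for the
    modified smooth-L1 angle loss of SECOND [28]."""
    l_cls = focal_loss(p)
    l_loc = F.smooth_l1_loss(reg_pred, reg_target)
    l_theta = F.smooth_l1_loss(torch.sin(angle_pred - angle_target),
                               torch.zeros_like(angle_pred))
    return beta1 * l_cls + beta2 * (l_theta + l_loc)
```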
|
| 154 |
+
|
| 155 |
+
# 4 Experiments
|
| 156 |
+
|
| 157 |
+
In this section, we evaluate the performance of the proposed 3D-CVF on the KITTI [4] and nuScenes [1] datasets.
|
| 158 |
+
|
| 159 |
+
# 4.1 KITTI
|
| 160 |
+
|
| 161 |
+
The KITTI dataset is the widely used dataset for evaluating 3D object detectors. It contains the camera and LiDAR data collected using a single Pointgrey camera and Velodyne HDL-64E LiDAR. The training set and test set contain 7,481 images and 7,518 images, respectively. For validation, we split the labeled training set into the train set and valid set by half as done in [2]. The detection task is divided into three different levels of difficulty, namely "easy", "moderate", and "hard". The average precision (AP) obtained from the 41-point precision-recall (PR) curve was used as a performance metric.
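For reference, the 41-point interpolated AP can be computed as in the sketch below, assuming the precision-recall pairs of the ranked detections have already been accumulated; this mirrors the standard KITTI evaluation protocol rather than code released with the paper.

```python
import numpy as np

def average_precision_41pt(recalls, precisions):
    """Interpolated AP over 41 equally spaced recall positions (0.0, 0.025, ..., 1.0):
    at each recall threshold, take the maximum precision achieved at recall >= threshold."""
    ap = 0.0
    for r in np.linspace(0.0, 1.0, 41):
        mask = recalls >= r
        ap += precisions[mask].max() if mask.any() else 0.0
    return ap / 41.0
```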
|
| 162 |
+
|
| 163 |
+
Training Configuration: We limited the range of point cloud to $[0, 70.4] \times [-40, 40] \times [-3, 1]m$ in $(x, y, z)$ axis. The LiDAR voxel structure consists of $1600 \times 1408 \times 40$ voxel grids with each voxel of size $0.05 \times 0.05 \times 0.1m$ . We aimed to detect only cars, because the training data for other categories is not large
|
| 164 |
+
|
| 165 |
+
enough in KITTI dataset. Accordingly, only two anchors with different angles $(0^{\circ}, 90^{\circ})$ were used. To train the 3D-CVF, we used the pre-trained LiDAR backbone network. As mentioned, training was conducted in two stages. We first trained the network up to RPN using the ADAM optimizer with one-cycle learning rate policy [25] over 70 epochs. The learning rate was scheduled with the max parameter set to 3e-3, the division factor 10, the momentum range from 0.95 to 0.85, and the fixed weight decay parameter of 1e-2. The mini-batch size was set to 12. Next, the entire network was trained over 50 epochs with the mini-batch size of 6. The initial learning rate was set to 1e-4 for the first 30 epochs and decayed by a factor of 0.1 every 10 epochs. As a camera backbone network, we used the ResNet-18 [6] network with FPN [14] pre-trained with the KITTI 2D object detection dataset.
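An approximation of this schedule in PyTorch is sketched below using `AdamW` and `OneCycleLR`; mapping the "division factor 10" to `div_factor`, the momentum range to `base_momentum`/`max_momentum` (cycled through Adam's beta1), and the fixed weight decay to decoupled weight decay are assumptions about the original training code.

```python
import torch

def build_stage1_optimizer(model, steps_per_epoch, epochs=70):
    # Adam-style optimizer with fixed weight decay 1e-2 (decoupled here via AdamW);
    # the first training stage uses a mini-batch size of 12.
    optimizer = torch.optim.AdamW(model.parameters(), lr=3e-3, weight_decay=1e-2)
    # One-cycle policy: max lr 3e-3, initial lr = max_lr / div_factor,
    # momentum swept between 0.95 and 0.85 over the cycle.
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer, max_lr=3e-3, epochs=epochs, steps_per_epoch=steps_per_epoch,
        div_factor=10.0, base_momentum=0.85, max_momentum=0.95)
    return optimizer, scheduler
```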
|
| 166 |
+
|
| 167 |
+
Data Augmentation: Since we use camera data and LiDAR point clouds together, careful coordination between the camera and LiDAR data is necessary for data augmentation. We considered random flipping, rotation, scaling, and ground truth box sampling augmentation (GT-AUG) [28]. We randomly flipped the LiDAR points and rotated the point clouds within a range of $\left[-\frac{\pi}{4}, \frac{\pi}{4}\right]$ along the $z$ axis. We also scaled the coordinates of the points by a factor within [0.95, 1.05]. The modifications applied to the LiDAR points were reflected in the camera images. However, it was difficult to apply GT-AUG to both LiDAR and camera data without distortion. Hence, GT-AUG was used only when the LiDAR backbone network was pretrained. We found that the benefit of GT-AUG was not negligible on KITTI due to the relatively small dataset size.
|
| 168 |
+
|
| 169 |
+
Results on KITTI Test Set: Table 1 provides the mAP performance of several 3D object detectors evaluated on the KITTI 3D object detection task. The results for the other algorithms are taken from the KITTI leaderboard (http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d). We observe that the proposed 3D-CVF achieves a significant performance gain over the other camera-LiDAR fusion-based detectors in the leaderboard. In particular, the 3D-CVF achieves up to $2.89\%$ gain (for the hard difficulty) over UberATG-MMF [12], the best fusion-based method so far. The 3D-CVF also outperforms most of the LiDAR-based 3D object detectors except for STD [30]: it outperforms STD [30] at the easy and moderate levels, but not at the hard level. Since STD [30] uses a PointNet-based backbone, it might have a stronger LiDAR pipeline than the voxel-based backbone used in our 3D-CVF. It would be possible to apply our sensor fusion strategies to these kinds of detectors to improve their performance.
|
| 170 |
+
|
| 171 |
+
Table 1 also provides the inference time of the 3D object detectors. We evaluated the inference time on a single NVIDIA GTX 1080 Ti. Note that the inference time of the proposed 3D-CVF is $75\mathrm{ms}$ per frame, which is comparable to that of the other methods. We also measured the runtime of our LiDAR-only baseline: the camera-LiDAR fusion requires only $25\mathrm{ms}$ of additional runtime over the $50\mathrm{ms}$ runtime of the LiDAR-only baseline.
|
| 172 |
+
|
| 173 |
+
<table><tr><td></td><td>Car</td><td>Ped.</td><td>Bus</td><td>Barrier</td><td>T.C.</td><td>Truck</td><td>Trailer</td><td>Moto.</td><td>mAP</td><td>NDS</td></tr><tr><td>SECOND [28]</td><td>69.16</td><td>58.60</td><td>34.87</td><td>28.94</td><td>24.83</td><td>23.73</td><td>5.52</td><td>16.60</td><td>26.32</td><td>35.36</td></tr><tr><td>PointPillars [9]</td><td>75.25</td><td>59.47</td><td>43.80</td><td>30.95</td><td>18.57</td><td>23.42</td><td>20.15</td><td>21.12</td><td>29.34</td><td>39.03</td></tr><tr><td>MEGVII [32]</td><td>71.61</td><td>65.28</td><td>50.29</td><td>48.62</td><td>45.65</td><td>35.77</td><td>20.19</td><td>28.20</td><td>37.68</td><td>44.15</td></tr><tr><td>LiDAR-only Baseline</td><td>78.21</td><td>68.72</td><td>51.02</td><td>43.42</td><td>37.47</td><td>34.84</td><td>32.01</td><td>34.55</td><td>39.43</td><td>46.21</td></tr><tr><td>Our 3D-CVF</td><td>79.69</td><td>71.28</td><td>54.96</td><td>47.10</td><td>40.82</td><td>37.94</td><td>36.29</td><td>37.18</td><td>42.17</td><td>49.78</td></tr></table>
|
| 174 |
+
|
| 175 |
+
Table 2. mAP and NDS performance on nuScenes validation set: The model was trained on nuScenes train set and evaluated on nuScenes validation set. "Cons. Veh." and "Bicycle" classes were omitted as their accuracy was too low. The performance of the SECOND, PointPillars, and MEGVII was reproduced using their official codes.
|
| 176 |
+
|
| 177 |
+
# 4.2 nuScenes
|
| 178 |
+
|
| 179 |
+
The nuScenes dataset is a large-scale 3D detection dataset that contains more than 1,000 scenes in Boston and Singapore [1]. The dataset was collected using six multi-view cameras and 32-channel LiDAR. 360-degree object annotations for 10 object classes were provided. The dataset consists of 28,130 training samples and 6,019 validation samples. The nuScenes dataset suggests the use of an evaluation metric called nuScenes detection score (NDS) [1].
|
| 180 |
+
|
| 181 |
+
Training Configuration: For the nuScenes dataset, the range of the point cloud was limited to $[-49.6, 49.6] \times [-49.6, 49.6] \times [-5, 3]m$ along the $(x, y, z)$ axes, which was voxelized with a voxel size of $0.05 \times 0.05 \times 0.2m$. Consequently, this partitioning leads to a voxel structure of size $1984 \times 1984 \times 40$. The anchor size of each class was determined by averaging the width and height values of the ground truths. We trained the network over 20 epochs using the same learning rate scheduling as used for the KITTI dataset. The mini-batch size was set to 6. DS sampling [32] was adopted to alleviate the class imbalance problem in the nuScenes dataset.
|
| 182 |
+
|
| 183 |
+
Data Augmentation: For data augmentation, we used the same augmentation strategies except for GT-AUG. Unlike KITTI dataset, we found that skipping GT-AUG does not degrade the accuracy in nuScenes dataset.
|
| 184 |
+
|
| 185 |
+
Results on nuScenes Validation Set: We mainly tested our 3D-CVF on nuScenes to verify the performance gain achieved by sensor fusion. For this purpose, we compared the proposed 3D-CVF with the baseline algorithm, which has the same structure as our method except that the camera pipeline is disabled. For a fair comparison, DS sampling strategy was also applied to the baseline. As a reference, we also added the performance of the SECOND [28], PointPillar [9], and MEGVII [32]. Table 2 provides the AP for 8 classes, mAP, and NDS achieved by several 3D object detectors. We observe that the sensor fusion offers $2.74\%$ and $3.57\%$ performance gains over the baseline in the mAP and NDS metrics, respectively. The performance of the proposed method consistently outperforms the baseline in terms of AP for all classes. In particular, the detection accuracy is significantly improved for classes with relatively low APs. This shows that the
|
| 186 |
+
|
| 187 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Modality</td><td colspan="3">Proposed Fusion Strategy</td><td colspan="3">3D AP (%)</td></tr><tr><td>Adaptive Gated Fusion</td><td>Cross-View Mapping</td><td>3D RoI-based Refinement</td><td>APEasy</td><td>APMod.</td><td>APHard</td></tr><tr><td>LiDAR-only Baseline</td><td>LiDAR</td><td></td><td></td><td></td><td>88.35</td><td>78.31</td><td>77.08</td></tr><tr><td rowspan="4">Our 3D-CVF</td><td rowspan="4">LiDAR + RGB</td><td>✓</td><td></td><td></td><td>88.74</td><td>78.54</td><td>77.25</td></tr><tr><td>✓</td><td>✓</td><td></td><td>88.89</td><td>79.19</td><td>77.87</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>89.39</td><td>79.25</td><td>78.02</td></tr><tr><td>✓</td><td>✓</td><td></td><td>89.67</td><td>79.88</td><td>78.47</td></tr></table>
|
| 188 |
+
|
| 189 |
+
Table 3. Ablation study on KITTI valid set for Car category: The effect of our camera-LiDAR fusion schemes is highlighted in this study.
|
| 190 |
+
|
| 191 |
+
camera modality is useful to detect objects that are relatively difficult to identify with LiDAR sensors.
|
| 192 |
+
|
| 193 |
+
# 4.3 Ablation study
|
| 194 |
+
|
| 195 |
+
In Table 3, we present an ablation study validating the effect of the ideas in the proposed 3D-CVF method. Note that our ablation study was conducted on the KITTI valid set. Overall, it shows that the fusion strategy used in our 3D-CVF offers $1.32\%$, $1.57\%$, and $1.39\%$ gains in $\mathrm{AP}_{Easy}$, $\mathrm{AP}_{Mod}$, and $\mathrm{AP}_{Hard}$ over the LiDAR-only baseline.
|
| 196 |
+
|
| 197 |
+
Effect of Naive Camera-LiDAR fusion: We observe that when the camera and LiDAR features are fused without cross-view feature mapping, adaptive gated fusion network, and 3D RoI fusion-based refinement, the improvement in detection accuracy is marginal.
|
| 198 |
+
|
| 199 |
+
Effect of Adaptive Gated Fusion Network: The adaptive gated fusion network leads to $0.54\%$ , $0.87\%$ , and $0.79\%$ performance boost in $\mathrm{AP}_{Easy}$ , $\mathrm{AP}_{Mod}$ and $\mathrm{AP}_{Hard}$ levels, respectively. By combining the camera and LiDAR features selectively depending on their relevance to the detection task, our method can generate the enhanced joint camera-LiDAR feature.
|
| 200 |
+
|
| 201 |
+
Effect of Cross-View Feature Mapping: The auto-calibrated projection generates the smooth and dense camera features in the BEV domain. The detection accuracy improves over the baseline by $0.5\%$ , $0.06\%$ , and $0.15\%$ in $\mathrm{AP}_{Easy}$ , $\mathrm{AP}_{Mod}$ , and $\mathrm{AP}_{Hard}$ , respectively.
|
| 202 |
+
|
| 203 |
+
Effect of 3D RoI Fusion-based Refinement: We observe that the 3D RoI fusion-based refinement improves $\mathrm{AP}_{Easy}$, $\mathrm{AP}_{Mod}$, and $\mathrm{AP}_{Hard}$ by $0.28\%$, $0.63\%$, and $0.45\%$, respectively. This indicates that our 3D RoI fusion-based refinement compensates for the lack of spatial information in the joint camera-LiDAR features that may arise from processing through long CNN pipelines.
|
| 204 |
+
|
| 205 |
+
# 4.4 Performance Evaluation based on Object Distance
|
| 206 |
+
|
| 207 |
+
To investigate the effectiveness of sensor fusion, we evaluated the detection accuracy of the 3D-CVF for different object distances. We categorized the objects in the KITTI valid set into three classes according to the distance ranges
|
| 208 |
+
|
| 209 |
+
<table><tr><td rowspan="2">Method</td><td colspan="3">3D AP (%)</td></tr><tr><td>0 ~ 20m</td><td>20 ~ 40m</td><td>40 ~ 70m</td></tr><tr><td>LiDAR-only Baseline</td><td>89.86</td><td>76.72</td><td>30.57</td></tr><tr><td>Our 3D-CVF</td><td>90.02</td><td>79.73</td><td>35.86</td></tr><tr><td>improvement</td><td>+0.16</td><td>+3.01</td><td>+5.29</td></tr></table>
|
| 210 |
+
|
| 211 |
+
Table 4. Accuracy of 3D-CVF for different object distance ranges: The model is trained on KITTI train set and evaluated on KITTI valid set. We provide the detection accuracy of the 3D-CVF for object distance ranges, $(0\sim 20\mathrm{m})$ , $(20\sim 40\mathrm{m})$ , and $(40\sim 70\mathrm{m})$ .
|
| 212 |
+
|
| 213 |
+
$(0 \sim 20m)$ , $(20 \sim 40m)$ , and $(40 \sim 70m)$ . Table 4 provides the mAPs achieved by the 3D-CVF for three classes of objects. Note that the performance gain achieved by the sensor fusion is significantly higher for distant objects. The difference of mAP between nearby and distant objects is up to $5\%$ . This result indicates that the LiDAR-only baseline is not sufficient to detect distant objects due to the sparseness of LiDAR points and the camera modality successfully compensates it.
|
| 214 |
+
|
| 215 |
+
# 5 Conclusions
|
| 216 |
+
|
| 217 |
+
In this paper, we proposed a new camera and LiDAR fusion architecture for 3D object detection. The 3D-CVF achieved multi-modal fusion over two object detection stages. In the first stage, to generate the effective joint representation of camera and LiDAR data, we introduced the cross-view feature mapping that transforms the camera-view feature map into the calibrated and interpolated feature map in BEV. The camera and LiDAR features were selectively combined based on the relevance to the detection task using the adaptive gated fusion network. In the second stage, the 3D RoI-based fusion network refined the region proposals by pooling low-level camera and LiDAR features by 3D RoI pooling and fusing them after PointNet encoding. Our evaluation conducted on KITTI and nuScenes datasets confirmed that significant performance gain was achieved by the camera-LiDAR fusion and the proposed 3D-CVF outperformed the state-of-the-art 3D object detectors in KITTI leaderboard.
|
| 218 |
+
|
| 219 |
+
# Acknowledgements
|
| 220 |
+
|
| 221 |
+
This work was supported by Institute of Information & Communications Technology Planning & Evaluation (IITP) grant funded by the Korea government (MSIT) (2016-0-00564, Development of Intelligent Interaction Technology Based on Context Awareness and Human Intention Understanding).
|
| 222 |
+
|
| 223 |
+
# References
|
| 224 |
+
|
| 225 |
+
1. Caesar, H., Bankiti, V., Lang, A.H., Vora, S., Liong, V.E., Xu, Q., Krishnan, A., Pan, Y., Baldan, G., Beijbom, O.: nuscenes: A multimodal dataset for autonomous driving. arXiv preprint arXiv:1903.11027 (2019)
|
| 226 |
+
2. Chen, X., Ma, H., Wan, J., Li, B., Xia, T.: Multi-view 3d object detection network for autonomous driving. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR). pp. 1907-1915 (2017)
|
| 227 |
+
3. Chen, Y., Liu, S., Shen, X., Jia, J.: Fast point r-cnn. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV). pp. 9775-9784 (2019)
|
| 228 |
+
4. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the kitti vision benchmark suite. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR). pp. 3354-3361. IEEE (2012)
|
| 229 |
+
5. Girshick, R.: Fast r-cnn. IEEE International Conference on Computer Vision (ICCV) pp. 1440-1448 (2015)
|
| 230 |
+
6. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR). pp. 770-778 (2016)
|
| 231 |
+
7. Kim, J., Koh, J., Kim, Y., Choi, J., Hwang, Y., Choi, J.W.: Robust deep multimodal learning based on gated information fusion network. In: Asian Conference on Computer Vision (ACCV). pp. 90-106. Springer (2018)
|
| 232 |
+
8. Ku, J., Mozifian, M., Lee, J., Harakeh, A., Waslander, S.L.: Joint 3d proposal generation and object detection from view aggregation. In: IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 1-8. IEEE (2018)
|
| 233 |
+
9. Lang, A.H., Vora, S., Caesar, H., Zhou, L., Yang, J., Beijbom, O.: Pointpillars: Fast encoders for object detection from point clouds. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR). pp. 12697-12705 (2019)
|
| 234 |
+
10. Lehner, J., Mitterecker, A., Adler, T., Hofmacher, M., Nessler, B., Hochreiter, S.: Patch refinement-localized 3d object detection. arXiv preprint arXiv:1910.04093 (2019)
|
| 235 |
+
11. Li, B., Ouyang, W., Sheng, L., Zeng, X., Wang, X.: Gs3d: An efficient 3d object detection framework for autonomous driving. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1019-1028 (2019)
|
| 236 |
+
12. Liang, M., Yang, B., Chen, Y., Hu, R., Urtasun, R.: Multi-task multi-sensor fusion for 3d object detection. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR). pp. 7345-7353 (2019)
|
| 237 |
+
13. Liang, M., Yang, B., Wang, S., Urtasun, R.: Deep continuous fusion for multi-sensor 3d object detection. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 641-656 (2018)
|
| 238 |
+
14. Lin, T.Y., Dollár, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR). pp. 2117-2125 (2017)
|
| 239 |
+
15. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollár, P.: Focal loss for dense object detection. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV). pp. 2980-2988 (2017)
|
| 240 |
+
16. Liu, W., Anguelov, D., Erhan, D., Szegedy, C., Reed, S., Fu, C.Y., Berg, A.C.: Ssd: Single shot multibox detector. European Conference on Computer Vision (ECCV) pp. 21-37 (2016)
|
| 241 |
+
|
| 242 |
+
17. Qi, C.R., Liu, W., Wu, C., Su, H., Guibas, L.J.: Frustum pointnets for 3d object detection from rgb-d data. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR). pp. 918-927 (2018)
|
| 243 |
+
18. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR). pp. 652-660 (2017)
|
| 244 |
+
19. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In: Advances in Neural Information Processing Systems (NeurIPS). pp. 5099-5108 (2017)
|
| 245 |
+
20. Redmon, J., Farhadi, A.: Yolo9000: Better, faster, stronger. Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR) pp. 6517-6525 (2017)
|
| 246 |
+
21. Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in Neural Information Processing Systems (NeurIPS) pp. 91-99 (2015)
|
| 247 |
+
22. Shi, S., Wang, X., Li, H.: Pointrcnn: 3d object proposal generation and detection from point cloud. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR). pp. 770-779 (2019)
|
| 248 |
+
23. Shi, S., Wang, Z., Wang, X., Li, H.: Part-a^2 net: 3d part-aware and aggregation neural network for object detection from point cloud. arXiv preprint arXiv:1907.03670 (2019)
|
| 249 |
+
24. Shin, K., Kwon, Y.P., Tomizuka, M.: Roarnet: A robust 3d object detection based on region approximation refinement. In: IEEE Intelligent Vehicles Symposium (IV). pp. 2510-2515. IEEE (2019)
|
| 250 |
+
25. Smith, L.N.: A disciplined approach to neural network hyper-parameters: Part 1-learning rate, batch size, momentum, and weight decay. arXiv preprint arXiv:1803.09820 (2018)
|
| 251 |
+
26. Wang, Z., Jia, K.: Frustum convnet: Sliding frustums to aggregate local point-wise features for amodal 3d object detection. arXiv preprint arXiv:1903.01864 (2019)
|
| 252 |
+
27. Xu, D., Anguelov, D., Jain, A.: Pointfusion: Deep sensor fusion for 3d bounding box estimation. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR). pp. 244-253 (2018)
|
| 253 |
+
28. Yan, Y., Mao, Y., Li, B.: Second: Sparsely embedded convolutional detection. Sensors 18(10), 3337 (2018)
|
| 254 |
+
29. Yang, B., Luo, W., Urtasun, R.: Pixor: Real-time 3d object detection from point clouds. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR). pp. 7652-7660 (2018)
|
| 255 |
+
30. Yang, Z., Sun, Y., Liu, S., Shen, X., Jia, J.: Std: Sparse-to-dense 3d object detector for point cloud. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV). pp. 1951-1960 (2019)
|
| 256 |
+
31. Zhou, Y., Tuzel, O.: Voxelnet: End-to-end learning for point cloud based 3d object detection. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition (CVPR). pp. 4490-4499 (2018)
|
| 257 |
+
32. Zhu, B., Jiang, Z., Zhou, X., Li, Z., Yu, G.: Class-balanced grouping and sampling for point cloud 3d object detection. arXiv preprint arXiv:1908.09492 (2019)
|
3dcvfgeneratingjointcameraandlidarfeaturesusingcrossviewspatialfeaturefusionfor3dobjectdetection/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cd0691c6104d57f56c4e897f7a07a05ce57b7a0229a525678920dfce06fa1794
|
| 3 |
+
size 438342
|
3dcvfgeneratingjointcameraandlidarfeaturesusingcrossviewspatialfeaturefusionfor3dobjectdetection/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:06bd7a8858cff97f84eb60d9bdea214c774d1c7f72a8f8610fd7f88b010c0283
|
| 3 |
+
size 359824
|
3dfluidflowreconstructionusingcompactlightfieldpiv/35b6e762-1496-4360-ad6f-60bd707da407_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9271e9bf4539e17055f6d9a4465ac885e91f1940ca02af56b28e149f80baac60
|
| 3 |
+
size 77374
|
3dfluidflowreconstructionusingcompactlightfieldpiv/35b6e762-1496-4360-ad6f-60bd707da407_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:598997da9a3461b8d7a7c3e50d4c77d61e8400a37bb2e5f6a3ac19d746c0315d
|
| 3 |
+
size 96041
|
3dfluidflowreconstructionusingcompactlightfieldpiv/35b6e762-1496-4360-ad6f-60bd707da407_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:dee3da44aed3e25a6ef226ca4d368f428be4f5ae5af81985f3320ef6b4f8ab42
|
| 3 |
+
size 37277879
|
3dfluidflowreconstructionusingcompactlightfieldpiv/full.md
ADDED
|
@@ -0,0 +1,349 @@
| 1 |
+
# 3D Fluid Flow Reconstruction Using Compact Light Field PIV
|
| 2 |
+
|
| 3 |
+
Zhong Li $^{1\star}$ , Yu Ji $^{2}$ , Jingyi Yu $^{2,3}$ , and Jinwei Ye $^{4}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ University of Delaware, Newark, DE, USA
|
| 6 |
+
$^{2}$ DGene, Baton Rouge, LA, USA
|
| 7 |
+
$^{3}$ ShanghaiTech University, Shanghai, China
|
| 8 |
+
|
| 9 |
+
$^{4}$ Louisiana State University, Baton Rouge, LA, USA
|
| 10 |
+
|
| 11 |
+
jye@csc.lsu.edu
|
| 12 |
+
|
| 13 |
+
Abstract. Particle Imaging Velocimetry (PIV) estimates the fluid flow by analyzing the motion of injected particles. The problem is challenging as the particles lie at different depths but have similar appearances. Tracking a large number of moving particles is particularly difficult due to the heavy occlusion. In this paper, we present a PIV solution that uses a compact lenslet-based light field camera to track dense particles floating in the fluid and reconstruct the 3D fluid flow. We exploit the focal symmetry property in the light field focal stacks for recovering the depths of similar-looking particles. We further develop a motion-constrained optical flow estimation algorithm by enforcing the local motion rigidity and the Navier-Stoke fluid constraint. Finally, the estimated particle motion trajectory is used to visualize the 3D fluid flow. Comprehensive experiments on both synthetic and real data show that using a compact light field camera, our technique can recover dense and accurate 3D fluid flow.
|
| 14 |
+
|
| 15 |
+
Keywords: volumetric flow reconstruction, particle imaging velocimetry (PIV), light field imaging, focal stack
|
| 16 |
+
|
| 17 |
+
# 1 Introduction
|
| 18 |
+
|
| 19 |
+
Recovering time-varying volumetric 3D fluid flow is a challenging problem. Successful solutions can benefit applications in many science and engineering fields, including oceanology, geophysics, biology, mechanical and environmental engineering. In experimental fluid dynamics, a standard methodology for measuring fluid flow is called Particle Imaging Velocimetry (PIV) [1]: the fluid is seeded with tracer particles, whose motions are assumed to follow the fluid dynamics faithfully, then the particles are tracked over time and their motion trajectories in 3D are used to represent the fluid flows.
|
| 20 |
+
|
| 21 |
+
Although highly accurate, existing PIV solutions usually require complex and expensive equipment, and the setups end up bulky. For example, standard laser-based PIV methods [16, 6] use an ultra-high-speed laser beam to illuminate particles in order to track their motions. One limitation of these methods is
|
| 22 |
+
|
| 23 |
+
that the measured motion field only contains 2D in-plane movement restricted on the 2D fluid slice being scanned, as the laser beam can only scan one depth layer at a time. To fully characterize the fluid, it is necessary to recover the 3D flow motion within the entire fluid volume. Three-dimensional PIV such as tomographic PIV (Tomo-PIV) [9] use multiple cameras to capture the particles and resolve their depths in 3D using multi-view stereo. But such multi-camera systems need to be well calibrated and fully synchronized. More recently, the Rainbow PIV solutions [47, 46] use color to encode particles at different depths in order to recover the 3D fluid flow. However, this setup requires specialized illumination source with diffractive optics for color-encoding and the optical system needs to be precisely aligned.
|
| 24 |
+
|
| 25 |
+
In this paper, we present a flexible and low-cost 3D PIV solution that only uses one compact lenslet-based light field camera as the acquisition device. A light field camera, in essence, is a single-shot, multi-view imaging device [33]. The captured light field records 4D spatial and angular light rays scattered from the tracer particles. As commercial light field cameras (e.g. Lytro Illum and Raytrix R42) can capture high resolution light field, we are able to resolve dense particles in 3D fluid volume. Small baseline of the lenslet array further helps recover subtle particle motions at sub-pixel level. In particular, our method benefits from the post-capture refocusing capability of light field. We use the focal stack to establish correspondences among particles at different depths. To resolve heavily occluded particles, we exploit the focal stack symmetry (i.e., intensities are symmetric in the focal stack around the ground truth disparity [25, 41]) for accurate particle 3D reconstruction.
|
| 26 |
+
|
| 27 |
+
Given the 3D locations of particles at each time frame, we develop a physics-based optical flow estimation algorithm to recover the particles' 3D velocity field, which represents the 3D fluid flow. In particular, we introduce two new regularization terms to refine the classic variational optical flow [17]: 1) a one-to-one particle correspondence term to maintain smooth and consistent flow motions across different time frames; and 2) a divergence-free regularization term derived from the Navier-Stokes equations to enforce the physical properties of incompressible fluid. These terms help resolve ambiguities in particle matching caused by similar appearances while forcing the reconstruction to obey physical laws. Through synthetic and real experiments, we show that using a simple single-camera setup, our approach outperforms state-of-the-art PIV solutions in recovering volumetric 3D fluid flows of various types.
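As an illustration of the divergence-free regularization, the sketch below evaluates a finite-difference divergence penalty for a 3D velocity field sampled on a regular grid; in the paper this term enters the variational optical flow objective rather than being computed as a standalone score, and the grid spacing `h` is illustrative.

```python
import numpy as np

def divergence_penalty(u, v, w, h=1.0):
    """Sum of squared divergence of a velocity field sampled on a regular grid.
    u, v, w: (X, Y, Z) arrays holding the x-, y-, and z-components of velocity.
    For an incompressible flow, div = du/dx + dv/dy + dw/dz should vanish."""
    div = (np.gradient(u, h, axis=0) +
           np.gradient(v, h, axis=1) +
           np.gradient(w, h, axis=2))
    return float((div ** 2).sum())
```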
|
| 28 |
+
|
| 29 |
+
# 2 Related Work
In computer vision and graphics, much effort has been made in modeling and recovering transparent objects or phenomena directly from images (e.g., fluid [32, 49], gas flows [20, 4, 28, 48], smoke [12, 14], and flames [13, 19]). As these objects do not have their own appearances, a known pattern is often assumed, and the light paths traveled through the transparent medium are estimated for 3D reconstruction. A comprehensive survey can be found in [18]. However, many of these imaging techniques are designed to recover the 3D density field, which does not explicitly reveal the internal flow motion.

Our method, instead, aims at estimating the 3D flow motion in terms of a velocity field. The measurement procedure is similar to the Particle Image Velocimetry (PIV) method, which estimates flow motion by injecting tracer particles and tracking their movement. Traditional PIV [16, 6] recovers 2D velocity fields on thin fluid slices using high-speed laser scanning. As 3D volumetric flow is critical to fully characterize the fluid behavior, recovering a 3D velocity field within the entire volume is of great interest.

To recover the 3D velocity field of a dense set of particles, stereoscopic cameras [3, 35] are used to estimate the particle depth. Tomographic PIV (Tomo-PIV) [9, 36, 22] uses multiple (usually three to six) cameras to determine 3D particle locations by space carving. Aguirre-Pablo et al. [2] perform Tomo-PIV using mobile devices; however, the reconstruction accuracy is compromised by the low resolution of mobile cameras. Other notable 3D PIV approaches include defocusing PIV [45, 21], holographic PIV [50, 39], and synthetic aperture PIV [5, 31]. All these systems use an array of cameras for acquisition, and each measurement requires elaborate calibration and synchronization. In contrast, our setup is more flexible as it uses a single compact light field camera. The recently proposed rainbow PIV [46, 47] uses color-coded illumination to recover depth from a single camera. However, both the light source and the camera are customized with special optical elements, and only a sparse set of particles can be resolved. Proof-of-concept simulations [27] and experiments [10] using compact light field or plenoptic cameras for PIV have been performed and have shown efficacy. However, the depth estimation and particle tracking algorithms used in these early works are rather primitive and are not optimized for light field properties. As a result, the recovered particles are relatively sparse and the reconstruction accuracy is lower than that of traditional PIV. Shi et al. [37, 38] use ray tracing to estimate particle velocity with a light field camera and conduct comparisons with Tomo-PIV. In our approach, we exploit the focal stack symmetry [25] of light fields for more accurate depth reconstruction in the presence of heavily occluded dense particles.

To recover the flow motion, standard PIV uses 3D cross-correlation to match local windows between neighboring time frames [9, 44]. Although many improvements (for instance, matching with adaptive window sizes [22]) have been made, the window-based solutions suffer at regions with few visible particles. Another class of methods directly tracks the path of individual particles over time [29, 36]. However, with increased particle density, tracking becomes challenging under occlusions. Heitz et al. [15] propose applying variational optical flow to fluid flow estimation. Vedula et al. [43] extend optical flow to dynamic environments and introduce the scene flow. Lv et al. [26] use a neural network to recover 3D scene flow. Unlike natural scenes that have diverse features, our PIV scenes only contain similar-looking particles. Therefore, existing optical flow or scene flow algorithms are not directly applicable to our problem. Some methods [47, 23] incorporate physical constraints such as the Stokes equations into the optical flow framework to recover fluid flows that obey physical laws. However, these physics-based regularizations involve high-order terms and are difficult to solve. In our approach, we introduce two novel regularization terms, 1) a rigidity-enforced particle correspondence term and 2) a divergence-free term, to refine the basic variational optical flow framework for estimating the motion of dense particles.

Fig. 1. Overall pipeline of our light field PIV 3D fluid flow reconstruction algorithm.

# 3 Our Approach
Fig. 1 shows the algorithmic pipeline of volumetric 3D fluid flow reconstruction using light field PIV. For each time frame, we first detect particles in the light field sub-aperture images using the IDL particle detector [7]. We then estimate particle depths through a joint optimization that exploits light field properties. After we obtain the 3D particle locations, we compare two consecutive frames to establish one-to-one particle correspondences and finally solve for the 3D velocity field using a constrained optical flow.

# 3.1 3D Particle Reconstruction
We first describe our 3D particle reconstruction algorithm, which exploits various properties of the light field.

Focal Stack Symmetry. A focal stack is a sequence of images focused at different depth layers. Due to the post-capture refocusing capability, a focal stack can be synthesized from a light field by integrating the captured light rays. Lin et al. [25] conduct a symmetry analysis on focal stacks and show that non-occluding pixels in a focal stack exhibit symmetry along the focal dimension centered at the in-focus slice. In contrast, occluding boundary pixels exhibit local asymmetry, as their outgoing rays do not originate from the same surface. This property is called focal stack symmetry. As shown in Fig. 2, a particle in a focal stack exhibits a symmetric defocus effect centered at the in-focus slice. It is also worth noting that occluded particles can be seen in the focal stack as the occluder becomes extremely out-of-focus. Utilizing the focal stack symmetry helps resolve heavily occluded particles and hence enhances the accuracy and robustness of particle depth estimation.

Fig. 2. Focal stack symmetry. We show zoomed-in views of four focal slices on the right. A particle exhibits a symmetric defocus effect (e.g., in the $31.5\mathrm{mm}$ and $36.5\mathrm{mm}$ slices) centered at the in-focus slice ($34\mathrm{mm}$). In the $39\mathrm{mm}$ slice, an occluded particle becomes visible as the occluder becomes extremely out-of-focus.

Given a particle light field, we synthesize a focal stack from the sub-aperture images by integrating rays from the same focal slice. Each focal slice $f$ has a corresponding disparity $d$ that indicates the in-focus depth layer. Let $I(p,f)$ be the intensity of a pixel $p$ at focal slice $f$. For the symmetry analysis, we define an in-focus score $\kappa(p,f)$ of a pixel $p$ at focal slice $f$ as:

$$
\kappa(p, f) = \int_{0}^{\delta_{max}} \rho\big(I(p, f + \delta) - I(p, f - \delta)\big)\, d\delta \tag{1}
$$

where $\delta$ represents a tiny disparity/focal shift and $\delta_{max}$ is the maximum shift amount; $\rho(\nu) = 1 - e^{-\|\nu\|_2 / (2\sigma^2)}$ is a robust distance function with $\sigma$ controlling its sensitivity to noise. According to the focal stack symmetry, the intensity profile $I(p,f)$ is locally symmetric around the true surface depth. Therefore, if the pixel $p$ is in focus at its true disparity $\hat{d}$, then $\kappa(p,\hat{d})$ should be 0. Hence, for an estimated disparity $d$ at $p$, the closer $d$ is to $\hat{d}$, the smaller $\kappa(p,d)$ becomes. We then formulate the focal stack symmetry term $\beta_{fs}$ for particle depth estimation by summing $\kappa(p,d)$ over all pixels in the focal slice $f$ with disparity $d$:

$$
\beta_{fs}(d) = \sum_{p} \kappa(p, d) \tag{2}
$$

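To make the symmetry test concrete, the sketch below scores one focal slice using a discrete version of Eqs. 1-2. It assumes the focal stack is stored as a NumPy array ordered by focal slice; the values of `delta_max` and `sigma` are illustrative assumptions, not the paper's settings.

```python
import numpy as np

def in_focus_score(focal_stack, slice_idx, delta_max=5, sigma=0.1):
    """Discrete version of the in-focus score kappa (Eq. 1) for every pixel of
    one focal slice. focal_stack: (S, H, W) array of refocused images ordered
    by focal slice. Lower scores indicate the slice is closer to the pixel's
    true in-focus depth."""
    S, _, _ = focal_stack.shape
    kappa = np.zeros(focal_stack.shape[1:])
    for delta in range(1, delta_max + 1):
        lo, hi = slice_idx - delta, slice_idx + delta
        if lo < 0 or hi >= S:
            break  # stay inside the stack
        diff = focal_stack[hi] - focal_stack[lo]
        # robust distance rho(v) = 1 - exp(-|v| / (2 * sigma^2))
        kappa += 1.0 - np.exp(-np.abs(diff) / (2.0 * sigma ** 2))
    return kappa

def focal_stack_symmetry_term(focal_stack, slice_idx):
    """beta_fs for the disparity associated with slice_idx (Eq. 2):
    the per-pixel scores summed over the slice."""
    return float(in_focus_score(focal_stack, slice_idx).sum())
```
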
Color and Gradient Consistency. Besides the focal stack symmetry, we also consider the color and gradient data consistency across sub-aperture images for depth estimation, using data terms similar to [25]. Specifically, by comparing each sub-aperture image with the center view, we define a cost metric $C(i,p,d)$ as:

$$
C(i, p, d) = \left| I_{c}(\omega(p)) - I_{i}(\omega(p + d(p)\chi(i))) \right| \tag{3}
$$

where $i$ is the sub-aperture image index; $I_{c}$ and $I_{i}$ refer to the center view and the $i$-th sub-aperture image, respectively; $\omega(p)$ refers to a small local window centered around pixel $p$; $d(p)$ is the estimated disparity at pixel $p$; and $\chi(i)$ is a scalar that scales the disparity $d(p)$ according to the relative position between $I_{c}$ and $I_{i}$, since $d(p)$ is the pixel shift between neighboring sub-aperture images.

The cost metric $C$ measures the intensity similarity between shifted pixels in sub-aperture images given an estimated disparity. By summing $C$ over all pixels, we obtain the sum of absolute differences (SAD) term for the color consistency measurement:

$$
\beta_{sad}(d) = \frac{1}{N} \sum_{i \in N} \sum_{p} C \tag{4}
$$

where $N$ is the total number of sub-aperture images (excluding the center view).

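As an illustration, a minimal sketch of the SAD term follows. It assumes grayscale sub-aperture images and that each view's offset from the center view is given in lenslet units, so the expected pixel shift is the disparity scaled by that offset (the role of $\chi(i)$); the sign convention of the shift is an assumption.

```python
import numpy as np
from scipy.ndimage import shift as subpixel_shift

def sad_cost(center_view, sub_views, offsets, d):
    """Color-consistency cost beta_sad for one candidate disparity d (Eqs. 3-4).
    center_view: (H, W) center sub-aperture image.
    sub_views: list of (H, W) sub-aperture images (center view excluded).
    offsets: per-view (dy, dx) position relative to the center view; each view
    is shifted by d * offset before being compared with the center view."""
    cost = 0.0
    for view, (oy, ox) in zip(sub_views, offsets):
        warped = subpixel_shift(view, (d * oy, d * ox), order=1, mode='nearest')
        cost += np.abs(center_view - warped).sum()
    return cost / max(len(sub_views), 1)
```
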
Besides the color consistency, we also consider consistency in the gradient domain. We first take partial derivatives of the cost metric $C$ (Eq. 3) in both the $x$ and $y$ directions, $\mathcal{D}_x = \partial C / \partial x$ and $\mathcal{D}_y = \partial C / \partial y$, and then formulate the following weighted sum of gradient differences (GRAD) for the gradient consistency measurement:

$$
\beta_{grad}(d) = \frac{1}{N} \sum_{i \in N} \sum_{p} \mathcal{W}(i)\mathcal{D}_{x} + (1 - \mathcal{W}(i))\mathcal{D}_{y} \tag{5}
$$

In Eq. 5, $\mathcal{W}(i)$ is a weighting factor that determines the contribution of the horizontal gradient cost $\mathcal{D}_x$ according to the relative positions of the two sub-aperture images being compared. It is defined as $\mathcal{W}(i) = \frac{\Delta i_x}{\Delta i_x + \Delta i_y}$, where $\Delta i_x$ and $\Delta i_y$ are the position differences between the sub-aperture images along the $x$ and $y$ directions. For example, $\mathcal{W}(i) = 1$ if the target view lies along the horizontal direction from the reference view; in this case, only the gradient costs in the $x$ direction are aggregated.

Particle Depth Estimation. Finally, combining Eq. 2, 4, and 5, we form the following energy function for optimizing the particle disparity $d$:

$$
\beta(d) = \beta_{fs}(d) + \lambda_{sad} \beta_{sad}(d) + \lambda_{grad} \beta_{grad}(d) \tag{6}
$$

In our experiments, the two weighting factors are set to $\lambda_{sad} = 0.8$ and $\lambda_{grad} = 0.9$. We use the Levenberg-Marquardt (LM) optimization to solve Eq. 6. Finally, using the calibrated light field camera intrinsic parameters, we convert the particle disparity map to 3D particle locations. The pipeline of our 3D particle reconstruction algorithm is shown in Fig. 3.

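The sketch below shows how the combined objective of Eq. 6 could be evaluated. It simply scores a discrete set of candidate disparities, which is a simpler stand-in for the Levenberg-Marquardt refinement used in the paper; the `beta_*` arguments are assumed to be callables such as the functions sketched above.

```python
import numpy as np

def estimate_disparity(candidates, beta_fs, beta_sad, beta_grad,
                       lam_sad=0.8, lam_grad=0.9):
    """Pick the candidate disparity minimizing Eq. 6.
    beta_fs, beta_sad, beta_grad: callables mapping a disparity to a scalar cost."""
    costs = [beta_fs(d) + lam_sad * beta_sad(d) + lam_grad * beta_grad(d)
             for d in candidates]
    best = int(np.argmin(costs))
    return candidates[best], costs[best]
```
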
Fig. 3. Our 3D particle reconstruction algorithm pipeline.

# 3.2 Fluid Flow Reconstruction
After we reconstruct the 3D particles in each frame, we compare two consecutive frames to estimate the volumetric 3D fluid flow.

Given two sets of particle locations $S_{1}$ and $S_{2}$ recovered from consecutive frames, we first convert $S_{1}$ and $S_{2}$ into voxelized 3D volumes of occupancy probabilities $\Theta_{1}$ and $\Theta_{2}$ through linear interpolation. Our goal is to solve for a per-voxel 3D velocity vector $\mathbf{u} = [u,v,w]$ over the whole volume.

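The paper does not spell out the voxelization step beyond "linear interpolation"; the following sketch is one plausible reading, splatting each particle onto its eight neighboring voxels with trilinear weights to form a soft occupancy volume. The clipping to [0, 1] is an assumption.

```python
import numpy as np

def voxelize_particles(points, shape):
    """Convert particle positions into a soft occupancy volume Theta by
    trilinear splatting. points: (N, 3) positions in voxel coordinates;
    shape: (X, Y, Z) size of the fluid volume."""
    vol = np.zeros(shape)
    base = np.floor(points).astype(int)
    frac = points - base
    for dx in (0, 1):
        for dy in (0, 1):
            for dz in (0, 1):
                w = ((frac[:, 0] if dx else 1 - frac[:, 0])
                     * (frac[:, 1] if dy else 1 - frac[:, 1])
                     * (frac[:, 2] if dz else 1 - frac[:, 2]))
                ix = np.clip(base[:, 0] + dx, 0, shape[0] - 1)
                iy = np.clip(base[:, 1] + dy, 0, shape[1] - 1)
                iz = np.clip(base[:, 2] + dz, 0, shape[2] - 1)
                np.add.at(vol, (ix, iy, iz), w)  # accumulate weights per voxel
    return np.clip(vol, 0.0, 1.0)
```
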
In particular, we solve this problem under the variational optical flow framework [17] and propose two novel regularization terms, the correspondence term and the divergence-free term, for improved accuracy and efficiency. Our overall energy function $E_{total}$ is a combination of these terms and is written as:

$$
E_{total} = E_{data} + \lambda_{1} E_{smooth} + \lambda_{2} E_{corres} + \lambda_{3} E_{div} \tag{7}
$$

where $\lambda_1$, $\lambda_2$, and $\lambda_3$ are term-balancing factors. Please see our supplementary material for the mathematical details of solving this energy function. In the following, we describe the algorithmic details of each regularization term.

Basic Optical Flow. The data term $E_{data}$ and smoothness term $E_{smooth}$ are adopted from basic optical flow. They are derived from the brightness constancy assumption. $E_{data}$ enforces consistency between the occupancy probabilities $\Theta_1$ and $\Theta_2$ at corresponding voxels, and $E_{smooth}$ constrains the fluid motion to be piecewise smooth. In our case, $E_{data}$ and $E_{smooth}$ can be written as:

$$
E_{data}(\mathbf{u}) = \int \left\| \Theta_{2}(\mathbf{p} + \mathbf{u}) - \Theta_{1}(\mathbf{p}) \right\|_{2}^{2} \, d\mathbf{p} \tag{8}
$$

$$
E_{smooth}(\mathbf{u}) = \left\| \nabla \mathbf{u} \right\|_{2}^{2} \tag{9}
$$

where $\mathbf{p}$ refers to a voxel in the fluid volume and $\nabla$ is the gradient operator.

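For concreteness, a discrete sketch of the data term in Eq. 8 is given below: it warps the target occupancy volume by the current flow with trilinear interpolation and sums the squared differences. The use of `scipy.ndimage.map_coordinates` and the nearest-edge boundary handling are implementation assumptions.

```python
import numpy as np
from scipy.ndimage import map_coordinates

def data_term(theta1, theta2, u):
    """E_data (Eq. 8) on a voxel grid. theta1, theta2: (X, Y, Z) occupancy
    volumes for the two frames; u: (X, Y, Z, 3) per-voxel flow in voxel units."""
    X, Y, Z = theta1.shape
    gx, gy, gz = np.meshgrid(np.arange(X), np.arange(Y), np.arange(Z),
                             indexing='ij')
    coords = np.stack([gx + u[..., 0], gy + u[..., 1], gz + u[..., 2]])
    warped = map_coordinates(theta2, coords, order=1, mode='nearest')
    return float(np.sum((warped - theta1) ** 2))
```
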
Correspondence Term. We propose a novel correspondence term for more accurate flow estimation. Notice that $E_{data}$ in the basic optical flow only enforces voxel-level consistency, while particle-to-particle correspondences are not guaranteed. We therefore develop a correspondence term $E_{corres}$ to enforce one-to-one particle matching. $E_{corres}$ helps improve the matching accuracy, especially in regions with high particle density.

Let us consider two sets of particles: $S_{1} = \{s_{1}\,|\,s_{1}\in \mathbb{R}^{3}\}$ as the reference and $S_{2} = \{s_{2}\,|\,s_{2}\in \mathbb{R}^{3}\}$ as the target. $E_{corres}$ enforces one-to-one particle matching between the target and reference sets. To formulate $E_{corres}$, we first estimate correspondences between particles in $S_{1}$ and $S_{2}$. We solve this problem by estimating transformations that map particles in $S_{1}$ to $S_{2}$.

In particular, we employ a deformable graph similar to [42] that considers local geometric similarity and rigidity. To build the graph, we uniformly sample a set of particles in $S_{1}$ and use them as graph nodes $\mathbf{G} = \{g_1, g_2, g_3, \dots, g_m\}$. We then aim to estimate a set of affine transformations $\mathbf{A} = \{A_i\}_{i=1}^m$ and translations $\mathbf{b} = \{b_i\}_{i=1}^m$, one for each graph node. We use these graph nodes as control points to deform the particles in $S_1$ instead of computing transformations for individual particles. Given the graph node transformations $\mathbf{A}$ and $\mathbf{b}$, we can transform every particle $s_1 \in S_1$ to its new location $s_1'$ using a weighted linear combination of the graph node transformations:

$$
s_{1}^{\prime} = f(s_{1}, \mathbf{A}, \mathbf{b}) = \sum_{i = 1}^{m} \varpi_{i}(s_{1}) \left(A_{i}(s_{1} - g_{i}) + g_{i} + b_{i}\right) \tag{10}
$$

where the weight $\varpi_{i}(s_{1}) = \max(0, (1 - \|s_{1} - g_{i}\|^{2} / R^{2})^{3})$ models the influence of a graph node $g_{i}$ on a particle $s_1\in S_1$ according to their Euclidean distance. This restricts the particle transformation to be affected only by nearby graph nodes. In our experiment, we consider the nearest four graph nodes, and $R$ is the particle's distance to its nearest graph node.

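A minimal sketch of the blended transformation in Eq. 10 follows. One assumption to note: with $R$ taken literally as the distance to the nearest node, that node would receive zero weight, so this sketch instead uses the distance to the $(k{+}1)$-th nearest node as the support radius, following the embedded-deformation convention of [42], and normalizes the weights.

```python
import numpy as np

def deform_particle(s1, nodes, A, b, k=4):
    """Blend the affine transforms of the k nearest graph nodes to move one
    particle (Eq. 10). nodes: (m, 3) node positions; A: (m, 3, 3) affine parts;
    b: (m, 3) translations; s1: (3,) particle position."""
    d2 = np.sum((nodes - s1) ** 2, axis=1)
    order = np.argsort(d2)
    idx = order[:k]
    # support radius: distance to the (k+1)-th nearest node (assumption)
    r2 = d2[order[min(k, len(order) - 1)]]
    w = np.maximum(0.0, (1.0 - d2[idx] / max(r2, 1e-12)) ** 3)
    w /= max(w.sum(), 1e-12)  # normalize the truncated cubic weights
    s1_new = np.zeros(3)
    for wi, i in zip(w, idx):
        s1_new += wi * (A[i] @ (s1 - nodes[i]) + nodes[i] + b[i])
    return s1_new
```
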
To obtain the graph node transformations $\mathbf{A}$ and $\mathbf{b}$, we solve an optimization problem with the energy function:

$$
\varPsi_{total} = \varPsi_{data} + \alpha_{1} \varPsi_{rigid} + \alpha_{2} \varPsi_{smooth} \tag{11}
$$

$\varPsi_{data}$ is the data term that aims to minimize particle-to-particle distances after the transformation and is formulated as:

$$
\varPsi_{data} = \sum_{s_{1} \in S_{1}} \| s_{1}^{\prime} - c_{i} \|^{2} \tag{12}
$$

where $c_{i}$ is the closest point to $s_1^\prime$ in $S_{2}$.

$\varPsi_{rigid}$ is a rigidity regularization term that enforces the local rigidity of the affine transformations. $\varPsi_{rigid}$ can be written as:

$$
\varPsi_{rigid} = \sum_{g_i \in \mathbf{G}} \| A_{i}^{T} A_{i} - \mathbb{I} \|_{F}^{2} + (\det(A_{i}) - 1)^{2} \tag{13}
$$

where $\mathbb{I}$ is the identity matrix.

The last term, $\varPsi_{smooth}$, enforces the spatial smoothness of nearby nodes and is written as:

$$
\varPsi_{smooth} = \sum_{g_i \in \mathbf{G}} \sum_{k \in \Omega(i)} \| A_{i}(g_{k} - g_{i}) + g_{i} + b_{i} - (g_{k} + b_{k}) \|^{2} \tag{14}
$$

where $\Omega(i)$ refers to the set of the nearest four neighbors of $g_{i}$.

The overall energy function $\varPsi_{total}$ can be optimized with an iterative Gauss-Newton algorithm, which yields the affine transformations $\mathbf{A}$ and $\mathbf{b}$. In our experiment, we use $\alpha_{1} = 50$ and $\alpha_{2} = 10$ for Eq. 11.

By applying Eq. 11, we can transform every particle $s_1 \in S_1$ to its new location $s_1'$ using the graph nodes' transformations. We then find $S_1$'s corresponding set $S_2^c$ in the target $S_2$ using a nearest-neighbor search (i.e., $s_2^c = \mathrm{nnsearch}(s_1', S_2)$).

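The nearest-neighbor lookup that produces $S_2^c$ can be done with a k-d tree; a short sketch, assuming both particle sets are NumPy arrays, is shown below.

```python
import numpy as np
from scipy.spatial import cKDTree

def find_correspondences(S1_deformed, S2):
    """For every deformed reference particle, return its nearest neighbor in
    the target set S2 (the nnsearch step that builds S2^c)."""
    tree = cKDTree(S2)
    dists, idx = tree.query(S1_deformed)
    return S2[idx], dists
```
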
After we establish the one-to-one correspondences between $S_{1}$ and $S_{2}$, our correspondence term can be formulated based on the color consistency assumption as follows:

$$
E_{corres}(\mathbf{u}, S_{1}, S_{2}^{c}) = \sum_{s_{1} \in S_{1},\, s_{2}^{c} \in S_{2}^{c}} \| s_{2}^{c} - (s_{1} + \mathbf{u}(s_{1})) \|_{2}^{2} \tag{15}
$$

We show the effectiveness of the correspondence term by comparing the velocity fields obtained with and without $E_{corres}$. The results are shown in Fig. 4. This comparison demonstrates that our correspondence term greatly improves the matching accuracy and hence benefits the flow reconstruction.

Fig. 4. Particle matching between the source and target volumes with vs. without using the correspondence term $E_{corres}$. In our plots, green lines indicate correct correspondences and red lines indicate incorrect ones.

Divergence-Free Term. To enforce the physical properties of incompressible fluid, we add a divergence-free regularization term $E_{div}$ to the optical flow framework. Based on the Navier-Stokes equations, the fluid velocity $\mathbf{u}$ can be split via the Helmholtz decomposition into two distinct components: an irrotational component $\nabla P$ and a solenoidal component $\mathbf{u}_{sol} = [u_{sol}, v_{sol}, w_{sol}]$. The irrotational component $\nabla P$ is curl-free and is determined by the gradient of a scalar function $P$ (e.g., pressure). The solenoidal component $\mathbf{u}_{sol}$ is divergence-free and models an incompressible flow. From the divergence-free property, we have:

$$
\nabla \cdot \mathbf{u}_{sol} = 0 \tag{16}
$$

where $\nabla \cdot$ denotes the divergence operator with $\nabla = [\frac{\partial}{\partial x}, \frac{\partial}{\partial y}, \frac{\partial}{\partial z}]^T$. Since $\mathbf{u} = \mathbf{u}_{sol} + \nabla P$, taking the divergence on both sides, we have:

$$
\nabla \cdot \mathbf{u} = \nabla^{2} P \tag{17}
$$

We solve Eq. 17 by Poisson integration and compute the scalar field as $P = (\nabla^2)^{-1}(\nabla \cdot \mathbf{u})$. We then project $\mathbf{u}$ onto the divergence-free vector field $\mathbf{u}_{sol} = \mathbf{u} - \nabla P$. Similar to [11], we formulate a divergence-free term $E_{div}$ that keeps the flow velocity field $\mathbf{u}$ close to its divergence-free component $\mathbf{u}_{sol}$:

$$
E_{div}(\mathbf{u}) = \left\| \mathbf{u} - \mathbf{u}_{sol} \right\|_{2}^{2} \tag{18}
$$

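A compact sketch of this Helmholtz projection is given below: it computes the divergence of the flow, solves the Poisson equation of Eq. 17 with an FFT-based solver, and subtracts the resulting gradient. Periodic boundaries and central-difference derivatives are assumptions of this sketch; the paper's Poisson integration may handle boundaries differently.

```python
import numpy as np

def divergence_free_projection(flow):
    """Project a velocity field onto its divergence-free (solenoidal) part:
    solve laplacian(P) = div(u) (Eq. 17), then return u_sol = u - grad(P).
    flow: (X, Y, Z, 3) per-voxel velocity."""
    u, v, w = flow[..., 0], flow[..., 1], flow[..., 2]
    div = (np.gradient(u, axis=0) + np.gradient(v, axis=1)
           + np.gradient(w, axis=2))
    # FFT Poisson solve under a periodic-boundary assumption
    kx = 2 * np.pi * np.fft.fftfreq(u.shape[0])
    ky = 2 * np.pi * np.fft.fftfreq(u.shape[1])
    kz = 2 * np.pi * np.fft.fftfreq(u.shape[2])
    KX, KY, KZ = np.meshgrid(kx, ky, kz, indexing='ij')
    k2 = KX ** 2 + KY ** 2 + KZ ** 2
    k2[0, 0, 0] = 1.0  # avoid dividing by zero for the mean mode
    P_hat = -np.fft.fftn(div) / k2
    P_hat[0, 0, 0] = 0.0
    P = np.real(np.fft.ifftn(P_hat))
    gPx, gPy, gPz = np.gradient(P)
    return np.stack([u - gPx, v - gPy, w - gPz], axis=-1)
```
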
# 4 Experimental Results
To evaluate our fluid flow reconstruction algorithm, we perform experiments on both synthetic and real data under the light field PIV setting. We also evaluate our method on the Johns Hopkins Turbulence Database (JHUTDB) [24, 34], which provides ground truth fluid flow. All experiments are performed on a PC with an Intel i7-4700K CPU and 16 GB of memory. In terms of computational time, the entire process takes about 2 minutes: 30 seconds for particle location estimation, 40 seconds for correspondence matching, and 50 seconds for velocity field reconstruction.

# 4.1 Synthetic Data
We first evaluate our proposed approach on two simulated flows: a vortex flow and a drop flow. The flows are simulated within a volume of $100 \times 100 \times 20$ voxels. We randomly sample tracer particles within the fluid volume, with a particle density of 0.02 per voxel. We render light field images with an angular resolution of $7 \times 7$ and a spatial resolution of $434 \times 625$. We simulate the advection of particles over time following the method in [40]. We apply our algorithm to the rendered light fields to recover the 3D fluid flows. In Fig. 5, we show our recovered velocity fields in comparison with the ground truth. Qualitatively, our reconstructed vector fields are highly consistent with the ground truth.

Fig. 5. Synthetic results in comparison with the ground truth.

We perform quantitative evaluations using two error metrics: the average end-point error (AEE) and the average angular error (AAE). AEE is computed as the average Euclidean distance between the estimated particle positions and the ground truth ones. AAE is computed as the average angular difference between the estimated and ground-truth velocity vectors. We compare our method with the multi-scale Horn-Schunck (H&S) method [30] and the rainbow PIV [47]. Specifically, we apply H&S to our recovered 3D particles and use it as the baseline algorithm for flow estimation; with this comparison, we hope to demonstrate the effectiveness of our regularization terms in flow estimation. For rainbow PIV, we have implemented a renderer to generate depth-dependent spectral images of virtual particles. To ensure fairness, the rendered images have the same spatial resolution as our input light field (i.e., $434 \times 625$).

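Both metrics can be computed directly from the per-voxel (or per-particle) velocity vectors; a sketch follows, reporting AAE in degrees, which is an assumption since the paper does not state the unit.

```python
import numpy as np

def average_endpoint_error(flow_est, flow_gt):
    """AEE: mean Euclidean distance between estimated and ground-truth vectors."""
    return float(np.mean(np.linalg.norm(flow_est - flow_gt, axis=-1)))

def average_angular_error(flow_est, flow_gt, eps=1e-8):
    """AAE: mean angle between estimated and ground-truth vectors (degrees)."""
    dot = np.sum(flow_est * flow_gt, axis=-1)
    norms = (np.linalg.norm(flow_est, axis=-1)
             * np.linalg.norm(flow_gt, axis=-1) + eps)
    cos = np.clip(dot / norms, -1.0, 1.0)
    return float(np.degrees(np.mean(np.arccos(cos))))
```
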
We also perform an ablation study by testing two variants of our method: "w/o $E_{corres}$", which removes the correspondence term, and "w/o $E_{div}$", which removes the divergence-free term. The experiments are performed on the vortex flow with a particle density of 0.02. Quantitative evaluations are shown in Fig. 6, and the error maps of the recovered velocity fields for the ablation study are shown in Fig. 7. We can see that our method achieves the best performance when both regularization terms are imposed. Our method outperforms both H&S and the rainbow PIV at various particle density levels. Further, our accumulated error over time grows much more slowly than that of the other two state-of-the-art methods.

Fig. 6. Quantitative evaluation. The left two plots show errors with respect to different particle densities. The right two plots show accumulated errors over time.

Fig. 7. Ablation study. We show the error maps of the estimated velocity field at three fluid volume slices.

# 4.2 Johns Hopkins Turbulence Database (JHUTDB)
Next, we conduct experiments on data generated from the Johns Hopkins Turbulence Database (JHUTDB) [24]. To reduce the processing time, we crop out a volume of $256 \times 128 \times 80$ voxels for each turbulence field in the dataset. The norm of the velocity field at each location ranges from 0 to 2.7 voxels per time step. We generate random tracer particles with a density of 0.025 per voxel and advect the particles according to the turbulence velocity field. In our evaluation, we render two light field images at two consecutive frames to estimate the particle locations and reconstruct the velocity field. Our reconstruction results in comparison with the ground truth are shown in Fig. 8. We show our reconstructed velocity volume in the $x, y, z$ directions. We also show the error map of the velocity magnitude to illustrate that our method is highly accurate.

Fig. 8. JHUTDB velocity field reconstruction results.

# 4.3 Real Data
We finally test our method on real captured flow data. Fig. 9 shows our acquisition system for capturing real 3D flows. We use a Lytro Illum light field camera with a $30\mathrm{mm}$ focal length to capture the tracer particles in the fluid. As the Illum does not have a video mode, we use an external control board to trigger the camera at high frequency to capture consecutive time frames. Due to the limited size of the on-chip image buffer, our acquisition cannot achieve a very high frame rate; in our experiment, we set the trigger frequency to $10\mathrm{Hz}$. The captured light field has an angular resolution of $15\times 15$ and a spatial resolution of $625\times 434$. We use the light field calibration toolbox [8] to process and decode the raw light field data into sub-aperture images. We use the center view as the reference for depth estimation, and the effective depth volume that we are able to reconstruct is around $600 \times 500 \times 200\,\mathrm{mm}$, slightly smaller than the captured field of view because we enforce a rectangular volume inside the perspective view frustum.

Fig. 9. Our real experiment setup. We use a compact light field camera in a PIV setting.

Fig. 10. Real experiment results. We show our recovered velocity fields (upper row) and path line visualizations over four consecutive frames (lower row) for three types of flows: vortex, double vortex, and a random complex flow.

We use green polyethylene microspheres with a density of $1\,\mathrm{g/cc}$ and sizes of 1000-$1180~\mu m$ as tracer particles. Before dispersing the particles, we mix them with a surfactant to reduce the surface tension of the water and thus minimize agglomeration between particles. We test on three types of flows: vortex, double vortex, and random complex flows.

Fig. 10 shows our recovered fluid flow velocity fields and path line visualizations (please refer to the supplementary material for more reconstruction results) for the three flow types: vortex, double vortex, and random complex flow. The left column shows the velocity field between the first and second frames. The right column shows the path line visualization through frames 1-4. We can see that our reconstructions faithfully depict the intended fluid motions and are highly reliable.

We also compare our method with a recent state-of-the-art scene flow method [26] on the real data. The scene flow method takes two consecutive RGB-D images as input and uses a rigidity-transform network and a flow network for motion estimation. Since the method needs a depth map as input, we first compute a depth map for the center view of the light field and then combine it with the sub-aperture color image as the input for [26]. The flow estimation results are shown in Fig. 11. We show the projected scene flows and the flow vector fields for the three types of flows (single vortex, double vortex, and random flow). The scene flow method fails to recover the flow structures, especially for the vortex flows. This is because our particles are heavily occluded and have very similar appearances. Further, the scene flow algorithm does not take the physical properties of fluid into consideration.

Fig. 11. Comparison with scene flow (Lv et al. [26]) on the real data. We compare the projected scene flow and the flow vector field on three types of flows.

# 5 Conclusions
In this paper, we have presented a light field PIV solution that uses a commercial compact light field camera to recover volumetric 3D fluid motion from tracer particles. We have developed a 3D particle reconstruction algorithm that exploits the light field focal stack symmetry in order to handle heavily occluded particles. To recover the fluid flow, we have refined the classical optical flow framework by introducing two novel regularization terms: 1) the correspondence term to enforce one-to-one particle matching; and 2) the divergence-free term to enforce the physical properties of incompressible fluid. Comprehensive synthetic and real experiments, as well as comparisons with state-of-the-art methods, have demonstrated the effectiveness of our method.

Although our method can faithfully recover fluid flows in a small to medium volume, it still has several limitations. First, due to the small baseline of the compact light field camera, the resolvable depth range is rather limited. As a result, the resolution of our volumetric velocity field along the z-axis is much lower than its x- and y-resolutions. One way to enhance the z-resolution is to add a second light field camera that captures the fluid volume from an orthogonal angle. Second, in our fluid flow reconstruction step, only two consecutive frames are considered, so motion continuity might not always be satisfied. Adding temporal constraints to our optimization framework could further improve the results.

# Acknowledgements
This work is partially supported by the National Science Foundation (NSF) under Grants CBET-1706130 and CRII-1948524, and by the Louisiana Board of Regents under Grant LEQSF(2018-21)-RD-A-10.

# References
1. Adrian, R.J., Westerweel, J.: Particle image velocimetry. No. 30, Cambridge University Press (2011)
2. Aguirre-Pablo, A.A., Alarfaj, M.K., Li, E.Q., Hernández-Sánchez, J.F., Thoroddsen, S.T.: Tomographic particle image velocimetry using smartphones and colored shadows. In: Scientific Reports (2017)
3. Arroyo, M., Greated, C.: Stereoscopic particle image velocimetry. Measurement Science and Technology 2(12), 1181 (1991)
4. Atcheson, B., Ihrke, I., Heidrich, W., Tevs, A., Bradley, D., Magnor, M., Seidel, H.P.: Time-resolved 3d capture of non-stationary gas flows. In: ACM Transactions on Graphics (TOG). vol. 27, p. 132. ACM (2008)
5. Belden, J., Truscott, T.T., Axiak, M.C., Techet, A.H.: Three-dimensional synthetic aperture particle image velocimetry. Measurement Science and Technology 21(12), 125403 (2010)
6. Brücker, C.: 3d scanning piv applied to an air flow in a motored engine using digital high-speed video. Measurement Science and Technology 8(12), 1480 (1997)
7. Crocker, J.C., Grier, D.G.: Methods of digital video microscopy for colloidal studies. Journal of Colloid and Interface Science 179(1), 298-310 (1996)
8. Dansereau, D.G., Pizarro, O., Williams, S.B.: Decoding, calibration and rectification for lenselet-based plenoptic cameras. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1027-1034 (2013)
9. Elsinga, G.E., Scarano, F., Wieneke, B., van Oudheusden, B.W.: Tomographic particle image velocimetry. Experiments in Fluids 41(6), 933-947 (2006)
10. Fahringer, T., Thurow, B.: Tomographic reconstruction of a 3-d flow field using a plenoptic camera. In: 42nd AIAA Fluid Dynamics Conference and Exhibit. p. 2826 (2012)
11. Gregson, J., Ihrke, I., Thuerey, N., Heidrich, W.: From capture to simulation: connecting forward and inverse problems in fluids. ACM Transactions on Graphics (TOG) 33(4), 139 (2014)
12. Gu, J., Nayar, S.K., Grinspun, E., Belhumeur, P.N., Ramamoorthi, R.: Compressive structured light for recovering inhomogeneous participating media. IEEE Transactions on Pattern Analysis and Machine Intelligence 35, 1-1 (2013)
13. Hasinoff, S.W., Kutulakos, K.N.: Photo-consistent reconstruction of semitransparent scenes by density-sheet decomposition. IEEE Transactions on Pattern Analysis and Machine Intelligence 29, 870-885 (2007)
14. Hawkins, T., Einarsson, P., Debevec, P.: Acquisition of time-varying participating media. In: ACM Transactions on Graphics (TOG). vol. 24, pp. 812-815. ACM (2005)
15. Heitz, D., Mémin, E., Schnörr, C.: Variational fluid flow measurements from image sequences: synopsis and perspectives. Experiments in Fluids 48(3), 369-393 (2010)
16. Hori, T., Sakakibara, J.: High-speed scanning stereoscopic piv for 3d vorticity measurement in liquids. Measurement Science and Technology 15(6), 1067 (2004)
17. Horn, B.K., Schunck, B.G.: Determining optical flow. Artificial Intelligence 17(1-3), 185-203 (1981)
18. Ihrke, I., Kutulakos, K.N., Lensch, H.P., Magnor, M., Heidrich, W.: Transparent and specular object reconstruction. In: Computer Graphics Forum. vol. 29, pp. 2400-2426. Wiley Online Library (2010)
19. Ihrke, I., Magnor, M.A.: Image-based tomographic reconstruction of flames. In: Symposium on Computer Animation (2004)
20. Ji, Y., Ye, J., Yu, J.: Reconstructing gas flows using light-path approximation. In: 2013 IEEE Conference on Computer Vision and Pattern Recognition. pp. 2507-2514 (2013)
21. Kajitani, L., Dabiri, D.: A full three-dimensional characterization of defocusing digital particle image velocimetry. Measurement Science and Technology 16(3), 790 (2005)
22. Lasinger, K., Vogel, C., Schindler, K.: Volumetric flow estimation for incompressible fluids using the stationary stokes equations. In: 2017 IEEE International Conference on Computer Vision (ICCV). pp. 2584-2592. IEEE (2017)
23. Lasinger, K., Vogel, C., Schindler, K.: Volumetric flow estimation for incompressible fluids using the stationary stokes equations. In: 2017 IEEE International Conference on Computer Vision (ICCV). pp. 2584-2592 (2017)
24. Li, Y., Perlman, E., Wan, M., Yang, Y., Meneveau, C., Burns, R., Chen, S., Szalay, A., Eyink, G.: A public turbulence database cluster and applications to study lagrangian evolution of velocity increments in turbulence. Journal of Turbulence (9), N31 (2008)
25. Lin, H., Chen, C., Bing Kang, S., Yu, J.: Depth recovery from light field using focal stack symmetry. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 3451-3459 (2015)
26. Lv, Z., Kim, K., Troccoli, A., Sun, D., Rehg, J.M., Kautz, J.: Learning rigidity in dynamic scenes with a moving camera for 3d motion field estimation. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 468-484 (2018)
27. Lynch, K., Fahringer, T., Thurow, B.: Three-dimensional particle image velocimetry using a plenoptic camera. In: 50th AIAA Aerospace Sciences Meeting including the New Horizons Forum and Aerospace Exposition. p. 1056 (2012)
28. Ma, C., Lin, X., Suo, J., Dai, Q., Wetzstein, G.: Transparent object reconstruction via coded transport of intensity. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 3238-3245 (2014)
29. Maas, H., Gruen, A., Papantoniou, D.: Particle tracking velocimetry in three-dimensional flows. Experiments in Fluids 15(2), 133-146 (1993)
30. Meinhardt, E., Pérez, J.S., Kondermann, D.: Horn-Schunck optical flow with a multi-scale strategy. IPOL Journal 3, 151-172 (2013)
31. Mendelson, L., Techet, A.H.: Quantitative wake analysis of a freely swimming fish using 3d synthetic aperture piv. Experiments in Fluids 56(7), 135 (2015)
32. Morris, N.J., Kutulakos, K.N.: Dynamic refraction stereo. IEEE Transactions on Pattern Analysis and Machine Intelligence 33(8), 1518-1531 (2011)
33. Ng, R., Levoy, M., Brédif, M., Duval, G., Horowitz, M., Hanrahan, P., et al.: Light field photography with a hand-held plenoptic camera. Computer Science Technical Report CSTR 2(11), 1-11 (2005)
34. Perlman, E., Burns, R., Li, Y., Meneveau, C.: Data exploration of turbulence simulations using a database cluster. In: Proceedings of the 2007 ACM/IEEE Conference on Supercomputing. p. 23. ACM (2007)
35. Pick, S., Lehmann, F.O.: Stereoscopic piv on multiple color-coded light sheets and its application to axial flow in flapping robotic insect wings. Experiments in Fluids 47(6), 1009 (2009)
36. Schanz, D., Gesemann, S., Schröder, A.: Shake-the-box: Lagrangian particle tracking at high particle image densities. Experiments in Fluids 57(5), 70 (2016)
37. Shi, S., Ding, J., Atkinson, C., Soria, J., New, T.H.: A detailed comparison of single-camera light-field piv and tomographic piv. Experiments in Fluids 59, 1-13 (2018)
38. Shi, S., Ding, J., New, T.H., Soria, J.: Light-field camera-based 3d volumetric particle image velocimetry with dense ray tracing reconstruction technique. Experiments in Fluids 58, 1-16 (2017)
39. Soria, J., Atkinson, C.: Towards 3c-3d digital holographic fluid velocity vector field measurement - tomographic digital holographic piv (tomo-hpiv). Measurement Science and Technology 19(7), 074002 (2008)
40. Stam, J.: Stable fluids. In: Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques. pp. 121-128. ACM Press/Addison-Wesley Publishing Co. (1999)
41. Strecke, M., Alperovich, A., Goldluecke, B.: Accurate depth and normal maps from occlusion-aware focal stack symmetry. In: Computer Vision and Pattern Recognition (CVPR), 2017 IEEE Conference on. pp. 2529-2537. IEEE (2017)
42. Sumner, R.W., Schmid, J., Pauly, M.: Embedded deformation for shape manipulation. In: ACM Transactions on Graphics (TOG). vol. 26, p. 80. ACM (2007)
43. Vedula, S., Baker, S., Rander, P., Collins, R.T., Kanade, T.: Three-dimensional scene flow. In: Proceedings of the Seventh IEEE International Conference on Computer Vision. vol. 2, pp. 722-729 (1999)
44. Wieneke, B.: Volume self-calibration for 3d particle image velocimetry. Experiments in Fluids 45(4), 549-556 (2008)
45. Willert, C., Gharib, M.: Three-dimensional particle imaging with a single camera. Experiments in Fluids 12(6), 353-358 (1992)
46. Xiong, J., Fu, Q., Idoughi, R., Heidrich, W.: Reconfigurable rainbow piv for 3d flow measurement. In: Computational Photography (ICCP), 2018 IEEE International Conference on. pp. 1-9. IEEE (2018)
47. Xiong, J., Idoughi, R., Aguirre-Pablo, A.A., Aljedaani, A.B., Dun, X., Fu, Q., Thoroddsen, S.T., Heidrich, W.: Rainbow particle imaging velocimetry for dense 3d fluid velocity imaging. ACM Transactions on Graphics (TOG) 36(4), 36 (2017)
48. Xue, T., Rubinstein, M., Wadhwa, N., Levin, A., Durand, F., Freeman, W.T.: Refraction wiggles for measuring fluid depth and velocity from video. In: European Conference on Computer Vision. pp. 767-782. Springer (2014)
49. Ye, J., Ji, Y., Li, F., Yu, J.: Angular domain reconstruction of dynamic 3d fluid surfaces. In: 2012 IEEE Conference on Computer Vision and Pattern Recognition. pp. 310-317. IEEE (2012)
50. Zhang, J., Tao, B., Katz, J.: Turbulent flow measurement in a square duct with hybrid holographic piv. Experiments in Fluids 23(5), 373-381 (1997)

3dfluidflowreconstructionusingcompactlightfieldpiv/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:743a86e01800e714bd465cdf8eed5209815a05f2f862ea7bf301de86a8ec3a23
size 582569

3dfluidflowreconstructionusingcompactlightfieldpiv/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4b9795fa416504c3fdead2d499f0efec4b564082b4329d57e52da9e6af24c45a
size 477214

3dhumanshapeandposefromasinglelowresolutionimagewithselfsupervisedlearning/693358b7-35b3-44f8-9c22-c9099f946cfd_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:db69f9662a31c34a6f3bdd9343d772d8673467b4d8958313854b83c16b10d3b8
size 79868

3dhumanshapeandposefromasinglelowresolutionimagewithselfsupervisedlearning/693358b7-35b3-44f8-9c22-c9099f946cfd_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d8d56267af9cdd4e0a90fa8ad8fd1fce4cf328e27f196371f98dbc763df465f
size 97508

3dhumanshapeandposefromasinglelowresolutionimagewithselfsupervisedlearning/693358b7-35b3-44f8-9c22-c9099f946cfd_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0142150b8ed4b5c13aeaa0aa8dec3ee03c215e7df3eef91e21335d325091275e
size 1549728

3dhumanshapeandposefromasinglelowresolutionimagewithselfsupervisedlearning/full.md
ADDED
@@ -0,0 +1,303 @@

# 3D Human Shape and Pose from a Single Low-Resolution Image with Self-Supervised Learning
Xiangyu Xu$^{1}$, Hao Chen$^{2}$, Francesc Moreno-Noguer$^{3}$, László A. Jeni$^{1}$, and Fernando De la Torre$^{1,4}$

$^{1}$ Robotics Institute, Carnegie Mellon University, Pittsburgh, USA
$^{2}$ Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, USA
$^{3}$ Institut de Robòtica i Informàtica Industrial (CSIC-UPC), Barcelona, Spain
$^{4}$ Facebook Reality Labs (Oculus), Pittsburgh, USA

Abstract. 3D human shape and pose estimation from monocular images has been an active area of research in computer vision, having a substantial impact on the development of new applications, from activity recognition to creating virtual avatars. Existing deep learning methods for 3D human shape and pose estimation rely on relatively high-resolution input images; however, high-resolution visual content is not always available in several practical scenarios such as video surveillance and sports broadcasting. Low-resolution images in real scenarios can vary in a wide range of sizes, and a model trained at one resolution does not typically degrade gracefully across resolutions. Two common approaches to solve the problem of low-resolution input are applying super-resolution techniques to the input images, which may introduce visual artifacts, or simply training one model for each resolution, which is impractical in many realistic applications.

To address the above issues, this paper proposes a novel algorithm called RSC-Net, which consists of a Resolution-aware network, a Self-supervision loss, and a Contrastive learning scheme. The proposed network is able to learn 3D body shape and pose across different resolutions with a single model. The self-supervision loss encourages scale-consistency of the output, and the contrastive learning scheme enforces scale-consistency of the deep features. We show that both these new training losses provide robustness when learning 3D shape and pose in a weakly-supervised manner. Extensive experiments demonstrate that the RSC-Net can achieve consistently better results than state-of-the-art methods for challenging low-resolution images.

Keywords: 3D human shape and pose, low-resolution, neural network, self-supervised learning, contrastive learning.

# 1 Introduction
3D human shape and pose estimation from 2D images is of great interest to the computer vision and graphics community. Whereas significant progress has been made in this field, it is often assumed that the input image is high-resolution and contains sufficient information for reconstructing the 3D human geometry in detail [1, 2, 6, 21, 22, 24, 25, 34, 40, 41, 42, 52]. However, this assumption does not always hold in practice, since many images in real scenes have low resolutions, such as those from surveillance cameras and sports videos [48, 46, 36, 35, 47, 38]. As a result, existing algorithms designed for high-resolution images are prone to fail when applied to low-resolution inputs, as shown in Figure 1. In this paper, we study the relatively unexplored problem of estimating 3D human shape and pose from low-resolution images.

Fig. 1. 3D human shape and pose estimation from a low-resolution image captured from a real surveillance video. A SOTA method [25] that works well for high-resolution images performs poorly on low-resolution ones. (Panels: surveillance camera, input image, SOTA, RSC-Net.)

There are two major challenges in this low-resolution 3D estimation problem. First, the resolutions of the input images in real scenarios vary in a wide range, and a network trained for one specific resolution does not always work well for another. One might consider overcoming this problem by simply training different models, one for each image resolution. However, this is impractical in terms of memory and training computation. Alternatively, one could super-resolve the images to a sufficiently large resolution, but the super-resolution step often results in visual artifacts, which leads to poor 3D estimation. To address this issue, we propose a resolution-aware deep neural network for 3D human shape and pose estimation that is robust to different image resolutions. Our network builds upon two main components: a feature extractor shared across different resolutions and a set of resolution-dependent parameters to adaptively integrate the different-level features.

Another challenge we encounter is due to the fact that high-quality 3D annotations are hard to obtain, especially for in-the-wild data, and only a small portion of the training images have 3D ground truth labels [21, 25], which complicates the training process. Whereas most training images have 2D keypoint labels, these are usually not sufficient for predicting the 3D outputs due to the inherent ambiguities in the 2D-to-3D mapping. This problem is further accentuated in our task, as the low-resolution 3D estimation is not well constrained and has a large solution space due to the limited pixel observations. Therefore, directly training low-resolution models with incomplete information typically does not achieve good results. Inspired by self-supervised learning [26, 44], we propose a directional self-supervision loss to remedy the above issue. Specifically, we enforce consistency across the outputs of the same input image at different resolutions, such that the results of the higher-resolution images can act as guidance for lower-resolution input. This strategy significantly improves the 3D estimation results.

In addition to enforcing output consistency, we also devise an approach to enforce consistency of the feature representations across different resolutions. Nevertheless, we find that the commonly used mean squared error is not effective in measuring discrepancies between high-dimensional feature vectors. Instead, we adapt contrastive learning [39, 14, 7], which aims to maximize the mutual information across the feature representations at different resolutions and encourages the network to produce better features for the low-resolution input.

To summarize, we make the following contributions in this work. First, we study the relatively unexplored problem of 3D human shape and pose estimation from low-resolution images and present a simple yet effective solution for it, called RSC-Net, which is based on a novel resolution-aware network that can handle arbitrary-resolution input with one single model. Second, we propose a self-supervision loss to address the issue of weak supervision. Furthermore, we introduce contrastive learning, which effectively enforces feature consistency across different resolutions. Extensive experiments demonstrate that the proposed method outperforms state-of-the-art algorithms on challenging low-resolution inputs and achieves robust performance for high-quality 3D human shape and pose estimation.

# 2 Related Work
We first review the state-of-the-art methods for 3D human shape and pose estimation and then discuss low-resolution image recognition algorithms.

3D human shape and pose estimation. Recent years have witnessed significant progress in the field of 3D human shape and pose estimation from a single image [1, 2, 3, 6, 9, 21, 22, 24, 34, 40, 41, 42, 52, 50, 49]. Existing methods for this task can be broadly categorized into two classes. The first kind of approach generally splits the 3D human estimation process into two stages: first transforming the input image into new representations, such as human 2D keypoints [6, 40, 34, 2, 1, 9], human silhouettes [40, 2, 34], body part segmentations [1], UV mappings [3], and optical flow [9], and then regressing the 3D human parameters [29] from the transformed outputs of the previous stage, either with iterative optimization [6, 2] or neural networks [40, 1, 9, 34]. As these methods map the original input images into simpler representation forms, which are generally sparse and can be easily rendered, they can exploit a large amount of synthetic data for training, where there are sufficient high-quality 3D labels. However, these two-stage systems are error-prone, as the errors from the early stage may accumulate or even be amplified [21]. In addition, the intermediate results may throw away valuable information in the image, such as context.

More importantly, the task of the first stage, i.e., estimating the intermediate representations, is usually difficult for low-resolution images, and thereby the aforementioned two-stage models are not suitable for our problem of low-resolution 3D human shape and pose estimation.

Without relying on new representations, the second kind of approach directly regresses the 3D parameters from the input image [21, 22, 25, 42, 24, 41, 50], where most methods are based on deep neural networks. While being concise and not requiring the estimation of intermediate results, these methods usually suffer from the problem of weak supervision due to a lack of high-quality 3D ground truth. Most existing works focus on this problem and have developed different techniques to solve it. As a typical example, Kanazawa et al. [21] include a generative adversarial network (GAN) [11] to constrain the solution space using the prior learned from 3D human data. However, we find the GAN-based algorithm less effective for low-resolution input images where substantially fewer pixels are available. Kolotouros et al. [25] integrate the optimization-based method [6] into the training process of the deep network to more effectively exploit the 2D keypoints. While achieving good improvements over [21] on high-resolution images, [25] cannot be easily applied to low-resolution input, as the low-resolution network cannot provide good initial results to start the optimization loop. In addition, it significantly increases the training time. On the other hand, temporal information has also been exploited to enforce temporal consistency of the 3D estimation results, which however requires high-resolution video input [22, 50, 24]. Different from the above methods, we propose a 3D human shape and pose estimation algorithm using a single low-resolution image as input. We propose a self-supervision loss and a contrastive feature loss which effectively remedy the problem of insufficient 3D supervision.

Low-resolution image recognition. While there is no prior work for low-resolution 3D human shape and pose estimation, there are some related approaches to process low-resolution inputs for other image recognition tasks, such as 2D body pose estimation [35], face recognition [10, 8, 48], image classification [46], image retrieval [43, 37], and object detection [12, 27]. Most of these methods address the low-resolution issue by enhancing the degraded input, in either the image space [12, 8, 46] or the feature space [10, 43, 27, 37]. One typical image-space method [12] applies a super-resolution network which is trained to improve both the image quality (i.e., per-pixel similarity such as PSNR) and the object detection performance. However, the loss functions for higher PSNR and better recognition performance do not always agree with each other, which may lead to inferior solutions. Moreover, the super-resolution model may bring unpleasant artifacts, resulting in domain gap between the super-resolved and real high-resolution images. Unlike the image enhancement based approaches, the feature enhancement based methods [10, 43, 27, 37] are not distracted by the image quality loss and thus can better focus on improving the recognition performance. As a representative example, Ge et al. [10] use mean squared error (MSE) to enforce the similarity between the features of low-resolution and high-resolution images, which achieves good results for face recognition. Differ-
|
| 56 |
+
|
| 57 |
+

|
| 58 |
+
Fig. 2. Overview of the proposed algorithm. The resolution-aware network $f_{\mathrm{RA}}$ is trained with a combination of the basic loss (omitted in the figure for simplicity), self-supervision loss and contrastive feature loss. The modules with the same colors are shared across different resolutions, while the matrix $\alpha$ is resolution-dependent. Note that we resize the different resolution inputs $\{x_i\}$ to $224 \times 224$ with bicubic interpolation before feeding them into the network.
|
| 59 |
+
|
| 60 |
+
Different from the above approaches, Neumann et al. [35] propose a novel method for low-resolution 2D body pose estimation by predicting a probability map with a Gaussian mixture model, which, however, cannot be easily extended to 3D human shape and pose estimation. In this work, we apply the feature enhancement strategy to low-resolution 3D human shape and pose estimation. Instead of using MSE to measure feature similarity, we introduce contrastive learning [39], which can more effectively maximize the mutual information across the features of different resolutions. In addition, we handle different-resolution input with a resolution-aware neural network.
|
| 61 |
+
|
| 62 |
+
# 3 Algorithm
|
| 63 |
+
|
| 64 |
+
We study the problem of 3D human shape and pose estimation for a low-resolution image $x$ . Instead of training different networks for each specific resolution, we propose a resolution-aware neural network $f_{\mathrm{RA}}$ which can handle the complex inputs with different resolutions. We first introduce the 3D human representation model and the baseline network for 3D human estimation with a single 2D image. Then we describe the proposed resolution-aware model as well as the self-supervision loss and the contrastive learning strategy for training the network. An overview of our method is shown in Figure 2.
|
| 65 |
+
|
| 66 |
+
# 3.1 3D Human Representation
|
| 67 |
+
|
| 68 |
+
We represent the 3D human body using the Skinned Multi-Person Linear (SMPL) model [29]. The SMPL is a parametric model which describes the body shape and pose with two sets of parameters $\beta$ and $\theta$ , respectively. The body shape is represented by a basis in a low-dimensional shape space learned from a training set of 3D human scans, and the parameters $\beta \in \mathbb{R}^{10}$ are coefficients of the basis
|
| 69 |
+
|
| 70 |
+
vectors. The body pose is defined by a skeleton rig with $K = 24$ joints including the body root, and the pose parameters $\theta \in \mathbb{R}^{3K}$ are the axis-angle representations of the relative rotation between different body parts as well as the global rotation of the body root. With $\beta$ and $\theta$ , we can obtain the 3D body mesh: $M = f_{\mathrm{SMPL}}(\beta, \theta)$ , where $M \in \mathbb{R}^{N \times 3}$ is a triangulated surface with $N = 6890$ vertices.
|
| 71 |
+
|
| 72 |
+
Similar to the prior works [21, 25], we can predict the 3D locations of the body joints $X$ with the body mesh using a pretrained mapping matrix $W \in \mathbb{R}^{K \times N}$ :
|
| 73 |
+
|
| 74 |
+
$$
|
| 75 |
+
X \in \mathbb {R} ^ {K \times 3} = W M. \tag {1}
|
| 76 |
+
$$
|
| 77 |
+
|
| 78 |
+
With the 3D human joints, we use a perspective camera model to project the body joints from 3D to 2D. Assuming the camera parameters are $\delta \in \mathbb{R}^3$ which define the 3D translation of the camera, the 2D keypoints can be formulated as:
|
| 79 |
+
|
| 80 |
+
$$
|
| 81 |
+
J \in \mathbb{R}^{K \times 2} = f_{\mathrm{project}}(X, \delta), \tag{2}
|
| 82 |
+
$$
|
| 83 |
+
|
| 84 |
+
where $f_{\mathrm{project}}$ is the perspective projection function [13].
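To make the representation concrete, the sketch below (a PyTorch-style illustration, assuming the SMPL mesh vertices and the pretrained regressor $W$ are already available as tensors) shows how Eqs. (1) and (2) can be evaluated; the focal length and image size used in the projection are illustrative placeholders rather than values from the paper.

```python
import torch

def regress_joints(vertices, W):
    """Map SMPL mesh vertices (B, N, 3) to 3D joints (B, K, 3) via the
    pretrained regressor W of shape (K, N), i.e. X = W M as in Eq. (1)."""
    return torch.einsum('kn,bnd->bkd', W, vertices)

def project_perspective(X, delta, focal_length=5000.0, img_size=224.0):
    """Perspective projection of 3D joints X (B, K, 3) to 2D keypoints (B, K, 2)
    after translating by the camera parameters delta (B, 3), as in Eq. (2).
    focal_length and img_size are illustrative, not values from the paper."""
    X_cam = X + delta.unsqueeze(1)            # apply camera translation
    x = X_cam[..., :2] / X_cam[..., 2:3]      # divide by depth
    return focal_length * x + img_size / 2.0  # map to pixel coordinates
```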
|
| 85 |
+
|
| 86 |
+
# 3.2 Resolution-Aware 3D Human Estimation
|
| 87 |
+
|
| 88 |
+
Baseline network. Similar to the existing methods [21, 25], we use the deep convolutional neural network (CNN) for 3D human estimation, where the ResNet-50 [15] is employed to extract features from the input image. The building block of the ResNet (i.e., ResBlock [16]) can be formulated as:
|
| 89 |
+
|
| 90 |
+
$$
|
| 91 |
+
z _ {k} = z _ {k - 1} + \phi_ {k} \left(z _ {k - 1}\right), \tag {3}
|
| 92 |
+
$$
|
| 93 |
+
|
| 94 |
+
where $z_{k}$ is the output features of the $k$ -th ResBlock, and $\phi_{k}$ represents the nonlinear function used to learn the feature residuals, which is modeled by several convolutional layers with ReLU activation [33]. The ResNet stacks $B$ ResBlocks together, and the final output can be written as:
|
| 95 |
+
|
| 96 |
+
$$
|
| 97 |
+
z _ {B} = z _ {0} + \sum_ {k = 1} ^ {B} \phi_ {k} \left(z _ {k - 1}\right), \tag {4}
|
| 98 |
+
$$
|
| 99 |
+
|
| 100 |
+
where $z_0$ denotes the low-level features extracted from the input image $x$ with convolutional layers, and $z_B$ is a combination of the residual maps of different levels from all the ResBlocks. Note that we do not explicitly consider the downsampling ResBlocks in (4) for clarity. With the output features of the ResNet, we can use global average pooling to obtain a feature vector $\varphi$ and employ an iterative MLP for regressing the 3D parameters $\beta, \theta, \delta$ similar to [21, 25].
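As a rough illustration of Eq. (3), a residual block can be sketched as below; this is a simplified stand-in for the actual ResNet-50 bottleneck blocks, and the channel counts and layer configuration are placeholders.

```python
import torch.nn as nn

class ResBlock(nn.Module):
    """Simplified residual block: z_k = z_{k-1} + phi_k(z_{k-1})."""
    def __init__(self, channels):
        super().__init__()
        self.phi = nn.Sequential(
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, 3, padding=1),
        )

    def forward(self, z):
        return z + self.phi(z)
```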
|
| 101 |
+
|
| 102 |
+
Resolution-aware network. The baseline network is originally designed for high-resolution images with an input size of $224 \times 224$ pixels, whereas the image resolutions of humans in real scenarios can be much lower and vary over a wide range. A straightforward way to deal with these low-resolution inputs is to train a different network for every possible resolution and choose the suitable one for each test image. However, this is impractical for real applications.
|
| 103 |
+
|
| 104 |
+
To solve this problem, we propose a resolution-aware network. The main idea is that different-resolution images with the same content are largely similar, as shown in Figure 2, and can share most parts of the feature extractor; only a small number of parameters need to be resolution-dependent to account for the characteristics of different image resolutions. Towards this end, instead of directly combining the different-level features as in (4), we learn a matrix $\alpha$ to adaptively fuse the residual maps from the ResBlocks for each input resolution as shown in Figure 2, such that different resolutions can have suitable features for 3D estimation. Specifically, we formulate the output of the proposed resolution-aware network as:
|
| 105 |
+
|
| 106 |
+
$$
|
| 107 |
+
z _ {i, B} = z _ {i, 0} + \sum_ {k = 1} ^ {B} \alpha_ {i, k} \phi_ {k} \left(z _ {i, k - 1}\right), \quad i = 1, 2, \dots , R, \tag {5}
|
| 108 |
+
$$
|
| 109 |
+
|
| 110 |
+
where $i$ is the index for different image resolutions, and larger $i$ indicates smaller image. $i = 1$ corresponds to the original high-resolution input. $\alpha \in \mathbb{R}^{R \times B}$ , where $R$ denotes the number of all the image resolutions considered in this work. $z_{i,k}$ and $\alpha_{i,k}$ respectively represent the output and the fusion weight of the $k$ -th ResBlock for the $i$ -th input resolution. According to (5), the original ResBlock in (3) is modified as: $z_{i,k} = z_{i,k-1} + \alpha_{i,k}\phi_k(z_{i,k-1})$ . Note that we use a slightly different notation here compared with (3) and (4) which do not have the index $i$ for image resolution, as the baseline network is not resolution-aware and applies the same operations to different resolution inputs.
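A minimal sketch of such a resolution-aware residual block is given below (PyTorch-style): only the scalar fusion weights $\alpha$ are resolution-dependent, while the residual branch $\phi_k$ is shared across resolutions, as described above. Initializing $\alpha$ to one (so that the block starts as a plain ResBlock) is our assumption, not stated in the paper.

```python
import torch
import torch.nn as nn

class ResolutionAwareBlock(nn.Module):
    """Residual block with a resolution-dependent fusion weight:
    z_{i,k} = z_{i,k-1} + alpha_{i,k} * phi_k(z_{i,k-1})   (cf. Eq. (5))."""
    def __init__(self, channels, num_ranges):
        super().__init__()
        self.phi = nn.Sequential(
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, 3, padding=1),
        )
        # one fusion weight per resolution range, shared residual branch
        self.alpha = nn.Parameter(torch.ones(num_ranges))

    def forward(self, z, range_idx):
        return z + self.alpha[range_idx] * self.phi(z)
```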
|
| 111 |
+
|
| 112 |
+
Note that for training the above network, each high-resolution image in the training dataset needs to be downsampled $R - 1$ times, such that each row of parameters in $\alpha$ has its corresponding training data. While the original training datasets [31, 18, 32, 4, 28] are already quite large to ensure the diversity of the training images, they would be further enlarged by a factor of $R - 1$, which significantly increases the computational burden of the training process. To remedy this training issue and reduce the parameters in $\alpha$, we divide all the $R$ resolutions into $P$ ranges and only learn one set of parameters for each range. We design the first resolution range to contain only the original high-resolution image, and for the other ranges, we randomly sample a resolution in each range during each training iteration. The training images with different resolutions can be denoted as $\{x_{i}, i = 1,2,\dots,P\}$, where the smaller images $x_{2}, x_{3}, \ldots, x_{P}$ are synthesized from the same high-resolution image $x_{1}$ with bicubic interpolation. With this strategy, the training set can be much smaller without losing diversity, and we can use a lower-dimensional matrix $\alpha \in \mathbb{R}^{P\times B}$, where the number of parameters is reduced from $RB$ to $PB$. During inference, we first decide the resolution range of the input image and then choose the suitable row of parameters in $\alpha$ for usage in the network.
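The two helpers below sketch this strategy: one maps an input crop resolution to its range index (used to select a row of $\alpha$ at inference), and one synthesizes the lower-resolution inputs $\{x_i\}$ by bicubic downsampling and resizing back to $224 \times 224$. The exact boundary handling and the listed target resolutions (the evaluation midpoints from Section 4) are our assumptions for illustration; during training a resolution is sampled randomly from each range instead.

```python
import torch.nn.functional as F

def resolution_range_index(res, boundaries=(224, 128, 64, 40, 24)):
    """Map the original crop resolution to its 0-based range index.
    Range 0 holds only the full 224 resolution; the remaining ranges
    follow (224,128], (128,64], (64,40], (40,24]."""
    if res >= boundaries[0]:
        return 0
    for i in range(1, len(boundaries)):
        if res >= boundaries[i]:
            return i
    return len(boundaries) - 1

def make_multires_inputs(x_hr, resolutions=(224, 176, 96, 52, 32)):
    """Synthesize multi-resolution inputs from a high-resolution crop x_hr of
    shape (B, 3, 224, 224): bicubic-downsample to each target resolution and
    resize back to 224x224 before feeding the network."""
    inputs = []
    for r in resolutions:
        x = x_hr if r == 224 else F.interpolate(
            x_hr, size=(r, r), mode='bicubic', align_corners=False)
        inputs.append(F.interpolate(
            x, size=(224, 224), mode='bicubic', align_corners=False))
    return inputs
```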
|
| 113 |
+
|
| 114 |
+
Progressive training. Directly using different resolution images for training all at once can lead to difficulties in optimizing the proposed model since the network needs to handle inputs with complex resolution properties simultaneously. Instead, we train the proposed network in a progressive manner, where the higher-resolution images are easier to handle and thus first processed in training,
|
| 115 |
+
|
| 116 |
+
and more challenging ones with lower resolutions are subsequently added. In this way, we alleviate the difficulty of the training process and the proposed model can evolve progressively.
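A possible realization of this schedule is sketched below; the number of epochs per stage is not specified here and is purely illustrative.

```python
def num_active_ranges(epoch, epochs_per_stage=2, num_ranges=5):
    """Progressive schedule sketch: start with the highest-resolution range
    only and add one lower-resolution range every few epochs."""
    return min(num_ranges, 1 + epoch // epochs_per_stage)
```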
|
| 117 |
+
|
| 118 |
+
Basic loss function. Similar to the previous algorithms [21, 25], the basic loss of our network is a combination of 3D and 2D losses. Suppose the output of the proposed network for input image $x_{i}$ is $[\hat{\beta}_i,\hat{\theta}_i,\hat{\delta}_i] = f_{\mathrm{RA}}(x_i)$ where $i$ is the resolution index, and $X_{g},J_{g},\beta_{\mathrm{g}},\theta_{\mathrm{g}}$ are the ground truth 3D and 2D keypoints and SMPL parameters. The basic loss function can be written as:
|
| 119 |
+
|
| 120 |
+
$$
|
| 121 |
+
L _ {\mathrm {b}} = \sum_ {i} \| [ \hat {\beta} _ {i}, \hat {\theta} _ {i} ] - [ \beta_ {\mathrm {g}}, \theta_ {\mathrm {g}} ] \| _ {2} ^ {2} + \lambda_ {1} \| \hat {X} _ {i} - X _ {\mathrm {g}} \| _ {2} ^ {2} + \lambda_ {2} \| \hat {J} _ {i} - J _ {\mathrm {g}} \| _ {2} ^ {2}, \tag {6}
|
| 122 |
+
$$
|
| 123 |
+
|
| 124 |
+
where $\hat{X}_i$ and $\hat{J}_i$ are estimated with (1) and (2), respectively. $\lambda_1$ and $\lambda_2$ are hyper-parameters for balancing different terms. Note that while all the training images have 2D keypoint labels $J_{g}$ in (6), only a limited portion of them have 3D ground truth $X_{g},\beta_{g},\theta_{g}$. For the training images without 3D labels, we simply omit the first two terms in (6), similar to [21, 22, 25].
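A sketch of Eq. (6) with the 3D terms masked out for images lacking 3D labels is shown below (PyTorch-style); the `has_3d` mask and the tensor shapes are our assumptions, and the loss is applied per resolution index $i$ and summed.

```python
def basic_loss(pred_smpl, gt_smpl, pred_X, gt_X, pred_J, gt_J, has_3d,
               lambda1=5.0, lambda2=5.0):
    """Basic loss of Eq. (6) for one resolution.
    pred_smpl/gt_smpl: (B, D) concatenated [beta, theta] parameters,
    pred_X/gt_X: (B, K, 3) 3D joints, pred_J/gt_J: (B, K, 2) 2D keypoints,
    has_3d: (B,) float mask, 1 for samples with 3D/SMPL ground truth."""
    l_smpl = ((pred_smpl - gt_smpl) ** 2).sum(dim=-1)       # SMPL parameter term
    l_3d = ((pred_X - gt_X) ** 2).sum(dim=(-1, -2))         # 3D keypoint term
    l_2d = ((pred_J - gt_J) ** 2).sum(dim=(-1, -2))         # 2D keypoint term
    # drop the first two terms for images that only carry 2D labels
    return ((l_smpl + lambda1 * l_3d) * has_3d + lambda2 * l_2d).mean()
```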
|
| 125 |
+
|
| 126 |
+
# 3.3 Self-Supervision
|
| 127 |
+
|
| 128 |
+
3D human shape and pose estimation is a weakly-supervised problem, as only a small part of the training data has 3D labels, and this is especially the case for in-the-wild images where accurate 3D annotations cannot be easily captured. This issue gets even worse for low-resolution images, as the 3D estimation is not well constrained by the limited pixel observations, which requires a strong supervision signal during training to find a good solution.
|
| 129 |
+
|
| 130 |
+
To remedy this problem, we propose a self-supervision loss to assist the basic loss for training the resolution-aware network $f_{\mathrm{RA}}$ . This new loss term is inspired by the self-supervised learning algorithm [26] which improves the training by minimizing the MSE between the network predictions under different input augmentation conditions. For our problem, we naturally have the same input with different data augmentations, i.e., the different-resolution images synthesized from the same high-resolution image. Thus, the self-supervision loss can be formulated by enforcing the consistency across the outputs of different image resolutions:
|
| 131 |
+
|
| 132 |
+
$$
|
| 133 |
+
\sum_ {i, j} \| f _ {\mathrm {R A}} \left(x _ {i}\right) - f _ {\mathrm {R A}} \left(x _ {j}\right) \| _ {2} ^ {2}. \tag {7}
|
| 134 |
+
$$
|
| 135 |
+
|
| 136 |
+
However, a major difference between our work and the original self-supervision method [26] is that we are generally more confident in the predictions of the higher-resolution images while [26] treats the results under different input augmentations equally. To exploit this prior knowledge, we improve the loss in (7) and propose a directional self-supervision loss:
|
| 137 |
+
|
| 138 |
+
$$
|
| 139 |
+
L _ {\mathrm {s}} = \sum_ {i, j} w _ {i, j} \| \bar {f} _ {\mathrm {R A}} \left(x _ {i}\right) - f _ {\mathrm {R A}} \left(x _ {j}\right) \| _ {2} ^ {2}, \tag {8}
|
| 140 |
+
$$
|
| 141 |
+
|
| 142 |
+
$$
|
| 143 |
+
w _ {i, j} = \mathbb {1} (j - i > 0) \cdot (j - i),
|
| 144 |
+
$$
|
| 145 |
+
|
| 146 |
+
where $w_{i,j}$ is the loss weight for an image pair $(x_i, x_j)$, and it is nonzero only when $x_i$ has a higher resolution than $x_j$. $\bar{f}_{\mathrm{RA}}$ represents a fixed network, and the gradients are not back-propagated through it, such that the lower-resolution image $x_j$ is encouraged to have similar output to the higher-resolution $x_i$ but not vice versa. In addition, since higher-resolution results usually provide higher-quality guidance during training, we give a larger weight to a larger resolution difference via the term $(j - i)$ in $w_{i,j}$. Note that we use all the resolutions that are higher than $x_j$ as supervision in (8) instead of only using the highest resolution $x_1$, as the results of $x_j$ and $x_1$ can differ from each other significantly for a large $j$, and the results of the resolutions between $x_j$ and $x_1$ can act as soft targets during training. In [17], Hinton et al. show the effectiveness of the "dark knowledge" in soft targets, and similarly for low-resolution 3D human shape and pose estimation, we also find that it is important to provide the challenging input with a hierarchical supervision signal such that the learning targets are not too difficult for the network to follow.
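The directional self-supervision loss of Eq. (8) can be sketched as follows, assuming the per-resolution network outputs are available as tensors ordered from highest to lowest resolution; the stop-gradient on the higher-resolution branch is implemented with `detach()`.

```python
def directional_self_supervision(outputs):
    """Directional self-supervision loss of Eq. (8).
    outputs: list [f_RA(x_1), ..., f_RA(x_P)] of (B, D) tensors for the same
    person, ordered from the highest to the lowest resolution. Higher-resolution
    outputs act as detached targets, weighted by the resolution gap j - i."""
    loss = 0.0
    P = len(outputs)
    for i in range(P):
        target = outputs[i].detach()   # no gradient through the higher resolution
        for j in range(i + 1, P):
            loss = loss + (j - i) * ((target - outputs[j]) ** 2).mean()
    return loss
```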
|
| 147 |
+
|
| 148 |
+
# 3.4 Contrastive Learning
|
| 149 |
+
|
| 150 |
+
While the self-supervision loss enforces the consistency of the network outputs across different image resolutions, we can further improve the model training by encouraging the consistency of the final feature representation $\varphi$ encoded by the network, such that features of lower-resolution images are closer to those of higher-resolution ones. Similar to (8), we have the feature consistency loss:
|
| 151 |
+
|
| 152 |
+
$$
|
| 153 |
+
L _ {\mathrm {f}} = \sum_ {i, j} w _ {i, j} g \left(\bar {\varphi} _ {i}, \varphi_ {j}\right), \tag {9}
|
| 154 |
+
$$
|
| 155 |
+
|
| 156 |
+
where $\varphi_{i}$ is the feature vector of the $i$-th resolution input image $x_{i}$, and $\bar{\varphi}$ denotes a fixed feature extractor without gradient back-propagation. $w_{i,j}$ is identical to that in (8). The function $g$ measures the distance between two feature vectors, and a straightforward choice is the MSE as in (8). However, the extracted features $\varphi$ usually have very high dimensions, and the MSE loss is not effective in modeling correlations of the complex structures in high-dimensional representations, as it decomposes element-wise, i.e., it assumes independence between the elements of the feature vectors [39, 45]. Moreover, unimodal losses such as MSE can be easily affected by noise or insignificant structures in the features, while a better loss function should exploit more global structures [39].
|
| 157 |
+
|
| 158 |
+
Towards this end, we propose a contrastive feature loss similar to [39, 7, 14, 45] to maximize the mutual information across the feature representations of different resolutions. The main idea behind our contrastive loss is to encourage the feature representation to be close for the same image with different resolutions but far from different images. Mathematically, the contrastive function can be written as:
|
| 159 |
+
|
| 160 |
+
$$
|
| 161 |
+
g \left(\bar {\varphi} _ {i}, \varphi_ {j}\right) = - \log \frac {\exp \left(s \left(\bar {\varphi} _ {i} , \varphi_ {j}\right) / \tau\right)}{\exp \left(s \left(\bar {\varphi} _ {i} , \varphi_ {j}\right) / \tau\right) + \sum_ {q \in \mathcal {Q}} \exp \left(s \left(q , \varphi_ {j}\right) / \tau\right)}, \tag {10}
|
| 162 |
+
$$
|
| 163 |
+
|
| 164 |
+
where $s$ represents the cosine similarity function, and $\tau$ is a temperature hyperparameter. $\varphi_{i},\varphi_{j}$ are the features of the same input with different resolutions. $\mathcal{Q}$ is a queue of data samples, which is constructed and progressively updated during training, and $\varphi_{i},\varphi_{j}\notin \mathcal{Q}$ . We use a method similar to [14] to update the queue, i.e., after each iteration, the current mini-batch is enqueued, and the oldest mini-batch in the queue is removed. Supposing the size of the queue is $|\mathcal{Q}|$ , the contrastive loss is essentially a $(|\mathcal{Q}| + 1)$ -way softmax-based classifier which classifies different resolutions $(\varphi_{i},\varphi_{j})$ as a positive pair while different contents $(q,\varphi_{j})$ as a negative pair. As the feature extractor of the higher resolution image does not have gradients in (10), the proposed loss function enforces the network to generate higher-quality features for the low-resolution input image.
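A sketch of the contrastive term in Eqs. (9)-(10) for a single (higher, lower) resolution pair is given below (PyTorch-style). The queue maintenance (enqueue the current mini-batch, dequeue the oldest) is omitted, and L2-normalizing the features to implement the cosine similarity is our assumption.

```python
import torch
import torch.nn.functional as F

def contrastive_feature_loss(phi_hi, phi_lo, queue, tau=0.1):
    """Contrastive feature loss of Eq. (10).
    phi_hi: (B, D) features of the higher-resolution images (fixed targets),
    phi_lo: (B, D) features of the lower-resolution images (receive gradients),
    queue:  (Q, D) memory bank of negative features from previous mini-batches."""
    phi_hi = F.normalize(phi_hi.detach(), dim=1)   # stop-gradient on the target branch
    phi_lo = F.normalize(phi_lo, dim=1)
    queue = F.normalize(queue.detach(), dim=1)

    pos = (phi_hi * phi_lo).sum(dim=1, keepdim=True) / tau   # (B, 1) positive logits
    neg = phi_lo @ queue.t() / tau                            # (B, Q) negative logits
    logits = torch.cat([pos, neg], dim=1)                     # (|Q|+1)-way classifier
    labels = torch.zeros(logits.size(0), dtype=torch.long, device=logits.device)
    return F.cross_entropy(logits, labels)                    # positives are class 0
```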
|
| 165 |
+
|
| 166 |
+
Our final loss is a combination of the basic loss, self-supervision loss, and contrastive feature loss: $L_{\mathrm{b}} + \lambda_{\mathrm{s}}L_{\mathrm{s}} + \lambda_{\mathrm{f}}L_{\mathrm{f}}$ , where $\lambda_{\mathrm{s}}$ and $\lambda_{\mathrm{f}}$ are hyper-parameters.
|
| 167 |
+
|
| 168 |
+
# 4 Experiments
|
| 169 |
+
|
| 170 |
+
We first describe the implementation details of the proposed RSC-Net. Then we compare our results with the state-of-the-art 3D human estimation approaches for different image resolutions. We also perform a comprehensive ablation study to demonstrate the effect of our contributions.
|
| 171 |
+
|
| 172 |
+
# 4.1 Implementation Details
|
| 173 |
+
|
| 174 |
+
We train our model and the baselines using a combination of 2D and 3D datasets similar to previous works [21, 25]. For the 3D datasets, we use Human3.6M [18] and MPI-INF-3DHP [32] with ground truth of 3D keypoints, 2D keypoints, and SMPL parameters. These datasets are mostly captured in constrained environments, and models trained on them do not generalize well to diverse real-world images. For better performance on in-the-wild data, we also use 2D datasets including LSP [19], LSP-Extended [20], MPII [4], and MS COCO [28], which only have 2D keypoint labels. We crop the human regions from the images and resize them to $224 \times 224$. Images with significant occlusions or very small humans are discarded from the dataset. We consider human image resolutions ranging from 224 down to 24. As introduced in Section 3.2, we split all the resolutions into $P = 5$ ranges: $\{224, (224, 128], (128, 64], (64, 40], (40, 24]\}$, where the first range corresponds to the original high-resolution image $x_{1}$. We obtain the lower-resolution images by downsampling the high-resolution images and resizing them back to 224 with bicubic interpolation. During training, we apply data augmentations to the images including Gaussian noise, color jitter, rotation, and random flipping. For the loss functions, we set $\lambda_{1} = 5$, $\lambda_{2} = 5$, $\lambda_{\mathrm{s}} = 0.1$, and $\lambda_{\mathrm{f}} = 0.1$. For contrastive learning, we set the size of the queue to 8192 and $\tau = 0.1$ in (10) similar to [7]. As in [24], we initialize the baseline networks and our model with the parameters of [25]. We use the Adam algorithm [23] to optimize the network with a learning rate of 5e-5. Similar to [24], we conduct evaluations on a large in-the-wild dataset, 3DPW [31], with 3D joint ground truth to
|
| 175 |
+
|
| 176 |
+

|
| 177 |
+
Fig. 3. Visual comparisons with the state-of-the-art methods on challenging low-resolution input. The input image has a resolution of $32 \times 32$ . The results of high-resolution images are also included as a reference. All the baselines are trained with the same training data as our method.
|
| 178 |
+
|
| 179 |
+
Table 1. Quantitative evaluations against the state-of-the-arts on 3DPW [31].
|
| 180 |
+
|
| 181 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="4">MPJPE</td><td colspan="4">MPJPE-PA</td></tr><tr><td>176</td><td>96</td><td>52</td><td>32</td><td>176</td><td>96</td><td>52</td><td>32</td></tr><tr><td>HMR</td><td>117.86</td><td>118.91</td><td>125.95</td><td>142.29</td><td>70.28</td><td>70.89</td><td>73.64</td><td>79.73</td></tr><tr><td>SPIN</td><td>112.72</td><td>113.60</td><td>120.71</td><td>137.61</td><td>69.20</td><td>69.40</td><td>72.21</td><td>78.44</td></tr><tr><td>ImgSR</td><td>116.47</td><td>117.74</td><td>127.78</td><td>146.58</td><td>66.62</td><td>67.48</td><td>72.34</td><td>81.07</td></tr><tr><td>FeaEN</td><td>107.97</td><td>109.42</td><td>119.08</td><td>143.51</td><td>61.37</td><td>62.13</td><td>66.62</td><td>77.21</td></tr><tr><td>Ours</td><td>96.36</td><td>97.36</td><td>103.49</td><td>117.12</td><td>58.98</td><td>59.34</td><td>61.81</td><td>67.59</td></tr></table>
|
| 182 |
+
|
| 183 |
+
demonstrate the strength of our model in an in-the-wild setting. We also provide results for constrained indoor images using the MPI-INF-3DHP dataset [32]. Following [24, 21, 25], we compute the Procrustes-aligned mean per-joint position error (MPJPE-PA) and the mean per-joint position error (MPJPE) to measure the 3D keypoint accuracy. To evaluate the performance at different image resolutions, we report results for the middle point of each resolution range, i.e., 176, 96, 52, and 32.
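For reference, the two metrics can be computed as in the NumPy sketch below; the Procrustes alignment solves for the optimal scale, rotation, and translation before measuring the joint error.

```python
import numpy as np

def mpjpe(pred, gt):
    """Mean per-joint position error for (K, 3) arrays."""
    return np.linalg.norm(pred - gt, axis=-1).mean()

def mpjpe_pa(pred, gt):
    """Procrustes-aligned MPJPE: rigidly align (scale, rotation, translation)
    the prediction to the ground truth before measuring the error."""
    mu_p, mu_g = pred.mean(0), gt.mean(0)
    p, g = pred - mu_p, gt - mu_g
    U, S, Vt = np.linalg.svd(p.T @ g)          # cross-covariance SVD
    R = Vt.T @ U.T
    if np.linalg.det(R) < 0:                   # avoid reflections
        Vt[-1] *= -1
        S[-1] *= -1
        R = Vt.T @ U.T
    scale = S.sum() / (p ** 2).sum()
    aligned = scale * p @ R.T + mu_g
    return mpjpe(aligned, gt)
```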
|
| 184 |
+
|
| 185 |
+
# 4.2 Comparison to State-of-the-Art Methods
|
| 186 |
+
|
| 187 |
+
We compare against the state-of-the-art 3D human shape and pose estimation methods HMR [21] and SPIN [25] by fine-tuning them on different resolution images with the same training settings as our model. Since no previous approach
|
| 188 |
+
|
| 189 |
+
Table 2. Quantitative evaluations against the state-of-the-arts on MPI-INF-3DHP [32].
|
| 190 |
+
|
| 191 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="4">MPJPE</td><td colspan="4">MPJPE-PA</td></tr><tr><td>176</td><td>96</td><td>52</td><td>32</td><td>176</td><td>96</td><td>52</td><td>32</td></tr><tr><td>HMR</td><td>114.89</td><td>113.27</td><td>114.82</td><td>133.25</td><td>74.77</td><td>74.45</td><td>76.35</td><td>85.30</td></tr><tr><td>SPIN</td><td>108.46</td><td>108.25</td><td>113.36</td><td>127.27</td><td>71.19</td><td>71.53</td><td>74.76</td><td>83.38</td></tr><tr><td>ImgSR</td><td>107.98</td><td>107.56</td><td>112.14</td><td>125.91</td><td>72.13</td><td>72.76</td><td>75.64</td><td>83.52</td></tr><tr><td>FeaEN</td><td>110.40</td><td>109.91</td><td>113.09</td><td>124.99</td><td>71.49</td><td>71.52</td><td>73.92</td><td>81.80</td></tr><tr><td>Ours</td><td>103.36</td><td>103.39</td><td>106.04</td><td>115.80</td><td>70.01</td><td>70.27</td><td>72.56</td><td>78.68</td></tr></table>
|
| 192 |
+
|
| 193 |
+
has focused on the problem of low-resolution 3D human shape and pose estimation, we adapt the low-resolution image recognition algorithms to our task as new baselines, including both image super-resolution based [12] and feature enhancement based [43]. For the image super-resolution based method (denoted as ImgSR), we first use a state-of-the-art network RDN [51] to super-resolve the low-resolution image, and the output is then fed into SPIN [25] for regressing the SMPL parameters. Similar to [12], the network is trained to improve both the perceptual image quality and the 3D human shape and pose estimation accuracy. For feature enhancement (denoted as FeaEN), we apply the strategy in [43] which uses a GAN loss to enhance the discriminative ability of the low-resolution features for better image retrieval performance. Nevertheless, we find the WGAN [5] used in the original work [43] does not work well in our experiments, and we instead use the LSGAN [30] combined with the basic loss (6) to train a stronger baseline network.
|
| 194 |
+
|
| 195 |
+
As shown in Table 1 and 2, the proposed method compares favorably against the baseline approaches on both 3DPW and MPI-INF-3DHP datasets for all the image resolutions. Note that we achieve significant improvement over the baselines on the 3DPW dataset as shown in Table 1, which demonstrates the effectiveness of the proposed method on the challenging in-the-wild images. We also provide a qualitative comparison against the baseline models in Figure 3, where the proposed method generates higher-quality 3D human estimation results on the challenging low-resolution input.
|
| 196 |
+
|
| 197 |
+
# 4.3 Ablation Study
|
| 198 |
+
|
| 199 |
+
We provide an ablation study using the 3DPW dataset in Figure 4 and Table 3 to evaluate the proposed resolution-aware network, self-supervision loss, and contrastive feature loss. We first compare the proposed resolution-aware network with the baseline model ResNet50 [21, 15]. As shown by "RA" and "Ba" in Table 3, our network can obtain slightly better results than the baseline network with the basic loss (6) as loss function. Further, we can achieve a more significant improvement over the baseline when adding the self-supervision loss (8) for training, i.e., "RA+SS" vs. "Ba+SS", which further demonstrates the effectiveness of the resolution-aware structure.
|
| 200 |
+
|
| 201 |
+
Table 3. Ablation study of the proposed method. Ba: baseline network with basic loss function, RA: resolution-aware network with basic loss function, SS: self-supervision loss, MS: MSE feature loss, CD: cosine distance feature loss, CL: contrastive learning feature loss.
|
| 202 |
+
|
| 203 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="4">MPJPE</td><td colspan="4">MPJPE-PA</td></tr><tr><td>176</td><td>96</td><td>52</td><td>32</td><td>176</td><td>96</td><td>52</td><td>32</td></tr><tr><td>Ba</td><td>112.26</td><td>115.18</td><td>124.88</td><td>143.63</td><td>65.04</td><td>66.41</td><td>71.12</td><td>79.43</td></tr><tr><td>Ba+SS</td><td>107.51</td><td>109.58</td><td>116.54</td><td>128.88</td><td>62.32</td><td>63.27</td><td>66.78</td><td>72.49</td></tr><tr><td>RA</td><td>111.55</td><td>112.18</td><td>118.70</td><td>135.29</td><td>64.53</td><td>68.88</td><td>68.01</td><td>75.49</td></tr><tr><td>RA+SS</td><td>102.56</td><td>104.18</td><td>110.17</td><td>124.23</td><td>60.17</td><td>60.84</td><td>63.71</td><td>69.87</td></tr><tr><td>RA+SS+MS</td><td>105.96</td><td>106.15</td><td>111.33</td><td>124.85</td><td>60.90</td><td>61.76</td><td>64.55</td><td>70.40</td></tr><tr><td>RA+SS+CD</td><td>104.95</td><td>105.96</td><td>111.41</td><td>125.08</td><td>61.29</td><td>61.91</td><td>64.30</td><td>70.17</td></tr><tr><td>RA+SS+CL</td><td>96.36</td><td>97.36</td><td>103.49</td><td>117.12</td><td>58.98</td><td>59.34</td><td>61.81</td><td>67.59</td></tr></table>
|
| 204 |
+
|
| 205 |
+
Second, we use the self-supervision loss in (8) to exploit the consistency of the outputs of the same input image with different resolutions. By comparing "RA+SS" against "RA" in Table 3, we show that the self-supervision loss is important for addressing the weak supervision issue of 3D human pose and shape estimation and thus effectively improves the results. The comparison between "Ba+SS" and "Ba" also leads to similar conclusions.
|
| 206 |
+
|
| 207 |
+
In addition, we propose to enforce the consistency of the features across different image resolutions. However, a commonly-used MSE loss does not work well, as shown in "RA+SS+MS" of Table 3, mainly because unimodal losses are not effective in modeling the correlations between high-dimensional vectors and can be easily affected by noise and insignificant structures in the embedded features [39]. In contrast, the proposed contrastive feature loss can more effectively improve the feature representations by maximizing the mutual information across the features of different resolutions, and achieves better results, as shown in "RA+SS+CL" of Table 3. Note that we adopt the cosine similarity in the contrastive feature loss (10) similar to prior methods [39, 14, 45]. Alternatively, one may only use the cosine distance for measuring the distance between two features instead of the whole contrastive loss (10). Nevertheless, this strategy does not work well, as shown by "RA+SS+CD" in Table 3, which demonstrates the effectiveness of the proposed algorithm.
|
| 208 |
+
|
| 209 |
+
Analysis of training strategies. We also provide a detailed analysis of the alternative training strategies of our model. First, as described in Section 3.2, we train our model as well as the baselines in a progressive manner to deal with the challenging multi-resolution input. As shown in the first row of Table 4 (i.e., "w/o PT"), directly training the model for all image resolutions without the progressive strategy leads to degraded results.
|
| 210 |
+
|
| 211 |
+
Second, the original self-supervision loss (7) treats the images under different augmentations equally, while we are generally more confident in the high-resolution predictions. Therefore, we propose a directional self-supervision loss in (8) to exploit this prior knowledge. As shown in the second row of Table 4
|
| 212 |
+
|
| 213 |
+

|
| 214 |
+
|
| 215 |
+

|
| 216 |
+
|
| 217 |
+

|
| 218 |
+
|
| 219 |
+

|
| 220 |
+
|
| 221 |
+

|
| 222 |
+
|
| 223 |
+

|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+

|
| 228 |
+
Fig. 4. Visual example which shows the effectiveness of the resolution-aware network, the self-supervision loss, and the contrastive learning feature loss. An alternate viewpoint of the reconstructed body is also shown.
|
| 229 |
+
|
| 230 |
+

|
| 231 |
+
|
| 232 |
+

|
| 233 |
+
|
| 234 |
+

|
| 235 |
+
|
| 236 |
+

|
| 237 |
+
|
| 238 |
+
Table 4. Analysis of the alternative training strategies. PT: Progressive Training, SS-o: original self-supervision loss, SS-h: only using the highest-resolution for supervision.
|
| 239 |
+
|
| 240 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="4">MPJPE</td><td colspan="4">MPJPE-PA</td></tr><tr><td>176</td><td>96</td><td>52</td><td>32</td><td>176</td><td>96</td><td>52</td><td>32</td></tr><tr><td>w/o PT</td><td>105.11</td><td>106.60</td><td>113.41</td><td>127.05</td><td>61.46</td><td>62.22</td><td>65.47</td><td>71.30</td></tr><tr><td>w/ SS-o</td><td>143.31</td><td>142.32</td><td>145.61</td><td>156.25</td><td>77.75</td><td>77.51</td><td>79.06</td><td>82.97</td></tr><tr><td>w/ SS-h</td><td>104.16</td><td>105.24</td><td>109.94</td><td>122.01</td><td>62.46</td><td>62.73</td><td>64.47</td><td>68.89</td></tr><tr><td>full model</td><td>96.36</td><td>97.36</td><td>103.49</td><td>117.12</td><td>58.98</td><td>59.34</td><td>61.81</td><td>67.59</td></tr></table>
|
| 241 |
+
|
| 242 |
+
(i.e., "w/ SS-o"), using the original self-supervision loss (7) is not able to achieve high-quality results, as the network can minimize (7) by simply degrading the high-resolution predictions without improving the results of low resolution. In addition, we provide hierarchical supervision for low-resolution images in (8) which can act as soft targets during training. As shown in Table 4, only using the highest-resolution predictions as guidance (i.e., "w/ SS-h") cannot produce as good results as the proposed approach (i.e., "full model").
|
| 243 |
+
|
| 244 |
+
# 5 Conclusion
|
| 245 |
+
|
| 246 |
+
In this work, we study the challenging problem of low-resolution 3D human shape and pose estimation and present an effective solution, the RSC-Net. We propose a resolution-aware neural network which can deal with different-resolution images with a single model. For training the network, we propose a directional self-supervision loss which exploits the output consistency across different resolutions to remedy the issue of lacking high-quality 3D labels. In addition, we introduce a contrastive feature loss which is more effective than MSE for measuring the similarity of high-dimensional feature vectors and helps learn better feature representations. Our method performs favorably against the state-of-the-art methods on different image resolutions and achieves high-quality results for low-resolution 3D human shape and pose estimation.
|
| 247 |
+
|
| 248 |
+
# References
|
| 249 |
+
|
| 250 |
+
1. Alldieck, T., Magnor, M., Bhatnagar, B.L., Theobalt, C., Pons-Moll, G.: Learning to reconstruct people in clothing from a single rgb camera. In: CVPR (2019) 2, 3
|
| 251 |
+
2. Alldieck, T., Magnor, M., Xu, W., Theobalt, C., Pons-Moll, G.: Video based reconstruction of 3d people models. In: CVPR (2018) 2, 3
|
| 252 |
+
3. Alldieck, T., Pons-Moll, G., Theobalt, C., Magnor, M.: Tex2shape: Detailed full human body geometry from a single image. In: ICCV (2019) 3
|
| 253 |
+
4. Andriluka, M., Pishchulin, L., Gehler, P., Schiele, B.: 2d human pose estimation: New benchmark and state of the art analysis. In: CVPR (2014) 7, 10
|
| 254 |
+
5. Arjovsky, M., Chintala, S., Bottou, L.: Wasserstein generative adversarial networks. In: ICML (2017) 12
|
| 255 |
+
6. Bogo, F., Kanazawa, A., Lassner, C., Gehler, P., Romero, J., Black, M.J.: Keep it smpl: Automatic estimation of 3d human pose and shape from a single image. In: ECCV (2016) 2, 3, 4
|
| 256 |
+
7. Chen, T., Kornblith, S., Norouzi, M., Hinton, G.: A simple framework for contrastive learning of visual representations. In: ICML (2020) 3, 9, 10
|
| 257 |
+
8. Cheng, Z., Zhu, X., Gong, S.: Low-resolution face recognition. In: ACCV (2018) 4
|
| 258 |
+
9. Doersch, C., Zisserman, A.: Sim2real transfer learning for 3d human pose estimation: motion to the rescue. In: NeurIPS (2019) 3
|
| 259 |
+
10. Ge, S., Zhao, S., Li, C., Li, J.: Low-resolution face recognition in the wild via selective knowledge distillation. TIP 28(4), 2051-2062 (2018) 4
|
| 260 |
+
11. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. In: NIPS (2014) 4
|
| 261 |
+
12. Haris, M., Shakhnarovich, G., Ukita, N.: Task-driven super resolution: Object detection in low-resolution images. arXiv:1803.11316 (2018) 4, 12
|
| 262 |
+
13. Hartley, R., Zisserman, A.: Multiple view geometry in computer vision. Cambridge university press (2003) 6
|
| 263 |
+
14. He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: CVPR (2020) 3, 9, 10, 13
|
| 264 |
+
15. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016) 6, 12
|
| 265 |
+
16. He, K., Zhang, X., Ren, S., Sun, J.: Identity mappings in deep residual networks. In: ECCV (2016) 6
|
| 266 |
+
17. Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network. arXiv:1503.02531 (2015) 9
|
| 267 |
+
18. Ionescu, C., Papava, D., Olaru, V., Sminchisescu, C.: Human3.6M: Large scale datasets and predictive methods for 3d human sensing in natural environments. TPAMI 36(7), 1325-1339 (2013) 7, 10
|
| 268 |
+
19. Johnson, S., Everingham, M.: Clustered pose and nonlinear appearance models for human pose estimation. In: BMVC (2010) 10
|
| 269 |
+
20. Johnson, S., Everingham, M.: Learning effective human pose estimation from inaccurate annotation. In: CVPR (2011) 10
|
| 270 |
+
21. Kanazawa, A., Black, M.J., Jacobs, D.W., Malik, J.: End-to-end recovery of human shape and pose. In: CVPR (2018) 2, 3, 4, 6, 8, 10, 11, 12
|
| 271 |
+
22. Kanazawa, A., Zhang, J.Y., Felsen, P., Malik, J.: Learning 3d human dynamics from video. In: CVPR (2019) 2, 3, 4, 8
|
| 272 |
+
23. Kingma, D., Ba, J.: Adam: A method for stochastic optimization. In: ICLR (2014) 10
|
| 273 |
+
|
| 274 |
+
24. Kocabas, M., Athanasiou, N., Black, M.J.: Vibe: Video inference for human body pose and shape estimation. In: CVPR (2020) 2, 3, 4, 10, 11
|
| 275 |
+
25. Kolotouros, N., Pavlakos, G., Black, M.J., Daniilidis, K.: Learning to reconstruct 3d human pose and shape via model-fitting in the loop. In: ICCV (2019) 2, 3, 4, 6, 8, 10, 11, 12
|
| 276 |
+
26. Laine, S., Aila, T.: Temporal ensembling for semi-supervised learning. In: ICLR (2017) 3, 8
|
| 277 |
+
27. Li, J., Liang, X., Wei, Y., Xu, T., Feng, J., Yan, S.: Perceptual generative adversarial networks for small object detection. In: CVPR (2017) 4
|
| 278 |
+
28. Lin, T.Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Dollár, P., Zitnick, C.L.: Microsoft coco: Common objects in context. In: ECCV (2014) 7, 10
|
| 279 |
+
29. Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: Smpl: A skinned multi-person linear model. ACM Transactions on Graphics 34(6), 248 (2015) 3, 5
|
| 280 |
+
30. Mao, X., Li, Q., Xie, H., Lau, R.Y., Wang, Z., Paul Smolley, S.: Least squares generative adversarial networks. In: ICCV (2017) 12
|
| 281 |
+
31. von Marcard, T., Henschel, R., Black, M.J., Rosenhahn, B., Pons-Moll, G.: Recovering accurate 3d human pose in the wild using imus and a moving camera. In: ECCV (2018) 7, 10, 11
|
| 282 |
+
32. Mehta, D., Rhodin, H., Casas, D., Fua, P., Sotnychenko, O., Xu, W., Theobalt, C.: Monocular 3d human pose estimation in the wild using improved cnn supervision. In: 3DV (2017) 7, 10, 11, 12
|
| 283 |
+
33. Nair, V., Hinton, G.E.: Rectified linear units improve restricted boltzmann machines. In: ICML (2010) 6
|
| 284 |
+
34. Natsume, R., Saito, S., Huang, Z., Chen, W., Ma, C., Li, H., Morishima, S.: Siclope: Silhouette-based clothed people. In: CVPR (2019) 2, 3
|
| 285 |
+
35. Neumann, L., Vedaldi, A.: Tiny people pose. In: ACCV (2018) 2, 4, 5
|
| 286 |
+
36. Nishibori, K., Takahashi, T., Deguchi, D., Ide, I., Murase, H.: Exemplar-based human body super-resolution for surveillance camera systems. In: International Conference on Computer Vision Theory and Applications (VISAPP) (2014) 2
|
| 287 |
+
37. Noh, J., Bae, W., Lee, W., Seo, J., Kim, G.: Better to follow, follow to be better: Towards precise supervision of feature super-resolution for small object detection. In: ICCV (2019) 4
|
| 288 |
+
38. Oh, S., Hoogs, A., Perera, A., Cuntoor, N., Chen, C.C., Lee, J.T., Mukherjee, S., Aggarwal, J., Lee, H., Davis, L., et al.: A large-scale benchmark dataset for event recognition in surveillance video. In: CVPR (2011) 2
|
| 289 |
+
39. Oord, A.v.d., Li, Y., Vinyals, O.: Representation learning with contrastive predictive coding. arXiv:1807.03748 (2018) 3, 5, 9, 13
|
| 290 |
+
40. Pavlakos, G., Zhu, L., Zhou, X., Daniilidis, K.: Learning to estimate 3d human pose and shape from a single color image. In: CVPR (2018) 2, 3
|
| 291 |
+
41. Pumarola, A., Sanchez-Riera, J., Choi, G., Sanfeliu, A., Moreno-Noguer, F.: 3dpeople: Modeling the geometry of dressed humans. In: ICCV (2019) 2, 3, 4
|
| 292 |
+
42. Saito, S., Huang, Z., Natsume, R., Morishima, S., Kanazawa, A., Li, H.: Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In: ICCV (2019) 2, 3, 4
|
| 293 |
+
43. Tan, W., Yan, B., Bare, B.: Feature super-resolution: Make machine see more clearly. In: CVPR (2018) 4, 12
|
| 294 |
+
44. Tarvainen, A., Valpola, H.: Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. In: NIPS (2017) 3
|
| 295 |
+
45. Tian, Y., Krishnan, D., Isola, P.: Contrastive multiview coding. arXiv preprint arXiv:1906.05849 (2019) 9, 13
|
| 296 |
+
|
| 297 |
+
46. Wang, Z., Chang, S., Yang, Y., Liu, D., Huang, T.S.: Studying very low resolution recognition using deep networks. In: CVPR (2016) 2, 4
|
| 298 |
+
47. Xu, X., Ma, Y., Sun, W.: Towards real scene super-resolution with raw images. In: CVPR (2019) 2
|
| 299 |
+
48. Xu, X., Sun, D., Pan, J., Zhang, Y., Pfister, H., Yang, M.H.: Learning to superresolve blurry face and text images. In: ICCV (2017) 2, 4
|
| 300 |
+
49. Zanfir, A., Marinoiu, E., Sminchisescu, C.: Monocular 3d pose and shape estimation of multiple people in natural scenes-the importance of multiple scene constraints. In: CVPR (2018) 3
|
| 301 |
+
50. Zhang, J.Y., Felsen, P., Kanazawa, A., Malik, J.: Predicting 3d human dynamics from video. In: ICCV (2019) 3, 4
|
| 302 |
+
51. Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image super-resolution. In: CVPR (2018) 12
|
| 303 |
+
52. Zheng, Z., Yu, T., Wei, Y., Dai, Q., Liu, Y.: Deephuman: 3d human reconstruction from a single image. In: ICCV (2019) 2, 3
|
3dhumanshapeandposefromasinglelowresolutionimagewithselfsupervisedlearning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4797cb7ef8627fbba8038a7f39f050ae30e168723dd0a45b4404b8921cfbc2e7
|
| 3 |
+
size 429341
|
3dhumanshapeandposefromasinglelowresolutionimagewithselfsupervisedlearning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e02eaa2d27ea0fe5013a22a3fdac18768b0db0cf936b99283fbb7da7eaf40b47
|
| 3 |
+
size 420339
|
3dhumanshapereconstructionfromapolarizationimage/125ff13a-b1e2-45fc-bffd-467e0bdbd0c5_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9baa188f93cb57620f7235c4aa3f78ef44252a427174f25e834412efe346a463
|
| 3 |
+
size 76095
|
3dhumanshapereconstructionfromapolarizationimage/125ff13a-b1e2-45fc-bffd-467e0bdbd0c5_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c1ebcc0d687f98af3737d9f0403a11ecd22bbebe9803e6ebde5865f8f2666a91
|
| 3 |
+
size 96560
|
3dhumanshapereconstructionfromapolarizationimage/125ff13a-b1e2-45fc-bffd-467e0bdbd0c5_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4b7529a35fee2fdf4d5309024e3c279a7057d45d61154d33c6c7284e2efde44e
|
| 3 |
+
size 2520580
|
3dhumanshapereconstructionfromapolarizationimage/full.md
ADDED
|
@@ -0,0 +1,259 @@
| 1 |
+
# 3D Human Shape Reconstruction from a Polarization Image
|
| 2 |
+
|
| 3 |
+
Shihao Zou $^{1}$ , Xinxin Zuo $^{1}$ , Yiming Qian $^{2}$ , Sen Wang $^{1}$ , Chi Xu $^{3}$ , Minglun Gong $^{4}$ , and Li Cheng $^{1}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup> University of Alberta
|
| 6 |
+
2 Simon Fraser University
|
| 7 |
+
|
| 8 |
+
$^{3}$ School of Automation, China University of Geosciences, Wuhan 430074, China
|
| 9 |
+
|
| 10 |
+
<sup>4</sup> University of Guelph {szou2, xzuo, sen9, lcheng5}@ualberta.ca, yimingq@sfu.ca, xuchi@cug.edu.cn, minglun@uoguelph.ca
|
| 11 |
+
|
| 12 |
+
Abstract. This paper tackles the problem of estimating the 3D body shape of clothed humans from single polarized 2D images, i.e. polarization images. Polarization images are known to capture polarized reflected light that preserves rich geometric cues of an object, which has motivated their recent use in reconstructing the surface normal of objects of interest. Inspired by recent advances in human shape estimation from single color images, in this paper we attempt to estimate human body shapes by leveraging the geometric cues from single polarization images. A dedicated two-stage deep learning approach, SfP, is proposed: given a polarization image, stage one aims at inferring the fine-detailed body surface normal; stage two reconstructs the 3D body shape with clothing details. Empirical evaluations on a synthetic dataset (SURREAL) as well as a real-world dataset (PHSPD) demonstrate the qualitative and quantitative performance of our approach in estimating human poses and shapes. This indicates that the polarization camera is a promising alternative to the more conventional color or depth imaging for human shape estimation. Further, normal maps inferred from polarization imaging play a significant role in accurately recovering the body shapes of clothed people.
|
| 13 |
+
|
| 14 |
+
Keywords: Human Pose and Shape Estimation, Clothed 3D Human body, Shape from Polarization
|
| 15 |
+
|
| 16 |
+
# 1 Introduction
|
| 17 |
+
|
| 18 |
+
Compared to the task of color-image based pose estimation [1-20], which predicts the 3D joint positions of an articulated skeleton, human shapes provide much richer information about a human body in 3D and are visually more appealing. Estimating them, on the other hand, remains a challenging problem, partly owing to the relatively high-dimensional space of human body shapes. The issue is somewhat alleviated by emerging low-dimensional models of human shape, such as SCAPE [21] and SMPL [22], statistical models that are learned from large sets of carefully
|
| 19 |
+
|
| 20 |
+
scanned 3D body shapes. Based on these low-dimensional human shape representations, a number of end-to-end deep learning methods [23-37] are subsequently developed to estimate human shapes directly from color images. The predicted human shapes, however, are usually naked and lacking in surface details, since e.g. SMPL model is learned from naked human body scans.
|
| 21 |
+
|
| 22 |
+
Volume-based techniques [38, 39] are widely used in capturing surface details of a clothed human body from a single image. Due to finite computational resources, the estimated human shapes from these methods are usually of low resolution. Saito et al. [40] remedy this by predicting a pixel-aligned implicit surface function that captures a more detailed body surface. It however relies on a large training set of detailed 3D human bodies, and the method is still unable to handle complex poses. In the meantime, the methods of [41] and [42] aim to exploit additional geometric cues arising from color image inputs; [41] focuses on predicting fine depth maps, and [42] exploits shading. Unfortunately, accurate and reliable prediction of these geometric cues from a color image is yet another challenging issue - it remains unclear how much one can leverage from such cues. Motivated by these efforts and their limitations, we consider in this paper a new 2D imaging modality, the polarization camera, which is known to better preserve fine-scale geometric properties of 3D objects, including human shapes. The intuition comes from a basic physics principle: when a light ray reflects off an object, it is polarized and conveys ample geometric cues concerning local surface details of the object, usually represented as surface normals [43, 44]. It is worth noting that some biological species are even able to directly perceive light polarization [45, 46], which significantly facilitates their 3D sensing. Empirically, our experiments support that the surface normal maps obtained from the input 2D polarization images play an instrumental role in producing accurate and reliable 3D clothed human shapes.
|
| 23 |
+
|
| 24 |
+
As shown in Fig. 1, our approach, also called SfP, contains two stages. Stage 1 concentrates on predicting accurate surface normal maps from single polarization images by exploiting the associated physics laws as priors. It is then fed into stage 2 in reconstructing the final clothed human shape.
|
| 25 |
+
|
| 26 |
+
Unlike existing efforts in normal map prediction [41, 42], our approach predicts normal maps by explicitly incorporating the underlying physical laws of polarization imaging, which results in more reliable performance. To achieve this, there are two main challenges we need to overcome, namely $\pi$ -ambiguity of the azimuth angle and the possibly large noise in practical applications. To this end we introduce two ambiguous normal maps $\mathbf{n}_1$ and $\mathbf{n}_2$ (Sec. 3.1) as a physical prior, based on the assumption that the light reflected by human clothing is mostly diffused. Different from [44], each pixel is then classified into one of the three types: the two ambiguous normal maps and background. This is followed by a refinement step to deliver the final surface normal prediction of $\hat{\mathbf{n}}$ , that accounts for the possibly-noisy fused normal map output owing to environmental noise and the digital quantization of the polarization camera. Based on the
|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
Fig. 1. Given a single polarization image, a two-stage process is executed in our approach. (1) Stage 1, in blue, estimates the surface normal from the polarization image based on the physical assumption that reflected light from an object is polarized. After calculating the two ambiguous normal maps, $(\mathbf{n}_1,\mathbf{n}_2)$, as physical priors from the polarization image (see Sec. 3.1 for details), image pixels are classified as belonging to either of the two normals or the background, thus obtaining the fused normal $\mathbf{n}_3$. Unfortunately, this normal is often noisy, thus a further step is carried out to regress a final accurate surface normal $\hat{\mathbf{n}}$ by integrating these physical normal maps and the raw polarization image. (2) Stage 2, in orange, concatenates the polarization image and the surface normal as the input to estimate the clothed body shape in two steps. The first step focuses on estimating the parameters of SMPL, a rough & naked shape model parameterized by $\Theta$; the pose (3D joint positions) $\mathbf{J}$ is directly obtained as a by-product of the rigged shape model. The next step deforms the SMPL shape guided by the final surface normal of stage 1, to reconstruct the refined 3D human shape with clothing details.
|
| 30 |
+
|
| 31 |
+
raw polarization image and output of stage 1, stage 2 concerns the estimation of clothed human shape. It starts from predicting a coarse SMPL shape model, which is then deformed by leveraging the geometric details from surface normal, our stage 1 output, to form the final human shape. Empirically our two-stage pipeline is shown to be capable of accurately reconstructing human shapes, while retaining clothing details such as cloth wrinkles.
|
| 32 |
+
|
| 33 |
+
To summarize, there are two main contributions in this work. (1) A new problem of inferring high-resolution 3D human shapes from a single polarization image is proposed and investigated. This led us to curate a dedicated Polarization Human Shape and Pose Dataset (PHSPD). (2) A dedicated deep learning approach, SfP, is proposed<sup>6</sup>, where the detail-preserving surface normal maps
|
| 34 |
+
|
| 35 |
+
are obtained following the physical laws and are shown to significantly improve the reconstruction performance of clothed human shapes. Empirical evaluations on the synthetic SURREAL dataset as well as a real-world dataset demonstrate the applicability of our approach. Our work provides sound evidence for engaging a 2D polarization camera to estimate 3D human poses and shapes, a viable alternative to conventional 2D color or 3D depth cameras.
|
| 36 |
+
|
| 37 |
+
# 2 Related Work
|
| 38 |
+
|
| 39 |
+
Shape from polarization (SfP) focuses on the inference of shape (normally represented as surface normal) from the polarimetric information in the multiple channels of a polarization image, captured under linear polarizers with different angles. The main issue of SfP is angle ambiguity. Previous methods are mainly physics-based and rely on additional information or assumptions to resolve the possible ambiguities, such as smooth object surfaces [47], coarse depth maps [48, 43] and multi-view geometric constraints [49, 50]. The recent work of [44] proposes to blend physical priors (ambiguous normal maps) with deep learning in uncovering the normal map. Using physical priors as part of the input, a deep learning model can then be trained to account for the ambiguity and be noise-resilient. We improve upon [44] by classifying each pixel as one of the ambiguous normals or background, and regressing the normal given the ambiguous and classified physical priors.
|
| 40 |
+
|
| 41 |
+
3D human pose estimation from single images has been extensively investigated in the past five years, centering around color or depth imaging. Many of the studies [51-57] utilize dictionary-based learning strategies. More recent efforts aim to directly regress 3D pose using deep learning techniques, including CNNs [1-3] and Graph CNNs [58, 59]. In particular, several recent efforts [4-12, 12-20] look into a common framework of estimating 2D pose (either 2D joint positions or heatmaps), which is then lifted to 3D. Ideas from self-supervised learning [20, 17] and adversarial learning [11, 18] have also gained attention, e.g., for predicting 3D pose under additional constraints imposed by re-projection or adversarial losses.
|
| 42 |
+
|
| 43 |
+
Human shape estimation from single images has drawn growing attention recently, thanks to the development of the SCAPE and SMPL human shape models [21, 22]. These two statistical models learn low-dimensional representations of human shape from large corpora of human body scans. Together with deep learning techniques, it has since been feasible to estimate human body shapes from single color or depth images. Earlier activities focus more on optimizing the SCAPE or SMPL model parameters toward better fitting to various dedicated visual or internal representations, such as foreground silhouettes [23-25] and pose [26, 27]. Deep learning based approaches are more commonplace in recent efforts [28-31], which typically learn to predict the SMPL parameters by incorporating constraints from 2/3D pose, silhouettes, as well as adversarial learning losses. [32] takes the body pixel-to-surface correspondence map as a proxy representation
|
| 44 |
+
|
| 45 |
+
and then performs estimation of parameterized human pose and shape. In [33], optimization-based methods [26] and regression-based methods [28] are combined to form a self-improving fitting loop. A point cloud is considered as input in [60] to regress SMPL parameters. Instead of single color images, our work is based on a single polarization image; rather than inferring a coarse human body shape, we aim to recover high-resolution human shapes.
|
| 46 |
+
|
| 47 |
+
As for the estimation of clothed human shape, volume-based methods [38-40] are proposed to reconstruct textured body shapes. They unfortunately suffer from the low-resolution issue of the volumetric representation. Our work is closely related to [42], which combines the robustness of a parametric model and the flexibility of free-form 3D deformation in a hierarchical manner. The major difference is that the clothing details in our work are provided by the reliable normal map estimated from the polarization image, whereas the network in [42] deforms a depth image by employing shading information trained on additional data, which is inherently unreliable due to the lack of ground-truth information on surface normal, albedo and environmental lighting. Our work is also related to [41], which recovers detailed human shape from a color image by iteratively incorporating both a rough depth map and an estimated surface normal for improved surface details.
|
| 48 |
+
|
| 49 |
+
# 3 The Proposed SfP Approach
|
| 50 |
+
|
| 51 |
+
There are two main stages in our approach: (1) estimate surface normal from a single polarization image; (2) estimate human pose and shape from the estimated surface normal and the raw polarization image, followed by body shape refinement from the estimated surface normal.
|
| 52 |
+
|
| 53 |
+
# 3.1 Surface Normal Estimation
|
| 54 |
+
|
| 55 |
+
The light reflected from a surface mainly comprises three components [50]: polarized specular reflection, polarized diffuse reflection, and unpolarized diffuse reflection. A polarization camera has an array of linear polarizers mounted right on top of the CMOS imager, similar to an RGB Bayer filter. During the imaging process of a polarization camera, a pixel intensity typically varies sinusoidally with the angle of the polarizer [43]. In this work, we assume that the light reflected off human clothes is dominated by polarized and unpolarized diffuse reflection. For a specific polarizer angle $\phi_{\mathrm{pol}}$ , the illumination intensity at a pixel with dominant diffuse reflection is
|
| 56 |
+
|
| 57 |
+
$$
|
| 58 |
+
\mathrm{I}\left(\phi_{\mathrm{pol}}\right) = \frac{\mathrm{I}_{\max} + \mathrm{I}_{\min}}{2} + \frac{\mathrm{I}_{\max} - \mathrm{I}_{\min}}{2}\cos\left(2\left(\phi_{\mathrm{pol}} - \varphi\right)\right). \tag{1}
|
| 59 |
+
$$
|
| 60 |
+
|
| 61 |
+
Here $\varphi$ is the azimuth angle of surface normal, $\mathrm{I}_{\mathrm{max}}$ and $\mathrm{I}_{\mathrm{min}}$ are the upper and lower bounds of the illumination intensity. $\mathrm{I}_{\mathrm{max}}$ and $\mathrm{I}_{\mathrm{min}}$ are mainly determined by the unpolarized diffuse reflection, and the sinusoidal variation is mainly determined by the polarized diffuse reflection. Note that there is $\pi$ -ambiguity in the azimuth angle $\varphi$ in Eq. (1), which means that $\varphi$ and $\pi + \varphi$ will result in the
|
| 62 |
+
|
| 63 |
+
same illumination intensity of the pixel. As for the zenith angle $\theta$ , it is related to the degree of polarization $\rho$ , where
|
| 64 |
+
|
| 65 |
+
$$
|
| 66 |
+
\rho = \frac{\mathrm{I}_{\max} - \mathrm{I}_{\min}}{\mathrm{I}_{\max} + \mathrm{I}_{\min}}. \tag{2}
|
| 67 |
+
$$
|
| 68 |
+
|
| 69 |
+
According to [47], when diffuse reflection dominates, the degree of polarization $\rho$ is a function of the zenith angle $\theta$ and the refractive index $n$ ,
|
| 70 |
+
|
| 71 |
+
$$
|
| 72 |
+
\rho = \frac{\left(n - \frac{1}{n}\right)^{2}\sin^{2}\theta}{2 + 2n^{2} - \left(n + \frac{1}{n}\right)^{2}\sin^{2}\theta + 4\cos\theta\sqrt{n^{2} - \sin^{2}\theta}}. \tag{3}
|
| 73 |
+
$$
|
| 74 |
+
|
| 75 |
+
In this paper, we assume the refractive index $n = 1.5$ since the material of human clothes is mainly cotton or nylon. With $n$ known, $\theta$ can be solved from Eq. (3) in closed form as a function of $n$ and $\rho$ .
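To make Eqs. (1)-(3) concrete, the following minimal sketch (our own illustration, not the authors' code) recovers the $\pi$ -ambiguous azimuth $\varphi$ , the degree of polarization $\rho$ , and the zenith angle $\theta$ per pixel. It assumes the polarization camera provides intensities at the four polarizer angles $0^{\circ}$ , $45^{\circ}$ , $90^{\circ}$ and $135^{\circ}$ , and it inverts Eq. (3) numerically by bisection instead of using the closed-form expression.

```python
import numpy as np

def azimuth_and_dop(i0, i45, i90, i135):
    """Per-pixel azimuth phi and degree of polarization rho from four
    polarizer-angle intensities (Stokes-style estimate); phi is pi-ambiguous."""
    s0 = 0.5 * (i0 + i45 + i90 + i135)           # total intensity
    s1 = i0 - i90                                # linear polarization components
    s2 = i45 - i135
    phi = 0.5 * np.arctan2(s2, s1)               # azimuth, known only up to pi
    rho = np.sqrt(s1**2 + s2**2) / np.maximum(s0, 1e-8)
    return phi, rho

def dop_from_zenith(theta, n=1.5):
    """Eq. (3): degree of polarization of diffuse reflection as a function of
    the zenith angle theta and the refractive index n."""
    s2 = np.sin(theta) ** 2
    num = (n - 1.0 / n) ** 2 * s2
    den = 2 + 2 * n**2 - (n + 1.0 / n) ** 2 * s2 + 4 * np.cos(theta) * np.sqrt(n**2 - s2)
    return num / den

def zenith_from_dop(rho, n=1.5, iters=60):
    """Invert Eq. (3) for theta by bisection on [0, pi/2); rho is monotone in theta,
    so any rho above the physical maximum simply converges to pi/2."""
    lo = np.zeros_like(rho)
    hi = np.full_like(rho, np.pi / 2 - 1e-6)
    for _ in range(iters):
        mid = 0.5 * (lo + hi)
        too_small = dop_from_zenith(mid, n) < rho
        lo = np.where(too_small, mid, lo)
        hi = np.where(too_small, hi, mid)
    return 0.5 * (lo + hi)
```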
|
| 76 |
+
|
| 77 |
+
Taking into account the $\pi$ -ambiguity of $\varphi$ , we have two possible solutions to the surface normal for each pixel, which form the physical priors. We propose to train a network to classify each pixel into three categories: background, ambiguous normal $\mathbf{n}_1(\varphi, \theta)$ and ambiguous normal $\mathbf{n}_2(\pi + \varphi, \theta)$ , with probabilities $p_0$ , $p_1$ , and $p_2$ , respectively. Then we have the fused normal as follows,
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
\mathbf{n}_{3} = \left(1 - p_{0}\right)\cdot\frac{p_{1}\mathbf{n}_{1} + p_{2}\mathbf{n}_{2}}{\left\| p_{1}\mathbf{n}_{1} + p_{2}\mathbf{n}_{2}\right\|_{2}}, \tag{4}
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
where $(1 - p_0)$ is a soft mask of the foreground human body. Unfortunately, due to environmental noise and the digital quantization of the camera in real-world applications, the fused normal map $\mathbf{n}_3$ is noisy and non-smooth. Thus, taking the fused noisy normal as an improved physical prior, a denoising network is further trained to take both the polarization image and the physical priors $(\mathbf{n}_1,\mathbf{n}_2,\mathbf{n}_3)$ as input, and to produce a smoothed normal $\hat{\mathbf{n}}$ . The loss function for normal estimation consists of the cross-entropy (CE) loss of classification and the L1 loss on the cosine similarity,
|
| 84 |
+
|
| 85 |
+
$$
|
| 86 |
+
L_{n} = \frac{1}{HW}\sum_{i = 1}^{H}\sum_{j = 1}^{W}\left[\lambda_{c}\,\operatorname{CE}\left(y^{i,j}, p^{i,j}\right) + \lambda_{n}\left(1 - \langle\hat{\mathbf{n}}^{i,j},\mathbf{n}^{i,j}\rangle\right)\right], \tag{5}
|
| 87 |
+
$$
|
| 88 |
+
|
| 89 |
+
where $\lambda_{c}$ and $\lambda_{n}$ are the weights of each loss, $y^{i,j}$ is the label indicating which category the pixel $(i,j)$ belongs to, and $\langle \hat{\mathbf{n}}^{i,j},\mathbf{n}^{i,j}\rangle$ denotes the cosine similarity between the predicted and target normal vectors of pixel $(i,j)$ . Note that the category label $y^{i,j}$ is created by checking whether the pixel is background and, if not, which ambiguous normal has the higher cosine similarity with the target normal. $\lambda_{c}$ and $\lambda_{n}$ are set to 2 and 1, respectively, in our experiments.
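To illustrate how the physical priors, the fusion of Eq. (4) and the loss of Eq. (5) fit together, here is a minimal PyTorch-style sketch. The function names, the spherical parameterization of the normal from $(\varphi, \theta)$ , and the averaging of the cosine term over all pixels are our own simplifying assumptions rather than the authors' implementation.

```python
import math
import torch
import torch.nn.functional as F

def ambiguous_normals(phi, theta):
    """Two candidate normals n1(phi, theta) and n2(phi + pi, theta) per pixel.
    phi, theta: tensors of shape (H, W). Returns two (3, H, W) maps."""
    def normal(az):
        return torch.stack([torch.sin(theta) * torch.cos(az),
                            torch.sin(theta) * torch.sin(az),
                            torch.cos(theta)], dim=0)
    return normal(phi), normal(phi + math.pi)

def fuse_normals(n1, n2, logits):
    """Eq. (4): probability-weighted fusion of the two ambiguous normals.
    logits: (3, H, W) unnormalized scores for {background, n1, n2}."""
    p = torch.softmax(logits, dim=0)               # p0, p1, p2
    blend = p[1:2] * n1 + p[2:3] * n2
    blend = blend / blend.norm(dim=0, keepdim=True).clamp_min(1e-8)
    return (1.0 - p[0:1]) * blend                  # soft foreground mask (1 - p0)

def normal_loss(logits, n_hat, y, n_gt, lambda_c=2.0, lambda_n=1.0):
    """Eq. (5): cross-entropy over the 3 classes plus (1 - cosine similarity)
    between the predicted smoothed normal n_hat and the target normal n_gt.
    logits: (3, H, W); n_hat, n_gt: (3, H, W) unit normals; y: (H, W) long labels."""
    ce = F.cross_entropy(logits.unsqueeze(0), y.unsqueeze(0))
    cos = (n_hat * n_gt).sum(dim=0)                # per-pixel cosine similarity
    return lambda_c * ce + lambda_n * (1.0 - cos).mean()
```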
|
| 90 |
+
|
| 91 |
+
# 3.2 Human Pose and Shape Estimation
|
| 92 |
+
|
| 93 |
+
To start with, we use the SMPL representation [22] for describing 3D human shapes: a differentiable function $\mathcal{M}(\pmb {\beta},\pmb {\theta})\in \mathbb{R}^{6,890\times 3}$ that outputs a
|
| 94 |
+
|
| 95 |
+
triangular mesh with 6,890 vertices given 82 parameters $[\pmb{\beta},\pmb{\theta}]$ . The shape parameter $\pmb{\beta} \in \mathbb{R}^{10}$ contains the linear coefficients of a PCA shape space that mainly determines individual body features such as height, weight and body proportions. The PCA shape space is learned from a large dataset of body scans [22]. $\pmb{\theta} \in \mathbb{R}^{72}$ is the pose parameter that mainly describes the articulated pose, consisting of one global rotation of the body and the relative rotations of 23 joints in axis-angle representation. The body mesh is produced by first applying shape-dependent and pose-dependent deformations to the template mesh, then using forward kinematics to articulate the body into its current pose, and finally deforming the surface mesh by linear blend skinning. $\mathbf{J} \in \mathbb{R}^{24 \times 3}$ are the 3D joint positions, which can be obtained by linear regression from the output mesh vertices.
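For readers who wish to reproduce the SMPL forward pass described above, a minimal sketch using the third-party smplx package is given below. The model path is a placeholder, this is not the authors' code, and the exact joint set returned depends on the package configuration (the SMPL skeleton joints come first).

```python
import torch
import smplx  # third-party SMPL implementation; assumes the model files are downloaded

# Build a neutral SMPL model; "models/" is a placeholder path to the model files.
model = smplx.create("models/", model_type="smpl", gender="neutral")

betas = torch.zeros(1, 10)          # shape coefficients beta (PCA space)
global_orient = torch.zeros(1, 3)   # global body rotation (axis-angle)
body_pose = torch.zeros(1, 69)      # 23 joint rotations (axis-angle), 23 * 3 = 69
transl = torch.zeros(1, 3)          # global translation t

out = model(betas=betas, global_orient=global_orient,
            body_pose=body_pose, transl=transl)
vertices = out.vertices             # (1, 6890, 3) mesh vertices
joints = out.joints                 # regressed 3D joint positions
```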
|
| 96 |
+
|
| 97 |
+
In addition to the SMPL parameters, we also need to predict the global translation $\mathbf{t} \in \mathbb{R}^3$ . Thus, for the task of human pose and shape estimation, the output is an 85-dimensional vector $\hat{\Theta} = [\hat{\boldsymbol{\beta}}, \hat{\boldsymbol{\theta}}, \hat{\mathbf{t}}]$ . Given $\hat{\Theta}$ , we can also obtain the predicted 3D joint positions $\hat{\mathbf{J}}$ . The loss function is then defined as
|
| 98 |
+
|
| 99 |
+
$$
|
| 100 |
+
L_{s} = \lambda_{\beta}\|\boldsymbol{\beta} - \hat{\boldsymbol{\beta}}\|_{2}^{2} + \lambda_{\theta}\|\boldsymbol{\theta} - \hat{\boldsymbol{\theta}}\|_{2}^{2} + \lambda_{t}\|\mathbf{t} - \hat{\mathbf{t}}\|_{2}^{2} + \lambda_{J}\|\mathbf{J} - \hat{\mathbf{J}}\|_{2}^{2}, \tag{6}
|
| 101 |
+
$$
|
| 102 |
+
|
| 103 |
+
where $\lambda_{\beta}$ , $\lambda_{\theta}$ , $\lambda_{t}$ and $\lambda_{J}$ are weights of each component in the loss function, which are fixed to 0.2, 0.5, 100, and 3, respectively.
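A direct transcription of Eq. (6) with the reported weights might look as follows (a sketch in PyTorch; batching and reduction choices are our own assumptions).

```python
import torch

def pose_shape_loss(beta, beta_hat, theta, theta_hat, t, t_hat, J, J_hat,
                    lambda_beta=0.2, lambda_theta=0.5, lambda_t=100.0, lambda_J=3.0):
    """Eq. (6): weighted sum of squared errors on shape, pose, translation
    and 3D joints, using the weights reported in the paper."""
    return (lambda_beta  * (beta  - beta_hat ).pow(2).sum()
          + lambda_theta * (theta - theta_hat).pow(2).sum()
          + lambda_t     * (t     - t_hat    ).pow(2).sum()
          + lambda_J     * (J     - J_hat    ).pow(2).sum())
```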
|
| 104 |
+
|
| 105 |
+
The reconstructed SMPL human shape thus far is a naked 3D shape lacking fine surface details. Our goal is to refine this intermediate naked shape under the guidance of our smoothed surface normal estimate, which is carried out as follows. The SMPL body shape is rendered on the image plane to form a base depth map. The technique of [61] is then employed to obtain an optimized depth map from the predicted surface normal and the base depth map. It is carried out under three constraints: first, the predicted normal should be perpendicular to the local tangent of the optimized depth surface; second, the optimized depth should be close to the base depth; third, a smoothness constraint is enforced on nearby pixels of the optimized depth map. This depth map is obtained as the solution of a linear least-squares system. Weights of the normal term, the depth data term, and the smoothness term are empirically set to 1.0, 0.06, and 0.55, respectively. Finally, our clothed body shape is produced by upsampling and deforming the SMPL mesh according to the Laplacian of the optimized depth map.
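The refinement step above amounts to one sparse linear least-squares problem over the per-pixel depths. The sketch below is a simplified illustration under stated assumptions (orthographic pixel-sized tangents, no foreground masking, image-axis sign conventions that may need adjusting), not the authors' implementation of [61].

```python
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import lsqr

def refine_depth(z_base, normals, w_n=1.0, w_d=0.06, w_s=0.55):
    """Least-squares depth refinement in the spirit of [61]:
    (i) the predicted normal is perpendicular to the local surface tangents,
    (ii) the depth stays close to the base depth rendered from the SMPL mesh,
    (iii) nearby depths are smooth.
    z_base: (H, W) base depth; normals: (H, W, 3) unit normals (nx, ny, nz)."""
    H, W = z_base.shape
    idx = np.arange(H * W).reshape(H, W)
    rows, cols, vals, rhs = [], [], [], []

    def add(r, c_list, v_list, b):
        for c, v in zip(c_list, v_list):
            rows.append(r); cols.append(c); vals.append(v)
        rhs.append(b)

    r = 0
    nx, ny, nz = normals[..., 0], normals[..., 1], normals[..., 2]
    for i in range(H):
        for j in range(W):
            p = idx[i, j]
            # normal term: n . (1, 0, z[i, j+1] - z[i, j]) = 0, and likewise along y
            if j + 1 < W:
                add(r, [idx[i, j + 1], p], [w_n * nz[i, j], -w_n * nz[i, j]],
                    -w_n * nx[i, j]); r += 1
            if i + 1 < H:
                add(r, [idx[i + 1, j], p], [w_n * nz[i, j], -w_n * nz[i, j]],
                    -w_n * ny[i, j]); r += 1
            # depth data term: z = z_base
            add(r, [p], [w_d], w_d * z_base[i, j]); r += 1
            # smoothness term: z - mean(neighbors) = 0
            nbrs = [idx[a, b] for a, b in [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]
                    if 0 <= a < H and 0 <= b < W]
            add(r, [p] + nbrs, [w_s] + [-w_s / len(nbrs)] * len(nbrs), 0.0); r += 1

    A = coo_matrix((vals, (rows, cols)), shape=(r, H * W)).tocsr()
    z = lsqr(A, np.asarray(rhs))[0]
    return z.reshape(H, W)
```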
|
| 106 |
+
|
| 107 |
+
# 3.3 Polarization Human Pose and Shape Dataset
|
| 108 |
+
|
| 109 |
+
To facilitate empirical evaluation of our approach in real-world scenarios, a home-grown dataset is curated, referred to as the Polarization Human Shape and Pose Dataset (PHSPD). A complete description of the PHSPD dataset is provided in [62]. In the data acquisition stage, a system of four soft-synchronized cameras is used, consisting of one polarization camera and three Kinect v2 devices, each Kinect v2 having a depth camera and a color camera. Twelve subjects are recruited for data collection, 9 male and 3 female. Each subject performs 3 different
|
| 110 |
+
|
| 111 |
+
groups of actions (out of 18 different action types) 4 times, plus an additional period of free-form motion at the end of the session. Thus, for each subject, there are 13 short videos (around 1,800 frames per video at 10-15 FPS); the total number of frames for each subject amounts to $22\mathrm{K}$ . Overall, our dataset consists of $287\mathrm{K}$ frames, where each frame contains a synchronized set of images - one polarization image, three color images and three depth images.
|
| 112 |
+
|
| 113 |
+
The SMPL shape parameters and the 3D joint positions of a body shape are obtained from the image collection of the current frame as follows. For each frame, an initial 3D pose estimate is obtained by integrating the Kinect readouts with the corresponding 2D joint estimates from OpenPose [63] across the depth and color sensors. Then the body shape, i.e. the parameters of the SMPL model, is estimated as the optimal fit to the initial pose estimate [26]. The 3D point clouds of the body surface acquired from the three depth cameras are utilized in our final step, resulting in a refined body shape with clothing details [64], obtained by iteratively minimizing the distance of each SMPL shape vertex to its nearest point in the 3D point cloud. Exemplar clothed human shapes are shown in Fig. 2.
|
| 114 |
+
|
| 115 |
+

|
| 116 |
+
Fig. 2. Exemplar 3D poses and SMPL shapes in the real-world PHSPD dataset. We render the SMPL shape on four images (one polarization image and three-view color images) and we also show the pose in 3D space.
|
| 117 |
+
|
| 118 |
+
# 4 Empirical Evaluations
|
| 119 |
+
|
| 120 |
+
Empirical evaluations are carried out on two major aspects. (1) For normal estimation, we report the mean angle error (MAE), which measures the angle between the target and estimated normal map, $e_{\mathrm{angle}} = \arccos (\langle \mathbf{n}^{i,j},\hat{\mathbf{n}}^{i,j}\rangle)$ for
|
| 121 |
+
|
| 122 |
+
pixel $(i,j)$ , where $\langle \cdot, \cdot \rangle$ denotes cosine similarity. (2) For human pose and shape estimation, we report the mean per joint position error (MPJPE) and the 3D surface distance error. MPJPE is defined as the average distance between predicted and annotated joints over the test samples. Both the SURREAL and PHSPD datasets provide 24 annotated 3D joints. We also report the MPJPE for 20 joints by removing the hand and foot joints. The 3D surface error measures the distance between the predicted mesh and the ground-truth mesh as the average distance over vertex pairs, as follows: for each vertex of the predicted human body mesh, its closest vertex in the ground-truth mesh is identified to form a vertex pair; the average distance over all such pairs is then computed.
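For reference, the three evaluation metrics can be computed as in the following sketch (our own helper functions, not the evaluation code used in the paper; distances are in whatever unit the inputs use, millimeters in our setting).

```python
import numpy as np
from scipy.spatial import cKDTree

def mean_angle_error(n_pred, n_gt, mask):
    """MAE in degrees over foreground pixels; normals are (H, W, 3) unit vectors."""
    cos = np.clip((n_pred * n_gt).sum(-1), -1.0, 1.0)
    return np.degrees(np.arccos(cos[mask])).mean()

def mpjpe(J_pred, J_gt):
    """Mean per joint position error: average Euclidean distance over joints."""
    return np.linalg.norm(J_pred - J_gt, axis=-1).mean()

def surface_error(V_pred, V_gt):
    """3D surface error: each predicted vertex is paired with its closest
    ground-truth vertex and the pair distances are averaged."""
    d, _ = cKDTree(V_gt).query(V_pred)
    return d.mean()
```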
|
| 123 |
+
|
| 124 |
+
For the real-world PHSPD dataset, subject 4 is chosen to form the validation set (23,786 samples); the test set contains subjects 7, 11, and 12 (69,283 samples); the training set contains the remaining subjects (186,746 samples).
|
| 125 |
+
|
| 126 |
+
We also demonstrate the effectiveness of our SfP approach on SURREAL [29], a synthetic dataset of color images rendered from motion-captured human body shapes. Polarization images can be synthesized using the color and depth images (details are in the supplementary material). We choose the subset "run1" and select one frame out of every ten. The resulting training set has 245,759 samples, the validation set has 14,528 samples and the test set has 52,628 samples.
|
| 127 |
+
|
| 128 |
+
# 4.1 Evaluation of Surface Normal Estimation
|
| 129 |
+
|
| 130 |
+
In this task, our approach is compared with a recent work Physics [44], a traditional method Linear [65], and three ablation variants of our method as baselines. Ours (color image) uses only color image for estimating the normal map. Ours (no physical priors) does not incorporate the ambiguous normal maps as the physical priors and employs the polarization image as the only input. Ours (no fused normal) is similar to Physics [44], in which we use the two ambiguous normal maps as the only priors, discarding the fused normal maps.
|
| 131 |
+
|
| 132 |
+
<table><tr><td></td><td>SURREAL</td><td>PHSPD</td></tr><tr><td>Linear [65]</td><td>20.03</td><td>34.97</td></tr><tr><td>Physics [44]</td><td>7.45</td><td>21.45</td></tr><tr><td>ours (color image)</td><td>19.49</td><td>25.02</td></tr><tr><td>ours (no physical priors)</td><td>13.89</td><td>24.71</td></tr><tr><td>ours (no fused normal)</td><td>7.43</td><td>21.65</td></tr><tr><td>ours</td><td>7.10</td><td>20.75</td></tr></table>
|
| 133 |
+
|
| 134 |
+
Table 1. Comparison of surface normal estimation evaluated in MAE. The competing methods include Linear [65], Physics [44], ours, and three ablation variants of our method.
|
| 135 |
+
|
| 136 |
+
From both the quantitative results in Table 1 and the visual results in Fig. 3, it is observed that our method consistently outperforms the state-
|
| 137 |
+
|
| 138 |
+
of-the-art surface normal prediction methods [44, 65] on both the SURREAL and PHSPD datasets. The poor performance of [65] may be attributed to its unrealistic assumption of a noise-free environment in the captured images. Let us look at the three ablation baselines of our approach: on PHSPD, using only color images delivers performance similar to that of removing the physical priors. Intuitively, it is challenging for neural networks to extract the information of the ambiguous normal maps (physical priors) directly from raw polarization images; therefore, removing the physical priors results in performance similar to that of using only color images. [44] and ours (no fused normal) both utilize the ambiguous normals as physical priors, and thus produce similar results. By incorporating the fused normal, which disambiguates the azimuth angle estimation, the results of our full-fledged approach surpass those of [44].
|
| 139 |
+
|
| 140 |
+

|
| 141 |
+
Fig. 3. Exemplar results of normal map prediction by five competing methods: [65, 44], ours (no physical priors), ours (color image), and ours. Original color images and polarization images are shown in the first and third columns, with pixelated faces.
|
| 142 |
+
|
| 143 |
+
# 4.2 Evaluation of Pose and Shape Estimation
|
| 144 |
+
|
| 145 |
+
The focus of this section is qualitative and quantitative evaluations on estimating poses & SMPL shapes, as well as our final estimation of clothed human shapes.
|
| 146 |
+
|
| 147 |
+
In pose estimation, it is of interest to inspect the effect of engaging surface normal maps in our SfP approach. Besides our SfP approach, the competing methods consist of HMR [28] and an ablation variant of SfP, ours (w/o normal). The latter is obtained by using only the polarization image, without normal map estimation. Since HMR is trained on single color images, it is re-trained using the first three channels of a polarization image. In addition to HMR working on color images, for fair comparison, HMR is also re-trained on the polarization images of our PHSPD dataset, denoted HMR (polarization). From Table 2, it is observed that our method produces the lowest MPJPE values among all competing methods; the results of ours (w/o normal) are comparable to those of HMR (polarization). The quantitative results confirm that polarization images are capable of producing accurate estimates of human pose. Moreover, the visual results in Fig. 4 provide qualitative evidence that further performance gains are to be expected when we have access to the normal maps. A similar observation is obtained in Table 3, where a quantitative examination is systematically carried out with and without the estimated normal map, on color and polarization images, in both datasets. Note that the performance gain is particularly significant for polarization images, which may be attributed to the rich geometric information encoded in the normal map representation. On color images, there is still a noticeable improvement, albeit less significant. Our explanation is that the normal maps estimated from color images are not as reliable as those obtained from their polarization image counterparts.
|
| 148 |
+
|
| 149 |
+
<table><tr><td></td><td>SURREAL</td><td colspan="2">PHSPD</td></tr><tr><td></td><td>GT-t</td><td>GT-t</td><td>Pred-t</td></tr><tr><td>HMR [28]</td><td>116.68/136.32</td><td>82.96/91.46</td><td>-</td></tr><tr><td>HMR (polarization)</td><td>-</td><td>77.57/88.74</td><td>97.24/106.20</td></tr><tr><td>ours (w/o normal)</td><td>83.43/94.00</td><td>84.44/96.42</td><td>93.38/104.48</td></tr><tr><td>ours</td><td>67.25/75.94</td><td>66.32/74.46</td><td>74.58/81.85</td></tr></table>
|
| 150 |
+
|
| 151 |
+
Table 2. Quantitative evaluation using the MPJPE metric on both the SURREAL and PHSPD datasets. Errors are in millimeters. GT-t means the camera translation is known and Pred-t means the predicted camera translation is used to compute the joint error. We report MPJPE for 20/24 joints, where the 20-joint setting removes the two hand and two foot joints, following the settings of previous work [28, 66].
|
| 152 |
+
|
| 153 |
+
To evaluate the effectiveness of our approach on clothed human shape recovery, state-of-the-art methods for human surface reconstruction from single color images are employed as baselines, namely PIFu [40], Depth Human [41] and HMD [42]. Quantitative results are obtained on the PHSPD dataset by computing the 3D
|
| 154 |
+
|
| 155 |
+
<table><tr><td></td><td colspan="2">SURREAL</td><td colspan="2">PHSPD</td></tr><tr><td></td><td>ours (w/o normal)</td><td>ours</td><td>ours (w/o normal)</td><td>ours</td></tr><tr><td rowspan="2">polarization image improvement</td><td>83.43/94.00</td><td>67.25/75.94</td><td>84.44/96.42</td><td>66.32/74.46</td></tr><tr><td colspan="2">16.18/18.06</td><td colspan="2">18.12/21.96</td></tr><tr><td rowspan="2">color image improvement</td><td>88.53/100.32</td><td>80.70/91.51</td><td>85.67/80.34</td><td>77.72/70.07</td></tr><tr><td colspan="2">7.82/8.81</td><td colspan="2">7.95/10.27</td></tr></table>
|
| 156 |
+
|
| 157 |
+
Table 3. Quantitative ablation study of our SfP approach (w/ vs. w/o the estimated surface normal). MPJPE (in millimeters) is the evaluation metric. Experiments are carried out on both the color and polarization images of the SURREAL and PHSPD datasets.
|
| 158 |
+
|
| 159 |
+

|
| 160 |
+
Fig. 4. Exemplar shape estimation results. The first column is polarization images. HMR (polarization) means the HMR model is retrained on polarization images of our PHSPD dataset. Ours (w/o normal) means the model is trained without the normal map as a part of the input.
|
| 161 |
+
|
| 162 |
+
surface error of the predicted human mesh with respect to the ground-truth mesh. Scaled rigid ICP is performed before the evaluation so as to scale and transform each predicted mesh into the same coordinate frame as the ground-truth surface. The results are displayed in Table 4. PIFu [40] performs the worst, partly because it does not take human pose into consideration when predicting the implicit surface function inside a volume. The 3D surface errors from HMD [42] and Depth Human [41] are relatively small; our SfP approach achieves the best performance, which is partly due to its exploitation of the estimated normal maps. Note that the comparison methods PIFu [40], Depth Human [41] and HMD [42] only work with color images as input. In this experiment, for each of the polarization images used by the two SfP variants, namely ours (w/o deform) and ours, the closest color image captured in the multi-camera setup of PHSPD is taken as the corresponding input to the three comparison methods.
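As background for the scaled rigid ICP alignment used before evaluation, a minimal Umeyama-style similarity fit for one set of correspondences is sketched below; in an ICP loop, such a fit would be alternated with nearest-neighbor correspondence search between the two meshes. This is a generic illustration, not the exact alignment code used in our experiments.

```python
import numpy as np

def umeyama_alignment(X, Y):
    """Similarity transform (scale s, rotation R, translation t) mapping point
    set X onto Y, given one-to-one correspondences; X, Y: (N, 3) arrays."""
    mu_x, mu_y = X.mean(0), Y.mean(0)
    Xc, Yc = X - mu_x, Y - mu_y
    U, S, Vt = np.linalg.svd(Yc.T @ Xc / len(X))   # cross-covariance of the pairs
    D = np.eye(3)
    if np.linalg.det(U @ Vt) < 0:                  # avoid reflections
        D[2, 2] = -1
    R = U @ D @ Vt
    s = np.trace(np.diag(S) @ D) / Xc.var(0).sum()
    t = mu_y - s * R @ mu_x
    return s, R, t                                 # apply as: s * (R @ x) + t
```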
|
| 163 |
+
|
| 164 |
+
Exemplar visual results are presented in Fig. 5, where the predicted body shapes are overlaid onto the input images. It is observed that the body shapes predicted by PIFu and Depth Human are generally well-aligned with the input image, since they directly predict the implicit function value or depth value for each pixel of the foreground human shape. However, this does not necessarily indicate accurate alignment of the 3D surface mesh, as evidenced in Table 4. For
|
| 165 |
+
|
| 166 |
+
<table><tr><td></td><td>PIFu [40]</td><td>Depth Human [41]</td><td>HMD [42]</td><td>ours (w/o deform)</td><td>ours</td></tr><tr><td>3D surface error(mm)</td><td>73.13</td><td>51.02</td><td>51.71</td><td>41.10</td><td>38.92</td></tr></table>
|
| 167 |
+
|
| 168 |
+
Table 4. Quantitative evaluation of clothed human shape recovery methods on the PHSPD dataset.
|
| 169 |
+
|
| 170 |
+
PIFu and Depth Human, the exterior surfaces tend to be overly smooth. Besides, Depth Human only produces a partial mesh with respect to the view in the input image. HMD, on the other hand, does not work well, as evidenced by the often error-prone surface details. This may be attributed to its less reliable shading representation, given the new environmental lighting and the texture ambiguities present in these color images. Our SfP approach is shown to be capable of producing reliable predictions of clothed body shapes, which again demonstrates the applicability of polarization imaging for shape estimation, as well as the benefit of engaging the surface normal maps in our approach.
|
| 171 |
+
|
| 172 |
+
Qualitative results presented in Fig. 6 showcase the robust test performance in novel settings. Note that the polarization images are intentionally acquired from unseen human subjects at new locations, so the background scenes are very different from those in the training images.
|
| 173 |
+
|
| 174 |
+
# 5 Conclusion
|
| 175 |
+
|
| 176 |
+
This paper tackles the new problem of estimating clothed human shape from a single 2D polarization image. Our work demonstrates the applicability of polarization cameras as a promising alternative to existing imaging sensors for human pose and shape estimation. Moreover, by exploiting the rich geometric details in the surface normals of the input polarization images, our SfP approach is capable of reconstructing clothed human body shapes with fine surface details.
|
| 177 |
+
|
| 178 |
+
# Acknowledgement
|
| 179 |
+
|
| 180 |
+
This work is supported by the NSERC Discovery Grants, and the University of Alberta-Huawei Joint Innovation Collaboration grants.
|
| 181 |
+
|
| 182 |
+

|
| 183 |
+
Fig. 5. Exemplar estimation results of clothed body shapes. The first and fifth column are color images and polarization images, respectively. $PIFu$ [40], Depth Human [41] and HMD [42] are the results based on color input images. Ours (w/o deformation) and ours are the results with the polarization image as the input.
|
| 184 |
+
|
| 185 |
+

|
| 186 |
+
Fig. 6. Exemplar estimation results of clothed body shapes, obtained on polarization images from novel test scenarios (new human subject and scene context).
|
| 187 |
+
|
| 188 |
+
# References
|
| 189 |
+
|
| 190 |
+
1. Park, S., Hwang, J., Kwak, N.: 3d human pose estimation using convolutional neural networks with 2d pose information. In: European Conference on Computer Vision, Springer (2016) 156-169
|
| 191 |
+
2. Li, S., Zhang, W., Chan, A.B.: Maximum-margin structured learning with deep networks for 3d human pose estimation. In: Proceedings of the IEEE International Conference on Computer Vision. (2015) 2848-2856
|
| 192 |
+
3. Tekin, B., Katircioglu, I., Salzmann, M., Lepetit, V., Fua, P.: Structured prediction of 3d human pose with deep neural networks. In: British Machine Vision Conference (BMVC). (2016)
|
| 193 |
+
4. Tome, D., Russell, C., Agapito, L.: Lifting from the deep: convolutional 3d pose estimation from a single image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2017) 2500-2509
|
| 194 |
+
5. Martinez, J., Hossain, R., Romero, J., Little, J.J.: A simple yet effective baseline for 3d human pose estimation. In: Proceedings of the IEEE International Conference on Computer Vision. (2017) 2640-2649
|
| 195 |
+
6. Zhao, R., Wang, Y., Martinez, A.M.: A simple, fast and highly-accurate algorithm to recover 3d shape from 2d landmarks on a single image. IEEE transactions on pattern analysis and machine intelligence 40(12) (2017) 3059-3066
|
| 196 |
+
7. Moreno-Noguer, F.: 3d human pose estimation from a single image via distance matrix regression. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2017) 2823-2832
|
| 197 |
+
8. Nie, B.X., Wei, P., Zhu, S.C.: Monocular 3d human pose estimation by predicting depth on joints. In: Proceedings of the IEEE International Conference on Computer Vision, IEEE (2017) 3467-3475
|
| 198 |
+
9. Zhou, X., Huang, Q., Sun, X., Xue, X., Wei, Y.: Towards 3d human pose estimation in the wild: a weakly-supervised approach. In: Proceedings of the IEEE International Conference on Computer Vision. (2017) 398-407
|
| 199 |
+
10. Wang, M., Chen, X., Liu, W., Qian, C., Lin, L., Ma, L.: Drpose3d: depth ranking in 3d human pose estimation. In: Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence. (2018) 978-984
|
| 200 |
+
11. Yang, W., Ouyang, W., Wang, X., Ren, J., Li, H., Wang, X.: 3d human pose estimation in the wild by adversarial learning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2018) 5255-5264
|
| 201 |
+
12. Fang, H.S., Xu, Y., Wang, W., Liu, X., Zhu, S.C.: Learning pose grammar to encode human body configuration for 3d pose estimation. In: Thirty-Second AAAI Conference on Artificial Intelligence. (2018) 6821-6828
|
| 202 |
+
13. Pavlakos, G., Zhou, X., Daniilidis, K.: Ordinal depth supervision for 3d human pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2018) 7307-7316
|
| 203 |
+
14. Sun, X., Xiao, B., Wei, F., Liang, S., Wei, Y.: Integral human pose regression. In: Proceedings of the European Conference on Computer Vision (ECCV). (2018) 529-545
|
| 204 |
+
15. Liu, J., Ding, H., Shahroudy, A., Duan, L.Y., Jiang, X., Wang, G., Chichung, A.K.: Feature boosting network for 3d pose estimation. IEEE transactions on pattern analysis and machine intelligence 42(2) (2020) 494-501
|
| 205 |
+
16. Sharma, S., Varigonda, P.T., Bindal, P., Sharma, A., Jain, A., Bangalore, S.B.: Monocular 3d human pose estimation by generation and ordinal ranking. In: Proceedings of the IEEE International Conference on Computer Vision. (2019) 2325-2334
|
| 206 |
+
|
| 207 |
+
17. Habibie, I., Xu, W., Mehta, D., Pons-Moll, G., Theobalt, C.: In the wild human pose estimation using explicit 2d features and intermediate 3d representations. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2019) 10905-10914
|
| 208 |
+
18. Wandt, B., Rosenhahn, B.: Repnet: weakly supervised training of an adversarial reprojection network for 3d human pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2019) 7782-7791
|
| 209 |
+
19. Li, C., Lee, G.H.: Generating multiple hypotheses for 3d human pose estimation with mixture density network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2019) 9887-9895
|
| 210 |
+
20. Wang, K., Lin, L., Jiang, C., Qian, C., Wei, P.: 3d human pose machines with self-supervised learning. IEEE transactions on pattern analysis and machine intelligence (2019)
|
| 211 |
+
21. Anguelov, D., Srinivasan, P., Koller, D., Thrun, S., Rodgers, J., Davis, J.: Scape: shape completion and animation of people. In: ACM transactions on graphics (TOG). Volume 24., ACM (2005) 408-416
|
| 212 |
+
22. Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: Smpl: A skinned multi-person linear model. ACM transactions on graphics (TOG) 34(6) (2015) 248
|
| 213 |
+
23. Balan, A.O., Sigal, L., Black, M.J., Davis, J.E., Haussecker, H.W.: Detailed human shape and pose from images. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, IEEE (2007) 1-8
|
| 214 |
+
24. Dibra, E., Jain, H., Oztireli, C., Ziegler, R., Gross, M.: Human shape from silhouettes using generative hks descriptors and cross-modal neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2017) 4826-4836
|
| 215 |
+
25. Dibra, E., Jain, H., Öztireli, C., Ziegler, R., Gross, M.: Hs-nets: Estimating human body shape from silhouettes with convolutional neural networks. In: Fourth International Conference on 3D Vision (3DV), IEEE (2016) 108-117
|
| 216 |
+
26. Bogo, F., Kanazawa, A., Lassner, C., Gehler, P., Romero, J., Black, M.J.: Keep it smpl: Automatic estimation of 3d human pose and shape from a single image. In: European Conference on Computer Vision, Springer (2016) 561-578
|
| 217 |
+
27. Lassner, C., Romero, J., Kiefel, M., Bogo, F., Black, M.J., Gehler, P.V.: Unite the people: Closing the loop between 3d and 2d human representations. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2017) 6050-6059
|
| 218 |
+
28. Kanazawa, A., Black, M.J., Jacobs, D.W., Malik, J.: End-to-end recovery of human shape and pose. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2018) 7122-7131
|
| 219 |
+
29. Varol, G., Romero, J., Martin, X., Mahmood, N., Black, M.J., Laptev, I., Schmid, C.: Learning from synthetic humans. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2017) 109-117
|
| 220 |
+
30. Pavlakos, G., Zhu, L., Zhou, X., Daniilidis, K.: Learning to estimate 3d human pose and shape from a single color image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2018) 459-468
|
| 221 |
+
31. Omran, M., Lassner, C., Pons-Moll, G., Gehler, P., Schiele, B.: Neural body fitting: Unifying deep learning and model based human pose and shape estimation. In: International Conference on 3D Vision (3DV), IEEE (2018) 484-494
|
| 222 |
+
32. Xu, Y., Zhu, S.C., Tung, T.: Denserac: Joint 3d pose and shape estimation by dense render-and-compare. In: Proceedings of the IEEE International Conference on Computer Vision. (2019) 7760-7770
|
| 223 |
+
|
| 224 |
+
33. Kolotouros, N., Pavlakos, G., Black, M.J., Daniilidis, K.: Learning to reconstruct 3d human pose and shape via model-fitting in the loop. In: Proceedings of the IEEE International Conference on Computer Vision. (2019) 2252-2261
|
| 225 |
+
34. Zanfir, A., Marinoiu, E., Sminchisescu, C.: Monocular 3d pose and shape estimation of multiple people in natural scenes-the importance of multiple scene constraints. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2018) 2148-2157
|
| 226 |
+
35. Sun, Y., Ye, Y., Liu, W., Gao, W., Fu, Y., Mei, T.: Human mesh recovery from monocular images via a skeleton-disentangled representation. In: Proceedings of the IEEE International Conference on Computer Vision. (2019) 5349-5358
|
| 227 |
+
36. Kanazawa, A., Zhang, J.Y., Felsen, P., Malik, J.: Learning 3d human dynamics from video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2019) 5614-5623
|
| 228 |
+
37. Arnab, A., Doersch, C., Zisserman, A.: Exploiting temporal context for 3d human pose estimation in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2019) 3395-3404
|
| 229 |
+
38. Varol, G., Ceylan, D., Russell, B., Yang, J., Yumer, E., Laptev, I., Schmid, C.: Bodynet: volumetric inference of 3d human body shapes. In: Proceedings of the European Conference on Computer Vision (ECCV). (2018) 20-36
|
| 230 |
+
39. Zheng, Z., Yu, T., Wei, Y., Dai, Q., Liu, Y.: Deephuman: 3d human reconstruction from a single image. In: Proceedings of the IEEE International Conference on Computer Vision. (2019) 7739-7749
|
| 231 |
+
40. Saito, S., Huang, Z., Natsume, R., Morishima, S., Kanazawa, A., Li, H.: Pifu: pixel-aligned implicit function for high-resolution clothed human digitization. In: Proceedings of the IEEE International Conference on Computer Vision. (2019) 2304-2314
|
| 232 |
+
41. Tang, S., Tan, F., Cheng, K., Li, Z., Zhu, S., Tan, P.: A neural network for detailed human depth estimation from a single image. In: Proceedings of the IEEE International Conference on Computer Vision. (2019) 7750-7759
|
| 233 |
+
42. Zhu, H., Zuo, X., Wang, S., Cao, X., Yang, R.: Detailed human shape estimation from a single image by hierarchical mesh deformation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2019) 4491-4500
|
| 234 |
+
43. Yang, L., Tan, F., Li, A., Cui, Z., Furukawa, Y., Tan, P.: Polarimetric dense monocular slam. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2018) 3857-3866
|
| 235 |
+
44. Ba, Y., Chen, R., Wang, Y., Yan, L., Shi, B., Kadambi, A.: Physics-based neural networks for shape from polarization. arXiv preprint arXiv:1903.10210 (2019)
|
| 236 |
+
45. Wehner, R., Müller, M.: The significance of direct sunlight and polarized skylight in the ant's celestial system of navigation. Proceedings of the National Academy of Sciences 103(33) (2006) 12575-12579
|
| 237 |
+
46. Daly, I.M., How, M.J., Partridge, J.C., Temple, S.E., Marshall, N.J., Cronin, T.W., Roberts, N.W.: Dynamic polarization vision in mantis shrimps. Nature communications 7 (2016) 12140
|
| 238 |
+
47. Atkinson, G.A., Hancock, E.R.: Recovery of surface orientation from diffuse polarization. IEEE transactions on image processing 15(6) (2006) 1653-1664
|
| 239 |
+
48. Kadambi, A., Taamazyan, V., Shi, B., Raskar, R.: Depth sensing using geometrically constrained polarization normals. International Journal of Computer Vision 125(1-3) (2017) 34-51
|
| 240 |
+
49. Chen, L., Zheng, Y., Subpa-Asa, A., Sato, I.: Polarimetric three-view geometry. In: Proceedings of the European Conference on Computer Vision (ECCV). (2018) 20-36
|
| 241 |
+
|
| 242 |
+
50. Cui, Z., Gu, J., Shi, B., Tan, P., Kautz, J.: Polarimetric multi-view stereo. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2017) 1558-1567
|
| 243 |
+
51. Zhou, X., Zhu, M., Leonardos, S., Derpanis, K.G., Daniilidis, K.: Sparseness meets deepness: 3d human pose estimation from monocular video. In: Proceedings of the IEEE conference on computer vision and pattern recognition. (2016) 4966-4975
|
| 244 |
+
52. Akhter, I., Black, M.J.: Pose-conditioned joint angle limits for 3d human pose reconstruction. In: Proceedings of the IEEE conference on computer vision and pattern recognition. (2015) 1446-1455
|
| 245 |
+
53. Wang, C., Wang, Y., Lin, Z., Yuille, A.L., Gao, W.: Robust estimation of 3d human poses from a single image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2014) 2361-2368
|
| 246 |
+
54. Ramakrishna, V., Kanade, T., Sheikh, Y.: Reconstructing 3d human pose from 2d image landmarks. In: European Conference on Computer Vision, Springer (2012) 573-586
|
| 247 |
+
55. Zhou, X., Zhu, M., Pavlakos, G., Leonardos, S., Derpanis, K.G., Daniilidis, K.: Monocap: Monocular human motion capture using a cnn coupled with a geometric prior. IEEE transactions on pattern analysis and machine intelligence 41(4) (2019) 901-914
|
| 248 |
+
56. Zhou, X., Zhu, M., Leonardos, S., Daniilidis, K.: Sparse representation for 3d shape estimation: A convex relaxation approach. IEEE transactions on pattern analysis and machine intelligence 39(8) (2016) 1648-1661
|
| 249 |
+
57. Chen, C.H., Ramanan, D.: 3d human pose estimation = 2d pose estimation + matching. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. (2017) 7035-7043
|
| 250 |
+
58. Ci, H., Wang, C., Ma, X., Wang, Y.: Optimizing network structure for 3d human pose estimation. In: Proceedings of the IEEE International Conference on Computer Vision. (2019) 2262-2271
|
| 251 |
+
59. Cai, Y., Ge, L., Liu, J., Cai, J., Cham, T.J., Yuan, J., Thalmann, N.M.: Exploiting spatial-temporal relationships for 3d pose estimation via graph convolutional networks. In: Proceedings of the IEEE International Conference on Computer Vision. (2019) 2272-2281
|
| 252 |
+
60. Jiang, H., Cai, J., Zheng, J.: Skeleton-aware 3d human shape reconstruction from point clouds. In: Proceedings of the IEEE International Conference on Computer Vision. (2019) 5431-5441
|
| 253 |
+
61. Nehab, D., Rusinkiewicz, S., Davis, J., Ramamoorthi, R.: Efficiently combining positions and normals for precise 3d geometry. ACM transactions on graphics (TOG) 24(3) (2005) 536-543
|
| 254 |
+
62. Zou, S., Zuo, X., Qian, Y., Wang, S., Xu, C., Gong, M., Cheng, L.: Polarization human shape and pose dataset. arXiv preprint arXiv:2004.14899 (2020)
|
| 255 |
+
63. Cao, Z., Martinez, G.H., Simon, T., Wei, S., Sheikh, Y.A.: Openpose: Realtime multi-person 2d pose estimation using part affinity fields. IEEE Transactions on Pattern Analysis and Machine Intelligence (2019)
|
| 256 |
+
64. Zuo, X., Wang, S., Zheng, J., Yu, W., Gong, M., Yang, R., Cheng, L.: Sparsefusion: Dynamic human avatar modeling from sparse rgb images. IEEE Transactions on Multimedia (2020)
|
| 257 |
+
65. Smith, W.A., Ramamoorthi, R., Tozza, S.: Linear depth estimation from an uncalibrated, monocular polarisation image. In: European Conference on Computer Vision, Springer (2016) 109-125
|
| 258 |
+
|
| 259 |
+
66. Ionescu, C., Papava, D., Olaru, V., Sminchisescu, C.: Human3.6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence 36(7) (2014) 1325-1339
|
3dhumanshapereconstructionfromapolarizationimage/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:669d0c545d4391562ab4a9383073a251ba6e32617d560366f671ba2acd9400da
|
| 3 |
+
size 513635
|
3dhumanshapereconstructionfromapolarizationimage/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:af7676bf7067328ab7ef71e8fd40f77451ab29ab0cd924abbde50d31f1a8f0af
|
| 3 |
+
size 379649
|
3drotationequivariantquaternionneuralnetworks/b24ef9dc-ab0a-4997-98c2-4809a97518ff_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:199f4bd2c89980927bb16214b354c137eae15217a96162836125fdb330be1878
|
| 3 |
+
size 83337
|
3drotationequivariantquaternionneuralnetworks/b24ef9dc-ab0a-4997-98c2-4809a97518ff_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c9e2e4e2082b86175b43d940d9e11cc77adeb75d229255c76d508ce70fe3c7d0
|
| 3 |
+
size 100691
|
3drotationequivariantquaternionneuralnetworks/b24ef9dc-ab0a-4997-98c2-4809a97518ff_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7f887ddbc7844c5dd55f4ddd353d621b725bd1a508983597a5ab79301acb0805
|
| 3 |
+
size 2037628
|
3drotationequivariantquaternionneuralnetworks/full.md
ADDED
|
@@ -0,0 +1,279 @@
| 1 |
+
# 3D-Rotation-Equivariant Quaternion Neural Networks
|
| 2 |
+
|
| 3 |
+
Wen Shen $^{2,*}$ , Binbin Zhang $^{2,*}$ , Shikun Huang $^{2,*}$ , Zhihua Wei $^{2}$ , and Quanshi Zhang $^{1,\dagger}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ Shanghai Jiao Tong University, $^{2}$ Tongji University, Shanghai. {wen_shen, 0206zbb, hsk, zhihua_wei}@tongji.edu.cn, zqs1022@sjtu.edu.cn
|
| 6 |
+
|
| 7 |
+
Abstract. This paper proposes a set of rules to revise various neural networks for 3D point cloud processing to rotation-equivariant quaternion neural networks (REQNNs). We find that when a neural network uses quaternion features, the network feature naturally has the rotation-equivariance property. Rotation equivariance means that applying a specific rotation transformation to the input point cloud is equivalent to applying the same rotation transformation to all intermediate-layer quaternion features. Besides, the REQNN also ensures that the intermediate-layer features are invariant to the permutation of input points. Compared with the original neural network, the REQNN exhibits higher rotation robustness.
|
| 8 |
+
|
| 9 |
+
Keywords: Rotation Equivariance, Permutation Invariance, 3D Point Cloud Processing, Quaternion
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
3D point cloud processing has attracted increasing research attention in recent years. Unlike images with rich color information, 3D point clouds mainly use spatial contexts for feature extraction. Therefore, the rotation is not supposed to have essential impacts on 3D tasks, such as 3D shape classification and reconstruction. Besides, reordering input points should not have crucial effects on these tasks as well, which is termed the permutation-invariance property.
|
| 14 |
+
|
| 15 |
+
In this study, we focus on the problem of learning neural networks for 3D point cloud processing with rotation equivariance and permutation invariance.
|
| 16 |
+
|
| 17 |
+
- Rotation equivariance: Rotation equivariance has been discussed in recent research [6]. In this study, we define rotation equivariance for neural networks as follows. If an input point cloud is rotated by a specific angle, then the feature generated by the network is equivalent to applying the transformation w.r.t. the same rotation to the feature of the original point cloud (see Fig. 1 (left)). In this way, we can use the feature of a specific point cloud to synthesize features of the same point cloud with different orientations. Specifically, we can apply the transformation of a specific rotation to the current feature to synthesize the target feature.
|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
Fig. 1. Overview. In the REQNN, both input point clouds and intermediate-layer features are represented by quaternion features (blue). In this paper, the rotation equivariance is defined as follows. When we rotate the input point cloud $\mathbf{x}$ (a) with a specific angle $(e.g.60^{\circ})$ to obtain the same point cloud $\mathbf{x}'$ (b) with a different orientation, then the intermediate-layer feature (d) generated by the REQNN is equivalent to applying the transformation $w.r.t.$ the same rotation to the feature (c) of the original point cloud. I.e. the rotation equivariance is defined as $g(\mathrm{Rotate}_{\theta}(\mathbf{x})) = \mathrm{Rotate}_{\theta}(g(\mathbf{x}))$ . REQNNs exhibit significantly higher rotation robustness than traditional neural networks.
|
| 21 |
+
|
| 22 |
+
- Permutation invariance: Permutation invariance measures whether intermediate-layer features essentially keep unchanged when we reorder input 3D points. Fortunately, we find that quaternion features in a neural network naturally satisfy the rotation-equivariance property under certain conditions (details will be introduced later). Therefore, we propose a set of rules to revise most existing neural networks to rotation-equivariant quaternion neural networks (REQNNs). Given a specific neural network for 3D point cloud processing (e.g. PointNet [21], PointNet++ [22], DGCNN [33], PointConv [37], etc., which are learned for various tasks, such as 3D shape classification and reconstruction), our rules can help revise the network to a REQNN with both properties of rotation equivariance and permutation invariance.
|
| 23 |
+
|
| 24 |
+
To revise a neural network to a REQNN with rotation equivariance, we transform both the input and intermediate-layer features of the original neural network into quaternion features (i.e. vectors/matrices/tensors in which each element is a quaternion). A quaternion is a hyper-complex number with three imaginary parts ( $\pmb{i}$ , $\pmb{j}$ , and $\pmb{k}$ ) [11]. 3D rotations can be represented using quaternions, i.e. rotating a quaternion $\mathbf{q}\in \mathbb{H}$ by an angle $\theta \in [0,2\pi)$ around an axis $\mathbf{o} = 0 + o_1\pmb {i} + o_2\pmb {j} + o_3\pmb {k}\in \mathbb{H}$ $(o_{1},o_{2},o_{3}\in \mathbb{R})$ can be represented as $\mathbf{Rq}\overline{\mathbf{R}}$ , where $\mathbf{R} = e^{\mathbf{o}\frac{\theta}{2}}\in \mathbb{H}$ , and $\overline{\mathbf{R}} = e^{-\mathbf{o}\frac{\theta}{2}}\in \mathbb{H}$ is the conjugation of $\mathbf{R}$ .
|
| 25 |
+
|
| 26 |
+
In this way, the rotation equivariance of a REQNN is defined as follows. When we apply a specific rotation to the input $\mathbf{x} \in \mathbb{H}^n$ , i.e. $\mathbf{x}' = \mathbf{R} \circ \mathbf{x} \circ \overline{\mathbf{R}}$ , the network will generate an intermediate-layer quaternion feature $g(\mathbf{x}') \in \mathbb{H}^d$ , where $\circ$ denotes the element-wise multiplication. The rotation equivariance ensures that $g(\mathbf{x}') = \mathbf{R} \circ g(\mathbf{x}) \circ \overline{\mathbf{R}}$ . Note that the input and the feature here can be vectors/matrices/tensors, in which each element is a quaternion.
|
| 27 |
+
|
| 28 |
+
Therefore, we revise a number of layerwise operations in the original neural network to make them rotation-equivariant, such as operations of the convolution, ReLU, batch-normalization, max-pooling, 3D coordinates weighting [37], etc., in order to ensure the rotation-equivariance property.
|
| 29 |
+
|
| 30 |
+
Note that most tasks, such as the shape classification, require outputs composed of real numbers. However, the REQNN's features consist of quaternions. Therefore, for real applications, we revise quaternion features of a specific high layer of the REQNN into ordinary features, in which each element is a real number. We transform quaternion features into real numbers by using the square of the norm of each quaternion element in the feature to replace the corresponding feature element. We put the revised real-valued features into the last few layers to generate real-valued outputs, as Fig. 1 (right) shows. Such revision ensures that the last few layerwise operations are rotation invariant. We will introduce this revision in Section 3.5.
|
| 31 |
+
|
| 32 |
+
Besides the rotation-equivariance property, the REQNN is also supposed to have the permutation-invariance property as follows. When we reorder 3D points in the input point cloud $\mathbf{x}$ to obtain the same point cloud $\mathbf{x}^{\mathrm{reorder}}$ with a different order, the network will generate the same feature, i.e. $g(\mathbf{x}^{\mathrm{reorder}}) = g(\mathbf{x})$ . Therefore, we revise a few operations in the original neural network to be permutation invariant, e.g. the farthest point sampling [22] and the ball-query-search-based grouping [22], to ensure the permutation-invariance property of the REQNN.
|
| 33 |
+
|
| 34 |
+
In this study, we do not limit our attention to a specific architecture. Our method can be applied to various neural networks for different tasks. Experimental results show that REQNNs exhibit superior rotation robustness compared with the original networks.
|
| 35 |
+
|
| 36 |
+
Contributions of our study are summarized as follows. We propose a set of generic rules to revise various neural networks to REQNNs with both properties of rotation equivariance and permutation invariance. The proposed rules can be broadly applied to different neural networks for different tasks, such as 3D shape classification and point cloud reconstruction. Experiments have demonstrated the effectiveness of our method that REQNNs exhibit higher rotation robustness than traditional neural networks.
|
| 37 |
+
|
| 38 |
+
# 2 Related Work
|
| 39 |
+
|
| 40 |
+
Deep learning for 3D point cloud processing: Recently, a series of studies have focused on deep neural networks (DNNs) for 3D point cloud processing and have achieved superior performance in various 3D tasks [21, 28, 27, 41, 32, 25]. As a pioneer of using DNNs for 3D point cloud processing, PointNet [21] aggregated all individual point features into a global feature using a max-pooling operation. In order to further extract contextual information of 3D point clouds, existing studies have made lots of efforts. PointNet++ [22] hierarchically used PointNet as a local descriptor. KC-Net [24] proposed kernel correlation to measure the similarity between two point sets, so as to represent local geometric structures around each point. PointSIFT [14] proposed a SIFT-like operation to encode
|
| 41 |
+
|
| 42 |
+
contextual information of different orientations for each point. Point2Sequence [18] employed an RNN-based encoder-decoder structure to capture correlations between different areas in a local region by aggregating multi-scale areas of each local region with attention.
|
| 43 |
+
|
| 44 |
+
Unlike images, 3D point clouds cannot be processed by traditional convolution operators. To address this problem, Kd-network [16] built a kd-tree on subdivisions of the point cloud, and used the kd-tree structure to mimic the convolution operator in extracting and aggregating features according to the subdivisions. PointCNN [17] proposed an $\mathcal{X}$ -Conv operator to aggregate features from neighborhoods into fewer representative points. Pointwise CNN [12] binned nearest neighbors into kernel cells of each point and convolved them with kernel weights. PointConv [37] treated convolution kernels as nonlinear functions learned from the local coordinates of 3D points and their densities, respectively. Besides, some studies introduced graph convolutional neural networks for the extraction of geodesic information [27, 33]. Some studies focused on the use of spatial relations between neighboring points [19, 44]. In this study, we aim to learn DNNs with the properties of rotation equivariance and permutation invariance.
|
| 45 |
+
|
| 46 |
+
3D rotation robustness: The most widely used method to improve the rotation robustness was data augmentation [31]. However, data augmentation significantly boosted the computational cost. Spatial Transformer Networks (STNs) [13] allowed spatial manipulations of data and features within the network, which improved the rotation robustness.
|
| 47 |
+
|
| 48 |
+
Some studies went beyond rotation robustness and focused on rotation invariance. The rotation-invariance property means that the output always keeps unchanged when we rotate the input. One intuitive way to achieve rotation invariance was to project 3D points onto a sphere [40, 23, 42] and constructed spherical CNNs [5] to extract rotation-invariant features. Other studies learned rotation-invariant representations that discarded orientation information of input point clouds [4, 8, 43].
|
| 49 |
+
|
| 50 |
+
However, such rotation-invariant methods directly discarded rotation information, so the rotation equivariance is proposed as a more promising property of feature representations. Rotation-equivariant methods both encode rotation information and disentangle rotation-independent information from the point cloud. To the best of our knowledge, there were very few studies in this direction. Previous studies developed specific network architectures [45] or designed specific operations [29] to achieve rotation equivariance. In comparison, we aim to propose a set of generic rules to revise most existing neural networks to achieve the rotation-equivariance property. Unlike [45, 29], our method can be applied to various neural networks for different tasks.
|
| 51 |
+
|
| 52 |
+
Complex and quaternion networks: Recently, besides neural networks using real-valued features, neural networks using complex-valued features or quaternion-valued [11] features have been developed [2, 35, 7, 36, 10, 30, 39, 9, 15, 46, 20]. In this study, we use quaternions to represent intermediate-layer features and 3D rotations to achieve 3D rotation-equivariance property.
|
| 53 |
+
|
| 54 |
+
# 3 Approach
|
| 55 |
+
|
| 56 |
+
# 3.1 Quaternion Features in Neural Networks and Rotations
|
| 57 |
+
|
| 58 |
+
Quaternion: A quaternion [11] $\mathbf{q} = q_0 + q_1\mathbf{i} + q_2\mathbf{j} + q_3\mathbf{k} \in \mathbb{H}$ is a hyper-complex number with a real part $(q_0)$ and three imaginary parts $(q_1\mathbf{i}, q_2\mathbf{j}, q_3\mathbf{k})$ , where $q_0, q_1, q_2, q_3 \in \mathbb{R}$ ; $\mathbb{H}$ denotes the algebra of quaternions. If the real part of $\mathbf{q}$ is 0, then $\mathbf{q}$ is a pure quaternion. If the norm of a quaternion $\| \mathbf{q} \| = \sqrt{q_0^2 + q_1^2 + q_2^2 + q_3^2} = 1$ , then $\mathbf{q}$ is a unit quaternion. The conjugation of $\mathbf{q}$ is $\overline{\mathbf{q}} = q_0 - q_1\mathbf{i} - q_2\mathbf{j} - q_3\mathbf{k}$ .
|
| 59 |
+
|
| 60 |
+
The products of basis elements $i, j$ , and $k$ are defined by $i^2 = j^2 = k^2 = ijk = -1$ and $ij = k, jk = i, ki = j, ji = -k, kj = -i$ , and $ik = -j$ . Note that the multiplication of two quaternions is non-commutative, i.e. $ij \neq ji, jk \neq kj$ , and $ki \neq ik$ .
|
| 61 |
+
|
| 62 |
+
Each quaternion has a polar decomposition. In this study, we only focus on the polar decomposition of a unit quaternion in the form of $\mathbf{q} = \cos \frac{\theta}{2} + \sin \frac{\theta}{2}(o_1\mathbf{i} + o_2\mathbf{j} + o_3\mathbf{k})$ , $\sqrt{o_1^2 + o_2^2 + o_3^2} = 1$ . The polar decomposition of such a unit quaternion is $\mathbf{q} = e^{\mathbf{o}\frac{\theta}{2}}$ , where $\mathbf{o} = o_1\mathbf{i} + o_2\mathbf{j} + o_3\mathbf{k}$ . As aforementioned, multiplication of two quaternions is non-commutative, therefore, $e^{\mathbf{o}\frac{\theta}{2}}\mathbf{p}e^{-\mathbf{o}\frac{\theta}{2}} \neq \mathbf{p}$ .
|
| 63 |
+
|
| 64 |
+
For a traditional neural network, inputs, features, and parameters are vectors/matrices/tensors, in which each element is a real number. However, in a REQNN, inputs and features are vectors/matrices/tensors composed of quaternions; parameters are still vectors/matrices/tensors composed of real numbers.
|
| 65 |
+
|
| 66 |
+
Quaternion inputs and features: In a REQNN, each $u$ -th point $([x_u,y_u,z_u]^\top \in \mathbb{R}^3)$ in a 3D point cloud is represented as a pure quaternion $\mathbf{x}_u = 0 + x_u\pmb{i} + y_u\pmb{j} + z_u\pmb{k}$ . Each $v$ -th element of the intermediate-layer feature is also a pure quaternion $\mathbf{f}_v = 0 + a_v\pmb{i} + b_v\pmb{j} + c_v\pmb{k}$ , where $a_v,b_v,c_v \in \mathbb{R}$ .
|
| 67 |
+
|
| 68 |
+
Quaternion rotations: Each element of a feature, $\mathbf{f}_v = 0 + a_v\pmb{i} + b_v\pmb{j} + c_v\pmb{k}$ , can be considered to have an orientation, i.e. $[a_v,b_v,c_v]^\top$ . In this way, 3D rotations can be represented using quaternions. Suppose we rotate $\mathbf{f}_v$ around an axis $\mathbf{o} = 0 + o_1\pmb{i} + o_2\pmb{j} + o_3\pmb{k}$ (where $o_1,o_2,o_3\in \mathbb{R}$ , $\| \mathbf{o}\| = 1$ ) with an angle $\theta \in [0,2\pi)$ to get $\mathbf{f}_v'$ . Such a rotation can be represented using a unit quaternion $\mathbf{R} = \cos \frac{\theta}{2} + \sin \frac{\theta}{2}(o_1\pmb{i} + o_2\pmb{j} + o_3\pmb{k}) = e^{\mathbf{o}\frac{\theta}{2}}$ and its conjugation $\overline{\mathbf{R}}$ , as follows.
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
\mathbf {f} _ {v} ^ {\prime} = \mathbf {R} \mathbf {f} _ {v} \overline {{\mathbf {R}}}. \tag {1}
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
Note that $\mathbf{f}_v' = \mathbf{R}\mathbf{f}_v\overline{\mathbf{R}} \neq \mathbf{f}_v$ . The advantage of using quaternions to represent rotations is that quaternions do not suffer from the singularity problem, whereas the Euler angles [34] and the Rodrigues parameters [26] do. Besides, although the redundancy ratio of quaternions is two, this redundancy does not affect the rotation-equivariance property of the REQNN.
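The rotation in Equation (1) can be checked numerically with a short NumPy sketch. The helper names below (`qmul`, `rotate`) are our own illustration and are not taken from any released implementation.

```python
import numpy as np

def qmul(p, q):
    """Hamilton product of two quaternions stored as [real, i, j, k]."""
    pw, px, py, pz = p
    qw, qx, qy, qz = q
    return np.array([
        pw * qw - px * qx - py * qy - pz * qz,
        pw * qx + px * qw + py * qz - pz * qy,
        pw * qy - px * qz + py * qw + pz * qx,
        pw * qz + px * qy - py * qx + pz * qw,
    ])

def rotate(f, axis, theta):
    """Compute f' = R f R_conj for a pure quaternion f, rotating by angle theta around a unit axis."""
    o = np.asarray(axis, dtype=float)
    o /= np.linalg.norm(o)
    R = np.concatenate(([np.cos(theta / 2.0)], np.sin(theta / 2.0) * o))
    R_conj = R * np.array([1.0, -1.0, -1.0, -1.0])
    return qmul(qmul(R, f), R_conj)

f = np.array([0.0, 1.0, 2.0, 3.0])                  # pure quaternion feature with (a, b, c) = (1, 2, 3)
print(rotate(f, axis=[0, 0, 1], theta=np.pi / 2))   # -> approx [0, -2, 1, 3]: (a, b, c) rotated about z
```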
|
| 75 |
+
|
| 76 |
+
To ensure that all quaternion features are rotation equivariant, all imaginary parts (i.e. $\pmb{i}$ , $\pmb{j}$ , and $\pmb{k}$ ) of a quaternion element share the same real-valued parameter $w$ .
|
| 77 |
+
|
| 78 |
+
Take the convolution operation $\otimes$ as an example: $w \otimes \mathbf{f} = w \otimes (0 + a\mathbf{i} + b\mathbf{j} + c\mathbf{k}) = 0 + (w \otimes a)\mathbf{i} + (w \otimes b)\mathbf{j} + (w \otimes c)\mathbf{k}$ , where $w$ is the real-valued parameter; $\mathbf{f}$ is the quaternion feature; and $a$ , $b$ , and $c$ are real-valued tensors of the same size for the convolution operation in this example.
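As an illustration of this weight-sharing rule, the sketch below applies one real-valued weight matrix (a 1x1 convolution written as a matrix product) to the three imaginary channels of a quaternion feature map; the bias is omitted, as required by the convolution rule in Section 3.3. All names and sizes are illustrative.

```python
import numpy as np

d_in, d_out, n_points = 8, 16, 1024

# A quaternion feature map is stored as three real-valued channel groups,
# one per imaginary part i, j, k; the real part is always zero.
a = np.random.randn(d_in, n_points)
b = np.random.randn(d_in, n_points)
c = np.random.randn(d_in, n_points)

# One shared real-valued parameter tensor w, no bias term.
w = np.random.randn(d_out, d_in)

# w (x) f is applied to each imaginary part with the same weights, so the layer
# commutes with any rotation that mixes (a, b, c) identically at every element.
out_a, out_b, out_c = w @ a, w @ b, w @ c
```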
|
| 79 |
+
|
| 80 |
+
# 3.2 Rotation Equivariance
|
| 81 |
+
|
| 82 |
+
In order to recursively achieve the rotation-equivariance property for a REQNN, we should ensure that each layerwise operation of the REQNN has the rotation-equivariance property. In a REQNN, the rotation equivariance is defined as follows. Let $\mathbf{x} \in \mathbb{H}^n$ and $\mathbf{y} = \boldsymbol{\Phi}(\mathbf{x}) \in \mathbb{H}^C$ denote the input and the output of the REQNN, respectively. Note that outputs for most tasks are traditional vectors/matrices/tensors, in which each element is a real number. In this way, we learn rotation-equivariant quaternion features in most layers, and then transform these features into ordinary real-valued rotation-invariant features in the last few layers, as shown in Fig. 1 (right). We will introduce details for such revision in Section 3.5.
|
| 83 |
+
|
| 84 |
+
For each rotation $\mathbf{R} = e^{\mathbf{o}\frac{\theta}{2}}$ and its conjugation $\overline{\mathbf{R}}$ , the rotation equivariance of a REQNN is defined as follows.
|
| 85 |
+
|
| 86 |
+
$$
|
| 87 |
+
\boldsymbol {\Phi} (\mathbf {x} ^ {(\theta)}) = \mathbf {R} \circ \boldsymbol {\Phi} (\mathbf {x}) \circ \overline {{\mathbf {R}}}, \quad \text {s . t .} \quad \mathbf {x} ^ {(\theta)} \triangleq \mathbf {R} \circ \mathbf {x} \circ \overline {{\mathbf {R}}}, \tag {2}
|
| 88 |
+
$$
|
| 89 |
+
|
| 90 |
+
where $\circ$ denotes the element-wise multiplication (e.g. $\mathbf{x}^{(\theta)}\triangleq \mathbf{R}\circ \mathbf{x}\circ \overline{\mathbf{R}}$ can also be formulated as $\mathbf{x}_u^{(\theta)}\triangleq \mathbf{R}\mathbf{x}_u\overline{\mathbf{R}}$ , $u = 1,2,\dots,n$ ). As discussed in the previous paragraph, outputs for most tasks are real-valued features. Therefore, Equation (2) does not hold for all layers in the neural network. Instead, we transform features in the last few layers into real-valued rotation-invariant features.
|
| 91 |
+
|
| 92 |
+
To achieve the above rotation equivariance, we must ensure the layerwise rotation equivariance. Let $\Phi (\mathbf{x}) = \varPhi_{L}(\varPhi_{L-1}(\dots \varPhi_{1}(\mathbf{x})))$ represent the cascaded functions of multiple layers of a neural network, where $\varPhi_l(\cdot)$ denotes the function of the $l$ -th layer. Let $\mathbf{f}_l = \varPhi_l(\mathbf{f}_{l-1}) \in \mathbb{H}^d$ denote the output of the $l$ -th layer. The layerwise rotation equivariance is given as follows.
|
| 93 |
+
|
| 94 |
+
$$
|
| 95 |
+
\varPhi_ {l} \left(\mathbf {f} _ {l - 1} ^ {(\theta)}\right) = \mathbf {R} \circ \varPhi_ {l} \left(\mathbf {f} _ {l - 1}\right) \circ \overline {{\mathbf {R}}}, \quad \text {s . t .} \quad \mathbf {f} _ {l - 1} ^ {(\theta)} \triangleq \mathbf {R} \circ \mathbf {f} _ {l - 1} \circ \overline {{\mathbf {R}}}. \tag {3}
|
| 96 |
+
$$
|
| 97 |
+
|
| 98 |
+
This equation recursively ensures the rotation-equivariance property of the REQNN. Let us take a neural network with three layers as a toy example: $\Phi (\mathbf{x}^{(\theta)}) = \varPhi_{3}(\varPhi_{2}(\varPhi_{1}(\mathbf{R}\circ\mathbf{x}\circ\overline{\mathbf{R}})))=\varPhi_{3}(\varPhi_{2}(\mathbf{R}\circ\varPhi_{1}(\mathbf{x})\circ\overline{\mathbf{R}}))=\varPhi_{3}(\mathbf{R}\circ\varPhi_{2}(\varPhi_{1}(\mathbf{x}))\circ\overline{\mathbf{R}})= \mathbf{R}\circ \varPhi_3(\varPhi_2(\varPhi_1(\mathbf{x})))\circ \overline{\mathbf{R}} = \mathbf{R}\circ \Phi (\mathbf{x})\circ \overline{\mathbf{R}}$ . Please see the supplementary materials for more discussion.
|
| 99 |
+
|
| 100 |
+
# 3.3 Rules for Rotation Equivariance
|
| 101 |
+
|
| 102 |
+
We propose a set of rules to revise layerwise operations in the original neural network to make them rotation-equivariant, i.e. satisfying Equation (3). Table 1 lists the layerwise operations of the original neural network that have the rotation-equivariance property and those that do not.
|
| 103 |
+
|
| 104 |
+
<table><tr><td>Operation</td><td>Rotation equivariance</td><td>Permutation invariance</td><td>Operation</td><td>Rotation equivariance</td><td>Permutation invariance</td></tr><tr><td>Convolution</td><td>×</td><td>-</td><td>Grouping (k-NN)[37]</td><td>✓</td><td>✓</td></tr><tr><td>ReLU</td><td>×</td><td>-</td><td>Grouping (ball query) [22]</td><td>✓</td><td>×</td></tr><tr><td>Batch-normalization</td><td>×</td><td>-</td><td>Density estimation [37]</td><td>✓</td><td>✓</td></tr><tr><td>Max-pooling</td><td>×</td><td>-</td><td>3D coordinates weighting [37]</td><td>×</td><td>✓</td></tr><tr><td>Dropout</td><td>×</td><td>-</td><td>Graph construction [33]</td><td>✓</td><td>✓</td></tr><tr><td>Farthest point sampling[22]</td><td>✓</td><td>×</td><td></td><td></td><td></td></tr></table>
|
| 105 |
+
|
| 106 |
+
Table 1. Rotation-equivariance and permutation-invariance properties of layerwise operations in the original neural network. “×” denotes that the operation does not have the property, “✓” denotes that the operation naturally has the property, and “-” denotes that the layerwise operation is naturally unrelated to the property (which will be discussed in the last paragraph of Section 3.4). Please see Section 3.3 and Section 3.4 for rules of revising layerwise operations to be rotation-equivariant and permutation-invariant, respectively.
|
| 107 |
+
|
| 108 |
+
The rotation-equivariance property of the revised layerwise operations is proved in the supplementary materials.
|
| 109 |
+
|
| 110 |
+
Convolution: We revise the operation of the convolution layer, $Conv(\mathbf{f}) = w \otimes \mathbf{f} + b$ , to be rotation-equivariant by removing the bias term $b$ , where $w$ is the real-valued parameter and $\mathbf{f}$ is the quaternion feature.
|
| 111 |
+
|
| 112 |
+
ReLU: We revise the ReLU operation as follows to make it rotation-equivariant.
|
| 113 |
+
|
| 114 |
+
$$
|
| 115 |
+
R e L U (\mathbf {f} _ {v}) = \frac {\| \mathbf {f} _ {v} \|}{\max \left\{\| \mathbf {f} _ {v} \| , c \right\}} \mathbf {f} _ {v}, \tag {4}
|
| 116 |
+
$$
|
| 117 |
+
|
| 118 |
+
where $\mathbf{f}_v\in \mathbb{H}$ denotes the $v$ -th element in the feature $\mathbf{f}\in \mathbb{H}^d$ , and $c$ is a positive constant, which can be implemented as $c = \frac{1}{d}\sum_{v = 1}^{d}\| \mathbf{f}_{v}\|$ .
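A minimal sketch of the revised ReLU in Equation (4), with the quaternion feature stored as a (d, 3) array of imaginary parts; setting `c=None` uses the mean-norm choice mentioned above, while the experiments in Section 4 simply use c = 1. The function name is illustrative.

```python
import numpy as np

def req_relu(f, c=None):
    """Rotation-equivariant ReLU (Equation (4)): rescale each quaternion
    element by ||f_v|| / max(||f_v||, c).  f has shape (d, 3)."""
    norms = np.linalg.norm(f, axis=1)          # ||f_v|| for every element v
    if c is None:
        c = norms.mean()                       # c = (1/d) * sum_v ||f_v||
    scale = norms / np.maximum(norms, c)
    return f * scale[:, None]
```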
|
| 119 |
+
|
| 120 |
+
Batch-normalization: We revise the batch-normalization operation to be rotation-equivariant, as follows.
|
| 121 |
+
|
| 122 |
+
$$
|
| 123 |
+
\operatorname {n o r m} \left(\mathbf {f} _ {v} ^ {(i)}\right) = \frac {\mathbf {f} _ {v} ^ {(i)}}{\sqrt {\mathbb {E} _ {j} \left[ \| \mathbf {f} _ {v} ^ {(j)} \| ^ {2} \right] + \epsilon}}, \tag {5}
|
| 124 |
+
$$
|
| 125 |
+
|
| 126 |
+
where $\mathbf{f}^{(i)}\in \mathbb{H}^d$ denotes the feature of the $i$ -th sample in the batch, and $\epsilon$ is a small positive constant that avoids division by zero.
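A sketch of Equation (5) for a batch of quaternion features shaped (batch, d, 3); `eps` plays the role of $\epsilon$ and defaults to the $10^{-5}$ used in the experiments. Names are illustrative.

```python
import numpy as np

def req_batchnorm(f_batch, eps=1e-5):
    """Rotation-equivariant normalization (Equation (5)).
    f_batch: (batch, d, 3) imaginary parts of the quaternion features."""
    sq_norms = np.sum(f_batch ** 2, axis=2)            # ||f_v^(j)||^2, shape (batch, d)
    denom = np.sqrt(sq_norms.mean(axis=0) + eps)       # sqrt(E_j[...] + eps) per element v
    return f_batch / denom[None, :, None]
```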
|
| 127 |
+
|
| 128 |
+
Max-pooling: We revise the max-pooling operation, as follows.
|
| 129 |
+
|
| 130 |
+
$$
|
| 131 |
+
\operatorname {m a x P o o l} (\mathbf {f}) = \mathbf {f} _ {\hat {v}} \quad \text {s . t .} \quad \hat {v} = \underset {v = 1, \dots , d.} {\arg \max } [ \| \mathbf {f} _ {v} \| ]. \tag {6}
|
| 132 |
+
$$
|
| 133 |
+
|
| 134 |
+
Note that for 3D point cloud processing, a special element-wise max-pooling operation designed in [21] is widely used. The revision of this special max-pooling operation can be decomposed into a group of operations of the form defined in Equation (6). Please see our supplementary materials for the revision details of this operation.
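A sketch of the revised max-pooling in Equation (6): the element with the largest quaternion norm is returned, which is rotation-equivariant because the norm is unchanged by $\mathbf{R}\mathbf{f}_v\overline{\mathbf{R}}$. The function name is ours.

```python
import numpy as np

def req_maxpool(f):
    """Rotation-equivariant max-pooling (Equation (6)).  f has shape (d, 3)."""
    v_hat = int(np.argmax(np.linalg.norm(f, axis=1)))  # element with the largest norm
    return f[v_hat]
```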
|
| 135 |
+
|
| 136 |
+
Dropout: For the dropout operation, we randomly drop out a number of quaternion elements from the feature. For each dropped element, both the real and imaginary parts are set to zero. Such a revision naturally satisfies the rotation-equivariance property in Equation (3).
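A sketch of the revised dropout; the inverted-dropout rescaling by 1/(1-p) is our own assumption and is not stated in the text.

```python
import numpy as np

def quaternion_dropout(f, p=0.5, training=True):
    """Drop whole quaternion elements: real and imaginary parts of a dropped
    element are all set to zero.  f has shape (d, 3)."""
    if not training or p == 0.0:
        return f
    keep = (np.random.rand(f.shape[0]) > p).astype(f.dtype)
    return f * keep[:, None] / (1.0 - p)   # 1/(1-p) rescaling is an assumption
```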
|
| 137 |
+
|
| 138 |
+
3D coordinates weighting: The 3D coordinates weighting designed in [37] uses the 3D coordinates' information to reweight intermediate-layer features. This operation is not rotation-equivariant, because a rotation changes the coordinates of the points. To make this operation rotation-equivariant, we use Principal Component Analysis (PCA) to transform the 3D points into a new local reference frame (LRF). Specifically, we choose the eigenvectors corresponding to the first three principal components as the new axes $x$ , $y$ , and $z$ of the LRF. In this way, the coordinate system rotates together with the input points, so the transformed coordinates do not change under rotation. Note that, unlike [45], which relies on the LRF throughout, our method only uses the LRF to revise the 3D coordinates weighting operation, so as to ensure that the specific neural network designed in [37] is rotation equivariant.
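A sketch of the PCA-based local reference frame described above, for the points of one local region given as an (n, 3) array. Sign disambiguation of the eigenvectors, which a robust LRF would also need, is omitted; the function name is ours.

```python
import numpy as np

def pca_lrf(points):
    """Express 3D points in a local reference frame spanned by their principal
    axes, so that the frame rotates together with the input points."""
    centered = points - points.mean(axis=0)
    cov = centered.T @ centered / len(points)
    eigvals, eigvecs = np.linalg.eigh(cov)     # eigenvalues in ascending order
    axes = eigvecs[:, ::-1]                    # new x, y, z: the first three principal directions
    return centered @ axes                     # coordinates in the new LRF
```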
|
| 139 |
+
|
| 140 |
+
The following five layerwise operations in the original neural network, which are implemented based on distances between points, are naturally rotation-equivariant, including the farthest point sampling [22], the $k$ -NN-search-based grouping [33, 37], the ball-query-search-based grouping [22], the density estimation [37], and the graph construction [33] operations.
|
| 141 |
+
|
| 142 |
+
# 3.4 Rules for Permutation Invariance
|
| 143 |
+
|
| 144 |
+
As shown in Table 1, the farthest point sampling [22], and the ball-query-search-based grouping [22] are not permutation-invariant. Therefore, we revise these two operations to be permutation-invariant as follows.
|
| 145 |
+
|
| 146 |
+
Farthest point sampling: The farthest point sampling (FPS) is an operation for selecting a subset of points from the input point cloud, in order to extract local features [22]. Suppose that we aim to select $n$ points from the input point cloud. If $i - 1$ points have already been selected, i.e. $S_{i - 1} = \{x_1,x_2,\ldots ,x_{i - 1}\}$ , then the next selected point $x_{i}$ is the farthest point from $S_{i - 1}$ . The FPS is not permutation-invariant, because the subset selected by this operation depends on which point is selected first. To revise the FPS to be permutation-invariant, we always use the centroid of the point cloud, which is a virtual point, as the first selected point. In this way, the FPS becomes permutation-invariant.
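A sketch of this permutation-invariant FPS: the centroid acts as a virtual first selected point, so the result no longer depends on which input point happens to come first (up to ties in the distances). The function name is ours.

```python
import numpy as np

def permutation_invariant_fps(points, n_samples):
    """Farthest point sampling seeded with the centroid.  points: (N, 3)."""
    centroid = points.mean(axis=0)                        # virtual first selected point
    dist = np.linalg.norm(points - centroid, axis=1)      # distance to the selected set
    selected = []
    for _ in range(n_samples):
        idx = int(np.argmax(dist))                        # farthest point from the selected set
        selected.append(idx)
        dist = np.minimum(dist, np.linalg.norm(points - points[idx], axis=1))
    return np.array(selected)                             # indices of the sampled points
```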
|
| 147 |
+
|
| 148 |
+
Grouping (ball query): The ball-query-search-based grouping is used to find $K$ neighboring points within a radius for each given center point, in order to extract contextual information [22]. This operation is not permutation-invariant, because when there are more than $K$ points within the radius, the top $K$ points will be selected according to the order of points. To revise this operation to be permutation-invariant, we replace the ball query search by $k$ -NN search when the number of points within the radius exceeds the required number.
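A sketch of this revised grouping: an ordinary ball query, except that when more than $K$ points fall inside the radius, the $K$ nearest neighbors are taken instead of the first $K$ in storage order. Padding of groups with fewer than $K$ points is omitted here; names are illustrative.

```python
import numpy as np

def ball_query_knn(points, center, radius, k):
    """Permutation-invariant grouping around one center point.  points: (N, 3)."""
    dist = np.linalg.norm(points - center, axis=1)
    inside = np.where(dist <= radius)[0]
    if len(inside) > k:
        order = np.argsort(dist[inside])       # fall back to k-NN inside the ball
        inside = inside[order[:k]]
    return inside                              # indices of the grouped points
```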
|
| 149 |
+
|
| 150 |
+
Other operations that are implemented based on distances between points are permutation-invariant, because reordering the input points has no effect on the distances between points; these include the $k$ -NN-search-based grouping [33, 37], the
|
| 151 |
+
|
| 152 |
+
density estimation [37], the 3D coordinates weighting [37], and the graph construction [33] operations.
|
| 153 |
+
|
| 154 |
+
Note that there is no need to discuss the permutation invariance of the convolution, ReLU, batch-normalization, max-pooling, and dropout operations, because their permutation invariance depends on receptive fields: if the receptive field of each neural unit stays the same when we reorder the input points, then the operation is permutation-invariant. The receptive fields themselves are determined by other operations (e.g. the FPS and grouping operations).
|
| 155 |
+
|
| 156 |
+
# 3.5 Overview of the REQNN
|
| 157 |
+
|
| 158 |
+
Although using quaternions to represent intermediate-layer features helps achieve the rotation-equivariance property, most existing tasks (e.g. shape classification) require real-valued outputs. Thus, we need to transform quaternion features into ordinary real-valued features, in which each element is a real number. Note that for the point cloud reconstruction task, the features of the entire neural network are quaternions. This is because the outputs required by the point cloud reconstruction task are 3D coordinates, which can be represented by quaternions.
|
| 159 |
+
|
| 160 |
+
Therefore, as Fig. 1 shows, the REQNN consists of (a) a rotation-equivariant quaternion module, (b) a Quaternion2Real module, and (c) a task module.
|
| 161 |
+
|
| 162 |
+
Rotation-equivariant quaternion module: Except for a few layers at the top of the REQNN, all other layers comprise the rotation-equivariant quaternion module. This module is used to extract rotation-equivariant quaternion features. We use the rules proposed in Section 3.3 to revise layerwise operations in the original neural network to be rotation-equivariant, so as to obtain the rotation-equivariant quaternion module. We also use the rules in Section 3.4 to revise these layerwise operations to be permutation-invariant.
|
| 163 |
+
|
| 164 |
+
Quaternion2Real module: The Quaternion2Real module is located after the rotation-equivariant quaternion module and transforms quaternion features into real-valued vectors/matrices/tensors. Specifically, we use an element-wise operation that computes the square of the norm of each quaternion element as the corresponding real-valued feature element. That is, for each $v$ -th element of a quaternion feature, $\mathbf{f}_v = 0 + a_v\mathbf{i} + b_v\mathbf{j} + c_v\mathbf{k}$ , we compute the square of the norm $\| \mathbf{f}_v\|^2 = a_v^2 + b_v^2 + c_v^2$ as the corresponding element of the real-valued feature. Note that the transformed features are rotation-invariant.
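The Quaternion2Real module amounts to one element-wise operation, sketched below for features stored as (..., 3) arrays of imaginary parts; the function name is ours.

```python
import numpy as np

def quaternion2real(f):
    """Squared norm of each quaternion element, a^2 + b^2 + c^2, which is
    invariant to any rotation R f R_conj.  f has shape (..., 3)."""
    return np.sum(f ** 2, axis=-1)
```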
|
| 165 |
+
|
| 166 |
+
Task module: The task module is composed of the last few layers of the REQNN. The task module is used to obtain ordinary real-valued outputs, which are required by the task of 3D shape classification. As aforementioned, the Quaternion2Real module transforms quaternion features into real-valued vectors/matrices/tensors as features. In this way, the task module (i.e. the last few layers) in the REQNN implements various tasks just like traditional neural networks.
|
| 167 |
+
|
| 168 |
+
Complexity of the REQNN: The REQNN's parameter number is no more than that of the original neural network. The REQNN's operation number is theoretically less than three times that of the original neural network. We have compared the numbers of operations and the numbers of parameters of the original neural networks and the REQNNs in Table 2.
|
| 169 |
+
|
| 170 |
+
<table><tr><td rowspan="2"></td><td colspan="2">PointNet++<sup>1</sup> [22]</td><td colspan="2">DGCNN<sup>2</sup> [33]</td><td colspan="2">PointConv [37]</td><td colspan="2">PointNet [21]</td></tr><tr><td>FLOPs(G)</td><td>#Params(M)</td><td>FLOPs(G)</td><td>#Params(M)</td><td>FLOPs(G)</td><td>#Params(M)</td><td>FLOPs(G)</td><td>#Params(M)</td></tr><tr><td>Ori.</td><td>0.87</td><td>1.48</td><td>3.53</td><td>2.86</td><td>1.44</td><td>19.57</td><td>0.30</td><td>0.29</td></tr><tr><td>REQNN</td><td>2.51</td><td>1.47</td><td>8.24</td><td>2.86</td><td>4.22</td><td>20.61</td><td>0.88</td><td>0.28</td></tr></table>
|
| 171 |
+
|
| 172 |
+
Table 2. Comparisons of the number of floating-point operations (FLOPs) and the number of parameters (#Params) of original neural networks and REQNNs. All neural networks were tested on the ModelNet40 dataset.
|
| 173 |
+
|
| 174 |
+
<table><tr><td>Layerwise operation</td><td>PointNet++ [22]</td><td>DGCNN [33]</td><td>PointConv [37]</td><td>PointNet [21]</td></tr><tr><td>Convolution</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>ReLU</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Batch-normalization</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Max-pooling</td><td>✓</td><td>✓</td><td></td><td>✓</td></tr><tr><td>Dropout</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Farthest point sampling</td><td>✓</td><td></td><td>✓</td><td></td></tr><tr><td>Grouping (k-NN)</td><td></td><td>✓</td><td>✓</td><td></td></tr><tr><td>Grouping (ball query) [22]</td><td>✓</td><td></td><td></td><td></td></tr><tr><td>Density estimation [37]</td><td></td><td></td><td>✓</td><td></td></tr><tr><td>3D coordinates weighting [37]</td><td></td><td></td><td>✓</td><td></td></tr><tr><td>Graph construction [33]</td><td></td><td>✓</td><td></td><td></td></tr></table>
|
| 175 |
+
|
| 176 |
+
Table 3. Layerwise operations of different neural networks. “✓” denotes that the network contains the layerwise operation.
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
# 3.6 Revisions of Traditional Neural Networks into REQNNs
|
| 181 |
+
|
| 182 |
+
In this study, we revise the following four neural networks into REQNNs: PointNet++ [22], DGCNN [33], PointConv [37], and PointNet [21].
|
| 183 |
+
|
| 184 |
+
Model 1, PointNet++: As Table 3 shows, the PointNet++ [22] for shape classification includes seven types of layerwise operations. To revise the PointNet++ for shape classification<sup>1</sup> to a REQNN, we take the last three fully-connected (FC) layers as the task module and take other layers as the rotation-equivariant quaternion module. We add a Quaternion2Real module between these two modules. We use rules proposed in Section 3.3 to revise four types of layerwise operations to be rotation-equivariant, including the convolution, ReLU, batch-normalization, and max-pooling operations. We also use rules proposed in Section 3.4 to revise farthest point sampling and ball-query-search-based grouping operations in the original PointNet++ to be permutation-invariant.
|
| 185 |
+
|
| 186 |
+
Model 2, DGCNN: As Table 3 shows, the DGCNN [33] for shape classification contains seven types of layerwise operations. To revise the DGCNN for shape classification to a REQNN, we take the last three FC layers as the task module and take other layers as the rotation-equivariant quaternion module. The Quaternion2Real module $^{2}$ is added between these two modules. We revise four types of layerwise operations to be rotation-equivariant, including the convolution, ReLU, batch-normalization, and max-pooling operations. All layerwise operations in the original DGCNN are naturally permutation-invariant. Therefore, there is no revision for permutation invariance here.
|
| 187 |
+
|
| 188 |
+
Model 3, PointConv: As Table 3 shows, the PointConv [37] for shape classification includes eight types of layerwise operations. To revise the PointConv for shape classification to a REQNN, we take the last three FC layers as the task module and take other layers as the rotation-equivariant quaternion module. The Quaternion2Real module is added between these two modules. We revise the following four types of layerwise operations to be rotation-equivariant, i.e. the convolution, ReLU, batch-normalization, and 3D coordinates weighting operations. We also revise all farthest point sampling operations in the original PointConv to be permutation-invariant.
|
| 189 |
+
|
| 190 |
+
Model 4, PointNet: In order to construct a REQNN for shape reconstruction, we slightly revise the architecture of the PointNet [21] for shape classification. As Table 3 shows, the PointNet for shape classification contains five types of layerwise operations. We take all remaining layers in the PointNet as the rotation-equivariant quaternion module, except for the max-pooling and the Spatial Transformer Network (STN) [13]. The STN discards all spatial information (including the rotation information) of the input point cloud. Therefore, in order to encode rotation information, we remove the STN from the original PointNet.
|
| 191 |
+
|
| 192 |
+
Note that there is no Quaternion2Real module or task module in this REQNN, so all features in the REQNN for reconstruction are quaternion features. We revise the following four types of layerwise operations to be rotation-equivariant, i.e. the convolution, ReLU, batch-normalization, and dropout operations.
|
| 193 |
+
|
| 194 |
+
# 4 Experiments
|
| 195 |
+
|
| 196 |
+
The rotation-equivariance and permutation-invariance properties of REQNNs can be proved theoretically; please see our supplementary materials for details. In order to demonstrate other advantages of REQNNs, we conducted the following experiments. We revised three widely used neural networks to REQNNs for the shape classification task, including PointNet++ [22], DGCNN [33], and PointConv [37]. We revised the PointNet [21] to a REQNN for the point
|
| 197 |
+
|
| 198 |
+
<table><tr><td rowspan="2">Method</td><td colspan="3">ModelNet40 dataset</td><td colspan="3">3D MNIST dataset</td></tr><tr><td>Baseline w/o rotations</td><td>Baseline w/ rotations</td><td>REQNN</td><td>Baseline w/o rotations</td><td>Baseline w/ rotations</td><td>REQNN</td></tr><tr><td>PointNet++<sup>1</sup> [22]</td><td>23.57<sup>3</sup></td><td>26.43</td><td>63.95</td><td>44.15</td><td>51.16</td><td>68.99</td></tr><tr><td>DGCNN<sup>2</sup> [33]</td><td>30.05<sup>3</sup></td><td>31.34</td><td>83.03</td><td>45.37</td><td>49.25</td><td>82.09</td></tr><tr><td>PointConv [37]</td><td>21.93</td><td>23.72</td><td>78.14</td><td>44.63</td><td>50.95</td><td>78.59</td></tr></table>
|
| 199 |
+
|
| 200 |
+
Table 4. Accuracy of 3D shape classification on the ModelNet40 and the 3D MNIST datasets. "Baseline w/o rotations" indicates the original neural network learned without rotations. "Baseline w/ rotations" indicates the original neural network learned with the z-axis rotations (data augmentation with the z-axis rotations has been widely applied in [22, 33, 37]). "REQNN" indicates the REQNN learned without rotations. Note that the accuracy of shape classification reported in [22, 33, 37] was obtained under the test without rotations. The accuracy reported here was obtained under the test with rotations. Therefore, it is normal that the accuracy in this paper is lower than the accuracy in those papers.
|
| 201 |
+
|
| 202 |
+
cloud reconstruction task. In all experiments, we set $c = 1$ in Equation (4) and set $\epsilon = 10^{-5}$ in Equation (5).
|
| 203 |
+
|
| 204 |
+
3D shape classification: We used the ModelNet40 [38] dataset (in this study, we used the corresponding point clouds provided by PointNet [21]) and the 3D MNIST [1] dataset for shape classification. The ModelNet40 dataset consisted of 40 categories, and the 3D MNIST dataset consisted of 10 categories. Each shape consisted of 1024 points. In this experiment, we compared three types of neural networks, including (1) the original neural network learned without rotations, (2) the original neural network learned with z-axis rotations (z-axis rotations were widely used in [22, 33, 37] for data augmentation), and (3) the REQNN learned without rotations (the REQNN naturally had the rotation-equivariance property, so it did not require any rotation augmentation). The testing set was generated by arbitrarily rotating each sample ten times. We will release this testing set when this paper is accepted.
|
| 205 |
+
|
| 206 |
+
As Table $4^{3}$ shows, the REQNN always outperformed all baseline neural networks learned with or without rotations. We achieved the highest accuracy of $83.03\%$ using the REQNN revised from DGCNN<sup>2</sup>. Baseline neural networks that were learned without rotations exhibited very low accuracy (21.93%-31.34% on the ModelNet40 dataset and 44.15%-51.16% on the 3D MNIST dataset). In comparison, baseline neural networks that were learned with z-axis rotations showed only a small improvement in rotation robustness.
|
| 207 |
+
|
| 208 |
+
Besides, we compared the REQNN with several state-of-the-art methods for 3D point cloud processing in two scenarios, as Table 5 shows: neural networks learned with No Rotations and tested with No Rotations, and neural networks learned with No Rotations and tested with Arbitrary Rotations.
|
| 209 |
+
|
| 210 |
+
<table><tr><td>Method</td><td>NR/NR (do not consider rotation in testing)</td><td>NR/AR (consider rotation in testing)</td></tr><tr><td>PointNet [21]</td><td>88.45</td><td>12.47</td></tr><tr><td>PointNet++ [22]</td><td>89.82</td><td>21.35<sup>3</sup></td></tr><tr><td>Point2Sequence [18]</td><td>92.60</td><td>10.53</td></tr><tr><td>KD-Network [16]</td><td>86.20</td><td>8.49</td></tr><tr><td>RS-CNN [19]</td><td>92.38</td><td>22.49</td></tr><tr><td>DGCNN [33]</td><td>92.90</td><td>29.74<sup>3</sup></td></tr><tr><td>PRIN [40]</td><td>80.13</td><td>68.85</td></tr><tr><td>QE-Capsule network [45]</td><td>74.73</td><td>74.07</td></tr><tr><td>REQNN (revised from DGCNN<sup>2</sup>)</td><td>83.03</td><td>83.03</td></tr></table>
|
| 211 |
+
|
| 212 |
+
Table 5. Comparisons of 3D shape classification accuracy between different methods on the ModelNet40 dataset. NR/NR denotes that neural networks were learned and tested with No Rotations. NR/AR denotes that neural networks were learned with No Rotations and tested with Arbitrary Rotations. Experimental results show that the REQNN exhibited the highest rotation robustness. Note that the classification accuracy of the REQNN in scenarios of NR/NR and NR/AR was the same due to the rotation-equivariance property of the REQNN.
|
| 213 |
+
|
| 214 |
+
Note that the classification accuracy of the REQNN in the scenario of NR/NR was the same as that of NR/AR, because the REQNN was rigorously rotation-equivariant. The best REQNN in this paper (i.e. the REQNN revised from the DGCNN $^2$ ) achieved the highest accuracy of $83.03\%$ in the scenario of NR/AR, which indicates the significantly high rotation robustness of the REQNN. Traditional methods, including PointNet [21], PointNet++ [22], Point2Sequence [18], KD-Network [16], RS-CNN [19], and DGCNN [33], achieved high accuracy in the scenario of NR/NR. However, these methods performed poorly in the scenario of NR/AR, because they could not deal with point clouds with unseen orientations. Compared with these methods, PRIN [40] and the QE-Capsule network [45] made some progress in handling point clouds with unseen orientations. Our REQNN outperformed them by $14.18\%$ and $8.96\%$ , respectively, in the scenario of NR/AR.
|
| 215 |
+
|
| 216 |
+
3D point cloud reconstruction: In this experiment, we aimed to prove that we could rotate intermediate-layer quaternion features of the original point cloud to synthesize new point clouds with target orientations. Therefore, we learned a REQNN revised from the PointNet [21] for point cloud reconstruction on the ShapeNet [3] dataset. Each point cloud consisted of 1024 points in our implementation. We took the output quaternion feature of the top fourth linear transformation layer of the REQNN to synthesize quaternion features with different orientations. Such synthesized quaternion features were used to reconstruct point clouds with target orientations.
|
| 217 |
+
|
| 218 |
+

|
| 219 |
+
Fig. 2. Manual manipulation of intermediate-layer features to control the object rotation in 3D point cloud reconstruction. The experiment was conducted to prove that point clouds reconstructed using the synthesized quaternion features had the same orientations as point clouds generated by directly rotating the original point cloud. Here we displayed results of four random orientations for each point cloud. Point clouds ("original" (b-e)) were generated by directly rotating the original point cloud ("original" (a)) around axis $[0.46, 0.68, 0.56]^{\top}$ with angle $\frac{\pi}{3}$ , around axis $[-0.44, -0.61, 0.66]^{\top}$ with angle $\frac{\pi}{4}$ , around axis $[0.34, 0.94, 0.00]^{\top}$ with angle $\frac{\pi}{6}$ , and around axis $[0.16, 0.83, 0.53]^{\top}$ with angle $\frac{2\pi}{3}$ , respectively. Given a specific intermediate-layer quaternion feature of the original point cloud ("original" (a)), we rotated the quaternion feature with the same angles to obtain quaternion features with different orientations, which were used to reconstruct point clouds ("reconstructed" (b-e)).
|
| 220 |
+
As Fig. 2 shows, for each given point cloud (Fig. 2 "original" (a)), we directly rotated it with different angles (Fig. 2 "original" (b-e)). For comparison, we rotated the corresponding quaternion feature of the original point cloud with the same angles to synthesize quaternion features. These synthesized quaternion features were used to reconstruct point clouds (Fig. 2 "reconstructed" (b-e)). We observed that these reconstructed point clouds had the same orientations as those of the point clouds generated by directly rotating the original point cloud.
|
| 221 |
+
|
| 222 |
+
# 5 Conclusion
|
| 223 |
+
|
| 224 |
+
In this paper, we have proposed a set of generic rules to revise various neural networks for 3D point cloud processing into REQNNs. We have theoretically proven that the proposed rules ensure that each layerwise operation in the neural network is rotation-equivariant and permutation-invariant. Experiments on various tasks have shown the rotation robustness of REQNNs.
|
| 225 |
+
|
| 226 |
+
We admit that revising a neural network to a REQNN has some negative effects on its representation capacity. Besides, it is challenging to revise all layer-wise operations in all neural networks for 3D point cloud processing.
|
| 227 |
+
|
| 228 |
+
Acknowledgments The work is partially supported by the National Key Research and Development Project (No. 213), the National Nature Science Foundation of China (No. 61976160, U19B2043, and 61906120), the Special Project of the Ministry of Public Security (No. 20170004), and the Key Lab of Information Network Security, Ministry of Public Security (No.C18608).
|
| 229 |
+
|
| 230 |
+
# References
|
| 231 |
+
|
| 232 |
+
1. https://www.kaggle.com/daavoo/3d-mnist/version/13
|
| 233 |
+
2. Arjovsky, M., Shah, A., Bengio, Y.: Unitary evolution recurrent neural networks. In: International Conference on Machine Learning. pp. 1120-1128 (2016)
|
| 234 |
+
3. Chang, A.X., Funkhouser, T., Guibas, L., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., et al.: Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012 (2015)
|
| 235 |
+
4. Chen, C., Li, G., Xu, R., Chen, T., Wang, M., Lin, L.: Clusternet: Deep hierarchical cluster network with rigorously rotation-invariant representation for point cloud analysis. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 4994-5002 (2019)
|
| 236 |
+
5. Cohen, T.S., Geiger, M., Kohler, J., Welling, M.: Spherical CNNs. In: International Conference on Learning Representations (2018), https://openreview.net/forum?id=Hkbd5xZRb
|
| 237 |
+
6. Cohen, T.S., Welling, M.: Steerable cnns. arXiv preprint arXiv:1612.08498 (2016)
|
| 238 |
+
7. Danihelka, I., Wayne, G., Uria, B., Kalchbrenner, N., Graves, A.: Associative long short-term memory. arXiv preprint arXiv:1602.03032 (2016)
|
| 239 |
+
8. Deng, H., Birdal, T., Ilic, S.: Ppf-foldnet: Unsupervised learning of rotation invariant 3d local descriptors. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 602-618 (2018)
|
| 240 |
+
9. Gaudet, C.J., Maida, A.S.: Deep quaternion networks. In: 2018 International Joint Conference on Neural Networks (IJCNN). pp. 1-8. IEEE (2018)
|
| 241 |
+
10. Guberman, N.: On complex valued convolutional neural networks. arXiv preprint arXiv:1602.09046 (2016)
|
| 242 |
+
11. Hamilton, W.R.: XI. On quaternions; or on a new system of imaginaries in algebra. The London, Edinburgh, and Dublin Philosophical Magazine and Journal of Science 33(219), 58-60 (1848)
|
| 243 |
+
12. Hua, B.S., Tran, M.K., Yeung, S.K.: Pointwise convolutional neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 984-993 (2018)
|
| 244 |
+
13. Jaderberg, M., Simonyan, K., Zisserman, A., et al.: Spatial transformer networks. In: Advances in neural information processing systems. pp. 2017-2025 (2015)
|
| 245 |
+
14. Jiang, M., Wu, Y., Zhao, T., Zhao, Z., Lu, C.: Pointsift: A sift-like network module for 3d point cloud semantic segmentation. arXiv preprint arXiv:1807.00652 (2018)
|
| 246 |
+
15. Kendall, A., Grimes, M., Cipolla, R.: Posenet: A convolutional network for real-time 6-dof camera relocalization. In: Proceedings of the IEEE international conference on computer vision. pp. 2938-2946 (2015)
|
| 247 |
+
16. Klokov, R., Lempitsky, V.: Escape from cells: Deep kd-networks for the recognition of 3d point cloud models. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 863-872 (2017)
|
| 248 |
+
17. Li, Y., Bu, R., Sun, M., Wu, W., Di, X., Chen, B.: Pointcnn: Convolution on x-transformed points. In: Advances in Neural Information Processing Systems. pp. 820-830 (2018)
|
| 249 |
+
18. Liu, X., Han, Z., Liu, Y.S., Zwicker, M.: Point2sequence: Learning the shape representation of 3d point clouds with an attention-based sequence to sequence network. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 33, pp. 8778-8785 (2019)
|
| 250 |
+
19. Liu, Y., Fan, B., Xiang, S., Pan, C.: Relation-shape convolutional neural network for point cloud analysis. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 8895-8904 (2019)
|
| 251 |
+
|
| 252 |
+
20. Parcollet, T., Zhang, Y., Morchid, M., Trabelsi, C., Linarès, G., De Mori, R., Bengio, Y.: Quaternion convolutional neural networks for end-to-end automatic speech recognition. In: Interspeech 2018, 19th Annual Conference of the International Speech Communication Association. pp. 22-26 (2018)
|
| 253 |
+
21. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 652-660 (2017)
|
| 254 |
+
22. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In: Advances in neural information processing systems. pp. 5099-5108 (2017)
|
| 255 |
+
23. Rao, Y., Lu, J., Zhou, J.: Spherical fractal convolutional neural networks for point cloud recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 452-460 (2019)
|
| 256 |
+
24. Shen, Y., Feng, C., Yang, Y., Tian, D.: Mining point cloud local structures by kernel correlation and graph pooling. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4548-4557 (2018)
|
| 257 |
+
25. Shi, S., Wang, X., Li, H.: Pointrcnn: 3d object proposal generation and detection from point cloud. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 770-779 (2019)
|
| 258 |
+
26. Shuster, M.D., et al.: A survey of attitude representations. Navigation 8(9), 439-517 (1993)
|
| 259 |
+
27. Simonovsky, M., Komodakis, N.: Dynamic edge-conditioned filters in convolutional neural networks on graphs. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3693-3702 (2017)
|
| 260 |
+
28. Su, H., Jampani, V., Sun, D., Maji, S., Kalogerakis, E., Yang, M.H., Kautz, J.: Splatnet: Sparse lattice networks for point cloud processing. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2530-2539 (2018)
|
| 261 |
+
29. Thomas, N., Smidt, T., Kearnes, S., Yang, L., Li, L., Kohlhoff, K., Riley, P.: Tensor field networks: Rotation-and translation-equivariant neural networks for 3d point clouds. arXiv preprint arXiv:1802.08219 (2018)
|
| 262 |
+
30. Trabelsi, C., Bilaniuk, O., Zhang, Y., Serdyuk, D., Subramanian, S., Santos, J.F., Mehri, S., Rostamzadeh, N., Bengio, Y., Pal, C.J.: Deep complex networks. arXiv preprint arXiv:1705.09792 (2017)
|
| 263 |
+
31. Van Dyk, D.A., Meng, X.L.: The art of data augmentation. Journal of Computational and Graphical Statistics 10(1), 1-50 (2001)
|
| 264 |
+
32. Wang, W., Yu, R., Huang, Q., Neumann, U.: Sgpn: Similarity group proposal network for 3d point cloud instance segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2569-2578 (2018)
|
| 265 |
+
33. Wang, Y., Sun, Y., Liu, Z., Sarma, S.E., Bronstein, M.M., Solomon, J.M.: Dynamic graph cnn for learning on point clouds. arXiv preprint arXiv:1801.07829 (2018)
|
| 266 |
+
34. Weisstein, E.W.: Euler angles (2009)
|
| 267 |
+
35. Wisdom, S., Powers, T., Hershey, J., Le Roux, J., Atlas, L.: Full-capacity unitary recurrent neural networks. In: Advances in Neural Information Processing Systems. pp. 4880-4888 (2016)
|
| 268 |
+
36. Wolter, M., Yao, A.: Complex gated recurrent neural networks. In: Advances in Neural Information Processing Systems. pp. 10536-10546 (2018)
|
| 269 |
+
37. Wu, W., Qi, Z., Fuxin, L.: Pointconv: Deep convolutional networks on 3d point clouds. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 9621-9630 (2019)
|
| 270 |
+
|
| 271 |
+
38. Wu, Z., Song, S., Khosla, A., Yu, F., Zhang, L., Tang, X., Xiao, J.: 3d shapenets: A deep representation for volumetric shapes. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1912-1920 (2015)
|
| 272 |
+
39. Xiang, L., Ma, H., Zhang, H., Zhang, Y., Zhang, Q.: Complex-valued neural networks for privacy protection. arXiv preprint arXiv:1901.09546 (2019)
|
| 273 |
+
40. You, Y., Lou, Y., Liu, Q., Ma, L., Wang, W., Tai, Y., Lu, C.: Prin: Pointwise rotation-invariant network. arXiv preprint arXiv:1811.09361 (2018)
|
| 274 |
+
41. Yu, L., Li, X., Fu, C.W., Cohen-Or, D., Heng, P.A.: Pu-net: Point cloud upsampling network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2790–2799 (2018)
|
| 275 |
+
42. Zhang, Y., Lu, Z., Xue, J.H., Liao, Q.: A new rotation-invariant deep network for 3d object recognition. In: 2019 IEEE International Conference on Multimedia and Expo (ICME). pp. 1606-1611. IEEE (2019)
|
| 276 |
+
43. Zhang, Z., Hua, B.S., Rosen, D.W., Yeung, S.K.: Rotation invariant convolutions for 3d point clouds deep learning. In: 2019 International Conference on 3D Vision (3DV). pp. 204-213. IEEE (2019)
|
| 277 |
+
44. Zhao, H., Jiang, L., Fu, C.W., Jia, J.: Pointweb: Enhancing local neighborhood features for point cloud processing. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 5565-5573 (2019)
|
| 278 |
+
45. Zhao, Y., Birdal, T., Lenssen, J.E., Menegatti, E., Guibas, L., Tombari, F.: Quaternion equivariant capsule networks for 3d point clouds. arXiv preprint arXiv:1912.12098 (2019)
|
| 279 |
+
46. Zhu, X., Xu, Y., Xu, H., Chen, C.: Quaternion convolutional neural networks. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 631-647 (2018)
|
3drotationequivariantquaternionneuralnetworks/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5287cb60f0222183086c88c61e2e81a31e24615125668408ff9df3436a002600
|
| 3 |
+
size 261771
|
3drotationequivariantquaternionneuralnetworks/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:008d9a8c0d1252c08d2d4aefa9e26b5193b85e1fdc22328ac2bd514fb178bf11
|
| 3 |
+
size 422659
|
3dscenereconstructionfromasingleviewport/b63fd647-1c21-44b0-bebc-65b270d2ca68_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5ba9c44ad6e2fea5c17995c42c2a526e3c8db3a4ead51057308b2626ba059604
|
| 3 |
+
size 73973
|
3dscenereconstructionfromasingleviewport/b63fd647-1c21-44b0-bebc-65b270d2ca68_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4b5c3fde93144fad5698951879590e11c3208168e0b0187235a0e55da366c211
|
| 3 |
+
size 89070
|
3dscenereconstructionfromasingleviewport/b63fd647-1c21-44b0-bebc-65b270d2ca68_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6e7c0c6eac0631c9b248009483fdc83377ae515f51efb809671e6c48b94fdc4a
|
| 3 |
+
size 9947694
|
3dscenereconstructionfromasingleviewport/full.md
ADDED
|
@@ -0,0 +1,277 @@
|
| 1 |
+
# 3D Scene Reconstruction from a Single Viewport
|
| 2 |
+
|
| 3 |
+
Maximilian Denninger $^{1,2[0000-0002-1557-2234]}$ and Rudolph Triebel $^{1,2[0000-0002-7975-036X]}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ German Aerospace Center (DLR), 82234 Wessling, Germany {maximilian.denninger, rudolph.triebel}@dlr.de
|
| 6 |
+
$^{2}$ Technical University Munich (TUM), 80333 Munich, Germany
|
| 7 |
+
|
| 8 |
+
Abstract. We present a novel approach to infer volumetric reconstructions from a single viewpoint, based only on an RGB image and a reconstructed normal image. To overcome the problem of reconstructing regions in 3D that are occluded in the 2D image, we propose to learn this information from synthetically generated high-resolution data. To do this, we introduce a deep network architecture that is specifically designed for volumetric TSDF data and features a tree net structure. Our framework can handle a 3D resolution of $512^{3}$ by introducing a dedicated compression technique based on a modified autoencoder. Furthermore, we introduce a novel loss shaping technique for 3D data that guides the learning process towards regions where free and occupied space are close to each other. As we show in experiments on synthetic and realistic benchmark data, this leads to very good reconstruction results, both visually and in terms of quantitative measures.
|
| 9 |
+
|
| 10 |
+
Keywords: Scene Reconstruction $\cdot$ 3D from Single Images $\cdot$ Space Compression
|
| 11 |
+
|
| 12 |
+
# 1 Introduction
|
| 13 |
+
|
| 14 |
+
One of the most fundamental tasks for visual perception systems - both natural and artificial - is the acquisition of the 3D environment structure from a given visual input, e.g. an image. The main challenge of this task is that this visual input is usually the result of a projection mapping from the 3D environment onto a lower-dimensional manifold and that this mapping is not bijective, i.e. it can not be inverted. Thus, mapping back to the 3D environment, which is denoted as the 3D reconstruction task, is an inverse problem. Nevertheless, humans and other living beings are capable of generating reasonably accurate representations of the true 3D structure, even when provided with only a single visual stimulus, which means that they are able to recover the information that was lost during the projection process. The key resource to achieve this is experience gained earlier, and this is our primary motivation to resort to machine learning techniques to solve the 3D reconstruction task for artificial systems such as robots, only from single images. The potential applications of such a technique are manifold. While most current approaches for generating 3D environment models rely on the fusion of many images acquired at different viewpoints, single
|
| 15 |
+
|
| 16 |
+

|
| 17 |
+
Fig. 1. 3D reconstruction from a single RGB image and a normal image (not shown). On the left the input color image is shown, in the middle the 3D ground truth scene and to the right our reconstruction is depicted. Note especially the reconstruction quality in areas where the 2D view caused occlusions (shown in pink).
|
| 18 |
+
|
| 19 |
+
image reconstruction has the benefit of producing a 3D representation fast and without having to move the camera. This can be very useful for mobile robots that need to explore unknown environments, as it reduces the risk of colliding with obstacles, and it can lead to denser and more accurate maps from less input data. Furthermore, it provides the ability to plan paths through the environment, e.g. to avoid occluded obstacles, even if only a single view is given.
|
| 20 |
+
|
| 21 |
+
The enormous attractiveness of these capabilities yet comes with a number of major challenges that need to be resolved. First and foremost, the curse of dimensionality is the major hurdle when dealing with 3D data, both in terms of memory requirements and regarding the algorithmic formulation. To address these issues, we propose both a novel network architecture that can reason efficiently on high resolution 3D data and a fast and efficient technique to generate and represent volumetric training data. We also introduce a specifically designed loss function for the training process. In summary, our main contributions are:
|
| 22 |
+
|
| 23 |
+
- A tree net architecture to reconstruct volumetric data.
|
| 24 |
+
- An autoencoder to efficiently compress TSDF volumes.
|
| 25 |
+
- A dedicated loss shaping technique for 3D reasoning.
|
| 26 |
+
- A framework to generate TSDF volumes from meshes.
|
| 27 |
+
|
| 28 |
+
In the following sections, we describe each of these contributions in more detail, after discussing previous works that are related to ours.
|
| 29 |
+
|
| 30 |
+
# 2 Related work
|
| 31 |
+
|
| 32 |
+
The four research topics that are most related to our work are shape completion, segmentation, depth reconstruction, and full scene reconstruction. In the following, we show the relations of these works to ours.
|
| 33 |
+
|
| 34 |
+
Shape completion There is some prior work that focuses on the reconstruction of single objects [18,25,26]. In particular, Wu et al [29] introduced 3D ShapeNets, which apply a deep belief network to a given shape database. This network can
|
| 35 |
+
|
| 36 |
+
complete and generate shapes and also repair broken meshes. Later, Wu et al [27] used an autoencoder to convert color into normals and depth, and ultimately to a 3D scene with a resolution of $128^3$ . They extended this using an adversarially trained deep naturalness regularizer, which provides a solution to the problem of blurry mean outputs. In our approach, we also avoid this by training an autoencoder, which we use to compress the TSDF volumes. Tatarchenko et al [24] use an octree generative network to reconstruct objects and scenes. However, this relies on the assumption that the coarse prediction steps can always find even small details, which is often not justified. Therefore, we use a block-wise compression to benefit both from a high resolution and an efficient representation. The 3D-EPN approach introduced by Dai et al [4] can predict object shapes based on sparse input data. Park et al [16] showed an interesting approach, where instead of reconstructing a volume, they reconstruct an SDF value for given query points. This, however, struggles to generalize for complete scenes because of the missing spatial link between the input image and the output. Matryoshka Networks fuse multiple nested depth maps to a single volume [17], but the same problem of generalizing to full scenes appears.
|
| 37 |
+
|
| 38 |
+
Segmentation The reconstruction of scenes is also sometimes covered in the field of semantic segmentation of 3D volumes. Using semantic information the reconstruction task can be improved, as the network knows for some objects what it is reconstructing [8,21]. Song et al [21] showed in their work how to use pure depth data to generate semantic segmented volumetric predictions. Nonetheless, their work requires the use of a depth camera and the knowledge of all appearing objects in the scene to correctly classify them, whereas in our approach, we are free of such limitations. Additionally, instead of using a resolution of $240 \times 144 \times 240$ , we work with $512^3$ . This is 16 times more data. Dai et al [3] showed how to complete a scene in several iterations on different resolutions by also predicting segmentation masks. However, their approach requires a rough 3D scene model, whereas we can start only with an RGB image. Also, our main focus is on scene reconstruction with as little extra knowledge as possible, thus semantic segmentation is not considered here.
|
| 39 |
+
|
| 40 |
+
Depth reconstruction In contrast to our approach, a large amount of prior research has been devoted to depth reconstruction on mono or stereo images. For example, multi-scale CNNs were used by Eigen et al [6] to generate robust depth estimations. A combination of CNN with CRF based regularization was shown by Liu et al [13,14], where they jointly learn CNN and CRF. Ma et al [15] showed how to generate depth images based solely on a few depth points and an input image. Kim et al showed that going from RGB images to TSDF works [12]. However, these are only 2.5D images of the scene and not a complete reconstruction of the occupation of the 3D space, which is the main objective of our approach.
|
| 41 |
+
|
| 42 |
+
Full scene reconstruction This area is mostly related to our work. For example, Firman et al [7] introduced Voxlets, which use random forests to predict unknown voxel neighborhoods. However, their approach only works on the local
|
| 43 |
+
|
| 44 |
+

|
| 45 |
+
Fig. 2. Compressed form of our proposed architecture. On the left, an exemplary RGB and normal image are input to the network. From this, several convolution and pooling operations are performed down to a size of $32^2$ . Then, we split the path of the network in two, where one branch represents the front and the other the rear part of the depth channel. This split is done two more times, and the resulting depth slices are combined into a 3D structure with 64 channels. On that, we perform some 3D convolutions and use the autoencoder to decode the output of the tree net. Note that our actual model has one more tree layer, i.e. the second layer is repeated once more.
|
| 46 |
+
|
| 47 |
+
neighborhood, which limits the prediction of bigger structures. There are methods that reconstruct scenes by placing preexisting CAD models [11]. However, these are limited to the known CAD models, whereas we try to learn general shapes. In contrast to Silberman et al [20], who fill incomplete scenes using a novel CRF method, we build on the assumption that scenes are piece-wise planar and use deep learning to reconstruct a scene from one image. Also, many prior works focus on the reconstruction of small table scenes, where a majority of the objects are partly or entirely known. We believe the main reason for this is the lack of datasets to evaluate on. In order to not limit ourselves to such table scenes, we use the synthetic SUNCG dataset [21] and also the real-world Replica-dataset [22] to generate TSDF volumes on which we can measure our performance. Furthermore, we rely on the toolchain named BlenderProc [5] to generate realistic color and normal images.
|
| 48 |
+
|
| 49 |
+
# 3 Problem Description and General Approach
|
| 50 |
+
|
| 51 |
+
We formulate our problem as finding a mapping from 2D image coordinates $\mathbf{x}_{c,d} = (x_{c,d},y_{c,d})$ to 3D scene coordinates $\mathbf{x}_s = (x_s,y_s,z_s)$ . Our input is an RGB image $I_{c}\colon \varOmega_{c}\to [0,255]^{3}$ and a normal vector image $I_{n}\colon \varOmega_{d}\to [-1,1]^{3}$ , where $\varOmega_c\subset \mathbb{R}^2$ and $\varOmega_d\subset \mathbb{R}^2$ . The output is a high-resolution 3D truncated signed distance field (TSDF) $V\colon \varOmega_v\to [-\sigma_{tsdf},\sigma_{tsdf}]$ , where $\varOmega_v=\{0,\dots,511\}^3$ . This voxel grid represents free space with positive values and occupied areas with negative values. Absolute values are the distances to the closest surface.
|
| 52 |
+
|
| 53 |
+
To perform the 3D reconstruction, we propose to train a specifically designed deep network architecture on synthetic data, which can then be used to infer 3D reconstructions from new test images. An overview of our architecture is shown in Fig. 2, where the details will be presented in the following sections. Note that the input of this network consists of an RGB image and a normal vector image. Our motivation to use surface normals as an additional input is to provide continuity information so that planar surfaces can be reconstructed more precisely. Here, we take inspiration from Zhang et al [31] who also used normals for depth generation. In this paper, we focus on the 3D reconstruction part, and we use normals from a simulation pipeline named BlenderProc [5], which can generate RGB and normal images on the SUNCG dataset, as well as normal images on the Replica-dataset [21, 22]. Throughout this paper, such renderings were used to obtain training data, while during testing we use a U-net architecture [19] trained solely on SUNCG to generate normals (see Sec. 7).
|
| 54 |
+
|
| 55 |
+
For the design of our training procedure, we had to face three major challenges. First, we had to find a way to efficiently produce and represent the output training samples, which consist of voxel grids with $512^{3} = 134,217,728$ voxels. Second, we had to design a network architecture that can represent 3D spatial information in hierarchical form. And third, we needed to find an appropriate loss function for the training process. All three parts will be described next.
|
| 56 |
+
|
| 57 |
+
# 4 Generating Synthetic 3D Training Data
|
| 58 |
+
|
| 59 |
+
Our output data consist of high-resolution 3D TSDF voxel grids. In comparison to meshes or point clouds, TSDF volumes offer a dense representation, providing a deterministic reconstruction target that we can align with the input domain. TSDF grids are widely used in computer vision, and there are several approaches to compute these volumes fast. However, most of them use approximations, because an accurate result is usually not needed and their test scenes have a smaller resolution than $512^{3}$ [28]. We propose three steps to achieve an accurate result at such a resolution. First, we simplify the reconstruction task by aligning the output voxel grids with the camera frame instead of the world frame, which is explained next. Then, we employ a fast algorithm to compute a TSDF voxel grid from a given 3D scene (see Sec. 4.2). In the end, we use a compression algorithm to store the voxel data with comparably low memory requirements (see Sec. 4.3).
|
| 60 |
+
|
| 61 |
+
# 4.1 Viewport Alignment
|
| 62 |
+
|
| 63 |
+
An important distinction between our work and most others in learning-based 3D reconstruction [4,27] is that for our training procedure, we use input RGB images and 3D voxel grids that are aligned within the same coordinate frame, namely the camera frame. For that, we transform vertices used for training from world coordinates $\mathbf{x}_w$ into the camera frame using the camera matrix $C$ , i.e. $\mathbf{x}_s = C\mathbf{x}_w$ . Then, a perspective projection $P$ is applied to $\mathbf{x}_s$ such that the camera frustum is mapped to a cubical 3D volume. The resulting projected points $\mathbf{x}_p = P\mathbf{x}_s$ are
|
| 64 |
+
|
| 65 |
+

|
| 66 |
+
Fig. 3. Triangle $t$ with all normals in orange that are used to efficiently compute $d(\mathbf{v},t)$. The corresponding orthogonal planes are shown in blue and dashed.
|
| 67 |
+
|
| 68 |
+

|
| 69 |
+
Fig. 4. Approximate structure of the autoencoder used to compress TSDF volumes, which is applied to each $16^{3}$ block plus padding $(= 30^{3})$ of the $512^{3}$ input space. The result is the encoded latent values in the middle, which we use as the reconstruction target in the tree network.
|
| 70 |
+
|
| 71 |
+
then in the range $[-1, 1]^3$ . Now we voxelize this 3D volume with a resolution of 512. Then, the center point $\mathbf{x}_e$ of each voxel can be computed from its index $\mathbf{v}$ as $\mathbf{x}_e = (\mathbf{v} / 512) \cdot 2 - 1$ . The center $\mathbf{x}_e = (x_e, y_e, z_e)$ can now be directly mapped to the 2D image, which also has a resolution of 512, i.e. $\mathbf{x}_c = (x_e, y_e) \cdot 256 + 256$ . Similarly, the inverse mapping from pixels $\mathbf{x}_c$ to points $\mathbf{x}$ in the 3D grid is done by setting $x = x_c$ , $y = y_c$ and $z = (\pi (\mathbf{x}_c) - d_{min}) / (d_{max} - d_{min}) \cdot 2 - 1$ , where $\pi$ is the projected depth of the pixel position $\mathbf{x}_c$ , and $d_{min}$ and $d_{max}$ are predefined values for the minimal and maximal depth range within which the voxel grid is defined. In our implementation, we use $d_{min} = 1m$ and $d_{max} = 4m$ . In contrast to this inverse mapping, we predict the TSDF values along the camera ray.
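The following is a minimal numpy sketch of this viewport alignment, assuming the resolutions and depth range stated above; the function names and the explicit `depth` argument are our own illustrative choices, not part of the released code.

```python
import numpy as np

RES = 512                 # voxel grid and image resolution
D_MIN, D_MAX = 1.0, 4.0   # depth range in meters, as used in our implementation

def voxel_center(v):
    """Map a voxel index in {0, ..., 511}^3 to its center in the [-1, 1]^3 cube."""
    return (np.asarray(v, dtype=float) / RES) * 2.0 - 1.0

def voxel_to_pixel(v):
    """Project a voxel center onto the 512 x 512 image plane."""
    x_e, y_e, _ = voxel_center(v)
    return np.array([x_e, y_e]) * 256.0 + 256.0

def pixel_to_grid(x_c, y_c, depth):
    """Inverse mapping: a pixel plus its projected depth gives a point in the grid."""
    z = (depth - D_MIN) / (D_MAX - D_MIN) * 2.0 - 1.0
    return np.array([x_c, y_c, z])   # x and y are taken directly from the pixel, as in the text

# The central voxel maps to the image center, as expected.
print(voxel_to_pixel((256, 256, 256)))   # [256. 256.]
```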
|
| 72 |
+
|
| 73 |
+
This means that in our 3D reconstruction of the occupancy, we can directly link the input pixel values with the occupancies along the camera rays. This way, we can learn the transformation from a 2D image to a 3D space. For visualization, we project the predicted voxels back from the cube to the camera frustum.
|
| 74 |
+
|
| 75 |
+
# 4.2 Fast Generation of TSDF Voxel Data
|
| 76 |
+
|
| 77 |
+
To produce synthetic 3D training samples, we start with a set $\mathcal{T}$ of 3D triangles, which we map into the camera frame using a predefined transform $\tau = P\cdot C$ . Then, for the center point $\mathbf{x}$ of each voxel $\mathbf{v} = (v_{x},v_{y},v_{z})$ we need to compute the distance $d_{x}$ to the closest point on a triangle $t\in \mathcal{T}$ and truncate the absolute distance at a maximum value $\sigma_{tsdf}$ , i.e.
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
V[\mathbf{v}] = d_{x} = \max\left(-\sigma_{tsdf}, \min\left(\sigma_{tsdf}, \min_{t \in \mathcal{T}} \{d(\mathbf{x}, t)\}\right)\right), \quad \forall \mathbf{v} \in \varOmega_{v}. \tag{1}
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
To achieve that, we developed a very fast technique that transforms the triangles and computes $d(\mathbf{x}, t)$ for each voxel $\mathbf{v}$. It uses a combination of flood filling, octrees, and a fast distance computation. With this, we can process the 134 million voxels on the order of seconds. A more detailed description
|
| 84 |
+
|
| 85 |
+
Algorithm 1. In this distance calculation algorithm we distinguish three groups of variables (shown in different colors in the original listing), which correspond to the main plane, the edge planes, and the border planes.
|
| 86 |
+
```txt
|
| 87 |
+
1: procedure CALCULATEDISTANCE(Point p)
|
| 88 |
+
2: plnDist ← mainPln.distTo(p)
|
| 89 |
+
3: for nr ∈ [1,2,3] do
|
| 90 |
+
4: if edgePln[nr].distTo(p) < 0 then ▷ Outside, check border planes
|
| 91 |
+
5: if borderPln[nr][1].distTo(p) < 0 then ▷ Dist to left point
|
| 92 |
+
6: return sgn(plnDist) · ||p - borderPln[nr][1].p||_2
|
| 93 |
+
7: else if borderPln[nr][2].distTo(p) < 0 then ▷ Dist to right point
|
| 94 |
+
8: return sgn(plnDist) · ||p - borderPln[nr][2].p||_2
|
| 95 |
+
9: else ▷ Dist to edge
|
| 96 |
+
10: return sgn(plnDist) · edgeLine[nr].distTo(p)
|
| 97 |
+
11: return plnDist
|
| 98 |
+
```
|
| 99 |
+
|
| 100 |
+
of all individual steps is given in the supplementary material, and we also refer to our implementation, which is available online: https://github.com/DLR-RM/SingleViewReconstruction. The key component here is the fast computation of $d(\mathbf{x},t)$ using modern hardware. For this, we first precompute 10 vectors for each triangle $t$, namely the normal vector $\mathbf{n}$ of the triangle plane $\mathfrak{P}$, the vectors $\mathbf{n}^{\perp}$ that are orthogonal to the edges of $t$ and lie inside $\mathfrak{P}$, as well as the vectors $\mathbf{n}^{+}$ and $\mathbf{n}^{-}$ that are parallel to the edges of $t$ (see Fig. 3). Next, we compute the distance $d(\mathbf{x},\mathfrak{P})$ between $\mathfrak{P}$ and $\mathbf{x}$ and check whether the projection of $\mathbf{x}$ onto $\mathfrak{P}$ lies inside $t$, using the normals $\mathbf{n}^{\perp}$. If so, $d(\mathbf{x},t)$ is equal to $d(\mathbf{x},\mathfrak{P})$; otherwise $\mathbf{x}$ is closer to an edge or a vertex than to the surface. For the final check, we use the normals $\mathbf{n}^{+}$ and $\mathbf{n}^{-}$ of the border planes. If the distances to these planes are positive, the distance is calculated towards the edge; if one of them is negative, the closest distance is to one of the vertices, see Algorithm 1. This distance calculation has to be done for all voxels and all polygons. Finally, we quantize the TSDF volume to 16 bit and compress it with gzip, which reduces the size by a factor of ten.
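For reference, the sketch below computes the distance from a point to a single triangle with the same case distinction (face, edge, or vertex region) as Algorithm 1; it uses a generic closest-point formulation instead of the precomputed plane normals, so it illustrates the geometry rather than the optimized implementation in the repository.

```python
import numpy as np

def point_triangle_distance(p, a, b, c):
    """Unsigned distance from point p to the triangle (a, b, c)."""
    p, a, b, c = (np.asarray(v, dtype=float) for v in (p, a, b, c))
    ab, ac, ap = b - a, c - a, p - a
    d1, d2 = ab @ ap, ac @ ap
    if d1 <= 0 and d2 <= 0:                                # closest to vertex a
        return np.linalg.norm(ap)
    bp = p - b
    d3, d4 = ab @ bp, ac @ bp
    if d3 >= 0 and d4 <= d3:                               # closest to vertex b
        return np.linalg.norm(bp)
    vc = d1 * d4 - d3 * d2
    if vc <= 0 and d1 >= 0 and d3 <= 0:                    # closest to edge ab
        t = d1 / (d1 - d3)
        return np.linalg.norm(p - (a + t * ab))
    cp = p - c
    d5, d6 = ab @ cp, ac @ cp
    if d6 >= 0 and d5 <= d6:                               # closest to vertex c
        return np.linalg.norm(cp)
    vb = d5 * d2 - d1 * d6
    if vb <= 0 and d2 >= 0 and d6 <= 0:                    # closest to edge ac
        t = d2 / (d2 - d6)
        return np.linalg.norm(p - (a + t * ac))
    va = d3 * d6 - d5 * d4
    if va <= 0 and (d4 - d3) >= 0 and (d5 - d6) >= 0:      # closest to edge bc
        t = (d4 - d3) / ((d4 - d3) + (d5 - d6))
        return np.linalg.norm(p - (b + t * (c - b)))
    denom = va + vb + vc                                   # projection lies inside the face
    v, w = vb / denom, vc / denom
    return np.linalg.norm(p - (a + v * ab + w * ac))

def signed_distance(p, a, b, c):
    """The sign follows the side of the triangle plane, as in Algorithm 1."""
    n = np.cross(np.subtract(b, a), np.subtract(c, a))
    sign = 1.0 if (np.subtract(p, a) @ n) >= 0 else -1.0
    return sign * point_triangle_distance(p, a, b, c)
```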
|
| 101 |
+
|
| 102 |
+
# 4.3 Spatial Compression
|
| 103 |
+
|
| 104 |
+
A straightforward implementation of our high-resolution TSDF volume $V$ with $512^{3}$ voxels would require 536.87 MB per scene, which renders the training process infeasible on current hardware. Therefore, we employ a block-wise compression of $V$ to a size of $64 \times 32^{3}$. This results in 8.38 MB per scene, a compression factor of 64. The compression is done with an autoencoder as shown in Fig. 4. First, we use 3D convolutions in combination with valid padding on an input that is larger than the output, thereby shrinking each block from $30^{3}$ voxels down to 64 latent values and decoding it back up to $16^{3}$. Second, we balance the input to the autoencoder so that the much more likely empty voxels are mostly removed, focusing on the blocks that contain surfaces. Third, we add loss shaping to focus on the reconstruction of the surfaces, with:
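To make the block layout concrete, the sketch below extracts the padded $30^{3}$ input for one $16^{3}$ output block and verifies the memory figures; the padding of 7 voxels per side and the edge padding at the volume borders are our assumptions, and the encoder network itself is not included.

```python
import numpy as np

RES, BLOCK, PAD = 512, 16, 7      # 16 + 2 * 7 = 30 voxels per padded block side
LATENT = 64                        # latent values per block

def padded_block(volume, bx, by, bz):
    """Return the 30^3 padded input for the block with index (bx, by, bz).

    For clarity the whole volume is padded here; a real implementation
    would pad once and reuse the result for all 32^3 blocks.
    """
    padded = np.pad(volume, PAD, mode="edge")
    x, y, z = bx * BLOCK, by * BLOCK, bz * BLOCK
    side = BLOCK + 2 * PAD
    return padded[x:x + side, y:y + side, z:z + side]

blocks_per_axis = RES // BLOCK                      # 32 blocks per axis
full_bytes = RES ** 3 * 4                           # float32 TSDF: ~536.87 MB
latent_bytes = LATENT * blocks_per_axis ** 3 * 4    # 64 x 32^3 floats: ~8.4 MB
print(full_bytes / latent_bytes)                    # compression factor of 64
```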
|
| 105 |
+
|
| 106 |
+
$$
|
| 107 |
+
\mathrm{loss}(x, y) = \|x - y\|_{1} \cdot \left(1 + \mathcal{N}\left(0, \frac{\sigma_{tsdf}}{4}\right)(y) \cdot \frac{4}{\sigma_{tsdf}}\right) \tag{2}
|
| 108 |
+
$$
|
| 109 |
+
|
| 110 |
+

|
| 111 |
+
a) Full TSDF
|
| 112 |
+
|
| 113 |
+

|
| 114 |
+
b) Full flipped TSDF
|
| 115 |
+
Fig. 5. The two on the left represent a full TSDF; the two on the right are projected TSDF volumes, which use a camera projecting beams into the scene. Both also have a flipped version, in which the empty and the occupied space are zero.
|
| 116 |
+
|
| 117 |
+

|
| 118 |
+
c) Proj. TSDF
|
| 119 |
+
|
| 120 |
+

|
| 121 |
+
d) Proj. flipped TSDF
|
| 122 |
+
|
| 123 |
+

|
| 124 |
+
|
| 125 |
+
Here $x$ is the prediction and $y$ is the label. The scaling value of the Gaussian was determined experimentally. We use a complete TSDF, not a projected or a flipped TSDF, see Fig. 5. We found that a projected TSDF volume can generate hard cuts in the resulting output volumes, which means that moving the input by one voxel generates a big loss at these boundaries. With full TSDF volumes this does not happen, so the network can learn the three-dimensional representation of an object in space. Autoencoders trained on the flipped TSDF performed considerably worse after training; we did not investigate this further.
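A minimal numpy version of Eq. (2) is shown below, assuming that the Gaussian term denotes the density of $\mathcal{N}(0, \sigma_{tsdf}/4)$ evaluated at the label; the reduction to a mean over all voxels is our choice.

```python
import numpy as np

def tsdf_autoencoder_loss(x, y, sigma_tsdf):
    """Per-voxel L1 loss weighted towards values close to a surface (Eq. 2).

    x: predicted TSDF block, y: ground-truth TSDF block (same shape).
    """
    sigma = sigma_tsdf / 4.0
    gauss = np.exp(-0.5 * (y / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))
    weight = 1.0 + gauss * 4.0 / sigma_tsdf
    return np.mean(np.abs(x - y) * weight)
```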
|
| 126 |
+
|
| 127 |
+
# 5 Proposed Network Architecture
|
| 128 |
+
|
| 129 |
+
The major challenge of our framework is to represent the 3D occupancy information of the voxel grid in a deep neural network. To solve this, we propose a special architecture based on a tree structure, which helps us to transform a 2D image into a volumetric 3D space and is described next. To perform the 3D reconstruction from a single image, we designed a network architecture that can split the input data along the depth dimension. One way of doing this is to use the feature channels as the depth dimension at some point within the network. This, however, requires the network to transform the 2D input to 3D in one step, which failed in our experiments for complex scenes.
|
| 130 |
+
|
| 131 |
+
# 5.1 Tree Network
|
| 132 |
+
|
| 133 |
+
To address this problem, we propose a tree architecture, where each level in the binary tree splits the depth dimension into a front and back part. That means the first tree node splits the scene into foreground and background, where those are defined by the distance to the camera. In Sec. 4.1, we showed that our input images are aligned with the output frame, which makes this splitting possible. We repeat the splitting process three more times so that the leaves of the tree contain small slices of the depth dimension while still representing the full spatial resolution. These slices are then combined into a 3D volume and processed by further 3D convolutions to remove small artifacts.
|
| 134 |
+
|
| 135 |
+
In Fig. 6 such a tree is depicted. The first node is fed with the output of some convolutional and pooling layers to scale the input from $512^{2}$ down to
|
| 136 |
+
|
| 137 |
+

|
| 138 |
+
Fig. 6. The basic tree architecture, which generates a 3D volume based on a 2D input. Each level is meant to perform a split along the depth dimension, and in the end all single paths of the tree are concatenated to create the third dimension.
|
| 139 |
+
|
| 140 |
+
$32^{2}$. This image is then split three times, and the resulting colored leaves are combined into a 3D tensor. The feature channels in the leaf nodes can then have only four channels to obtain the desired depth of 32. Each path builds up different CNN parts to learn the size of different objects at different scales. Instead of a single-track sequential model, where at some point the feature channels could be mapped to the depth dimension, our network has several layers to capture the relationship between the input and the depth dimension.
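The final re-assembly of the leaves can be illustrated with a short numpy sketch, using eight leaves with four channels each as in the description above; the shapes and the front-to-back ordering are our assumptions.

```python
import numpy as np

def assemble_volume(leaf_outputs):
    """Concatenate leaf feature maps of shape (32, 32, c) along the depth axis.

    Each channel of a leaf is interpreted as one depth slice, and the leaves
    are ordered from front to back, so the result has shape (32, 32, depth).
    """
    return np.concatenate(leaf_outputs, axis=-1)

leaves = [np.zeros((32, 32, 4)) for _ in range(8)]   # 8 leaves x 4 channels
print(assemble_volume(leaves).shape)                 # (32, 32, 32)
```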
|
| 141 |
+
|
| 142 |
+
# 5.2 Multipath
|
| 143 |
+
|
| 144 |
+
The proposed tree network has a bottleneck when forming the 3D volume from the 2D feature channels. As there are only 32 leaf nodes with just two feature channels each, the combination leads to only one 3D volume, whereas the compressed output has 64. We address this by increasing the output of the leaf nodes to 128 channels and then creating 64 3D volumes out of them. This is achieved by splitting up each leaf node's feature maps and using two feature channels per created 3D volume. In Fig. 7, we show this for two 3D volumes with only eight leaf nodes, which means that each leaf node has eight feature channels. The first half of each node is used in the left 3D volume and the second half in the right.
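The multipath split of Fig. 7 can be sketched as follows, again with hypothetical shapes: eight leaves with eight channels each, where the first half of every leaf's channels contributes to the first 3D volume and the second half to the second.

```python
import numpy as np

def multipath_volumes(leaf_outputs, n_volumes):
    """Split each leaf's channels evenly over n_volumes separate 3D volumes."""
    volumes = []
    for i in range(n_volumes):
        slices = []
        for leaf in leaf_outputs:                         # leaf: (H, W, C)
            per_volume = leaf.shape[-1] // n_volumes
            slices.append(leaf[..., i * per_volume:(i + 1) * per_volume])
        volumes.append(np.concatenate(slices, axis=-1))   # (H, W, depth)
    return volumes

leaves = [np.zeros((32, 32, 8)) for _ in range(8)]
volumes = multipath_volumes(leaves, n_volumes=2)
print(len(volumes), volumes[0].shape)                     # 2 (32, 32, 32)
```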
|
| 145 |
+
|
| 146 |
+
# 5.3 General architecture
|
| 147 |
+
|
| 148 |
+
Inspired by He et al. [10], we use ResNet blocks, where additionally each block uses dilated convolutions in an inception fashion [23, 30]. This means that the input of each ResNet step is given to three different convolutional blocks with different dilation rates. This dilation inception step is done twice in each ResNet block. In our experiments, dilation rates of 1, 2, and 4 with a split of $50\%$, $25\%$, $25\%$ over the desired filter channels performed best. These three are
|
| 149 |
+
|
| 150 |
+

|
| 151 |
+
Fig. 7. The upper row represents the leaves of our tree architecture. In this example each node has eight feature channels, which are evenly split over the resulting volumes.
|
| 152 |
+
|
| 153 |
+
then concatenated again and used as an input to the next layer. Our tree uses two ResNet blocks in the first two layers and three in the last two layers.
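As an illustration of such a dilated inception ResNet step, here is a small PyTorch sketch for the 2D part of the tree; it is not the released implementation, and the layer count, normalization, and activation placement are our assumptions.

```python
import torch
import torch.nn as nn

class DilatedInceptionBlock(nn.Module):
    """Residual step whose input is fed to three parallel 3x3 convolutions
    with dilation rates 1, 2 and 4; the filters are split 50%/25%/25% over
    the branches and concatenated again before the residual addition."""

    def __init__(self, channels):
        super().__init__()
        c1, c2 = channels // 2, channels // 4
        self.branches = nn.ModuleList([
            nn.Conv2d(channels, c1, 3, padding=1, dilation=1),
            nn.Conv2d(channels, c2, 3, padding=2, dilation=2),
            nn.Conv2d(channels, channels - c1 - c2, 3, padding=4, dilation=4),
        ])
        self.act = nn.ReLU()

    def forward(self, x):
        # In the paper this dilation inception step is applied twice per
        # ResNet block; a single application is shown here for brevity.
        out = torch.cat([branch(x) for branch in self.branches], dim=1)
        return self.act(out + x)

block = DilatedInceptionBlock(64)
print(block(torch.zeros(1, 64, 32, 32)).shape)    # torch.Size([1, 64, 32, 32])
```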
|
| 154 |
+
|
| 155 |
+
After performing the multipath joining explained in Sec. 5.2, we apply several 3D convolutions to the joined result. This smooths out errors introduced by tree paths that performed worse than the others. We use 9 layers consisting of a sequence of normal and separable convolutions to save memory [2], alternating between one normal 3D convolution and two separable convolutions performed along all three axes. All of them use 64 filters, which we split in each layer over four paths. These paths again use dilations with rates of 1, 2, 4, and 8, where the filter split is 32, 16, 8, and 8.
|
| 156 |
+
|
| 157 |
+
# 6 Loss Shaping
|
| 158 |
+
|
| 159 |
+
An essential part of our pipeline is our loss shaping, which we use to focus the attention of the network on parts of the TSDF volume that are more relevant for a correct reconstruction. We distinguish two kinds of loss shaping: one related to the voxel space and one to the tree net structure. Both are described next.
|
| 160 |
+
|
| 161 |
+
# 6.1 Output loss shaping
|
| 162 |
+
|
| 163 |
+
When we know where the surface in the TSDF volume is, we can increase the loss around and on the surface by a factor $\sigma_{Surface}$ to make sure that these encoded latent values are correctly regressed. The same is done for the free space in front of an object; this value $\sigma_{Free}$ is chosen to be smaller than $\sigma_{Surface}$. Additionally, the free space behind objects receives an increased loss factor to make sure that those areas, which are reachable but not visible from the camera point of view, are reconstructed well. The distance to the closest visible and free voxel determines the strength of the factor, which decreases from $\sigma_{Free}$ to a fixed value $\sigma_{NonVisibleFree}$. This decline extends over at most $7\%$ of the volume size, which we found gives sufficiently good results.
|
| 164 |
+
|
| 165 |
+
In Fig. 8, the loss factors for a 2D scene with two objects (in blue) are shown. The camera is on the left side of the frame and is oriented towards the right. All voxels with circles in them are free. The stars are used for the area around and below the surface of an object, and the rectangles depict the areas which are not
|
| 166 |
+
|
| 167 |
+

|
| 168 |
+
Fig. 8. Top-down 2D map of a scene with two objects, a blue pyramid on the left and a wall on the right, which shows the loss factors used, as seen from a camera on the left. The legend lists the weight values that are used in our approach.
|
| 169 |
+
|
| 170 |
+
reachable. It is important to note that the factor for the first hit onto an object is 100, to make sure that this surface is regressed correctly. The surfaces behind it only receive an increased factor if they can be reached from the free space. To determine these back surfaces we use a flood filling algorithm. Using this loss shaping, our network is able to focus on the more relevant parts of the reconstruction and neglects the parts which are deemed less important. This improves the reconstruction performance, see Sec. 7.
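A simplified sketch of this output loss shaping for a single camera ray is given below; it decides visibility per ray instead of using the 3D flood fill, treats every later surface as reachable, and all weight values except the factor of 100 for the first hit are placeholders.

```python
import numpy as np

def ray_loss_weights(tsdf_ray, sigma_surface=100.0, sigma_free=10.0,
                     sigma_non_visible_free=2.0, decay_frac=0.07):
    """Loss factors along one camera ray of TSDF values, ordered front to back."""
    n = len(tsdf_ray)
    weights = np.ones(n)                              # unreachable space keeps weight 1
    occupied = np.asarray(tsdf_ray) < 0
    first_hit = int(np.argmax(occupied)) if occupied.any() else n
    weights[:first_hit] = sigma_free                  # visible free space
    decay_len = max(1, int(decay_frac * n))
    for i in range(first_hit, n):
        if occupied[i]:
            weights[i] = sigma_surface                # surface; first hit weighted with 100
        else:
            t = min(1.0, (i - first_hit) / decay_len) # distance to visible free space
            weights[i] = (1.0 - t) * sigma_free + t * sigma_non_visible_free
    return weights
```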
|
| 171 |
+
|
| 172 |
+
# 6.2 Tree loss shaping
|
| 173 |
+
|
| 174 |
+
To speed up the training, we enforce the splitting of the depth dimension already in the tree by comparing the output of each node with the average of the corresponding depth range. For the first split, this means we take the left node's result and branch into a $1 \times 1$ convolution to change the number of feature channels so that they match the target output, see Fig. 9. Then we take the target output, use only its first half, average it along the depth dimension, and compare this slice with the branched output. This process is repeated in every node, where each time the corresponding depth slice is averaged and compared with the branched version. All these losses are combined and weighted, where the second layer in the tree receives a lower weight than the leaf nodes. We used the values [0.2, 0.3, 0.5, 0.8] from top to bottom for our tree, which has a height of 5. Finally, we scale this weighted tree loss with a factor of 0.4 and add it to the final loss. Additionally, before reducing the difference between all these losses, we multiply by the averaged loss map introduced in Sec. 6.1. This again helps to focus on the relevant surfaces of the TSDF volume.
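The following numpy sketch illustrates the idea on a simplified single-channel target: the depth range covered by each node is averaged and compared to the node's branched output, optionally weighted with the loss map from Sec. 6.1; the shapes and the level convention are our assumptions.

```python
import numpy as np

def tree_targets(target_volume, level):
    """Average the (H, W, depth) target over the depth ranges of the 2**level nodes."""
    depth = target_volume.shape[-1]
    n_nodes = 2 ** level
    step = depth // n_nodes
    return [target_volume[..., i * step:(i + 1) * step].mean(axis=-1)
            for i in range(n_nodes)]

def tree_level_loss(node_outputs, target_volume, level, level_weight, loss_map=None):
    """Weighted L1 between each node's branched output and its averaged depth slice."""
    losses = []
    for out, tgt in zip(node_outputs, tree_targets(target_volume, level)):
        diff = np.abs(out - tgt)
        if loss_map is not None:           # averaged loss map from Sec. 6.1
            diff = diff * loss_map
        losses.append(diff.mean())
    return level_weight * float(np.mean(losses))
```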
|
| 175 |
+
|
| 176 |
+
# 7 Experiments
|
| 177 |
+
|
| 178 |
+
# 7.1 Test setup
|
| 179 |
+
|
| 180 |
+
The evaluation of our approach is first done on the synthetic SUNCG dataset [21], from which we already used one split for training. The second evaluation is done on the real-world Replica-dataset [22], which is the only dense, hole-free real-world dataset available. It stands in contrast to datasets like Matterport3D [1], where holes introduced by the scanning process have not been filled manually.
|
| 181 |
+
|
| 182 |
+

|
| 183 |
+
Fig. 9. For each node, the corresponding depth layers of the output are averaged along the depth dimension and then compared to a reshaped tensor from the node. This already enforces a sense of the encoded 3D structure within the tree.
|
| 184 |
+
|
| 185 |
+
As described in Sec. 4.2, for the training we first generate the TSDF volumes for the camera positions sampled from SUNCG. Then we create the corresponding loss volumes and finally the RGB images using BlenderProc [5]. We tested both with the generated normal images and with the synthetic ground-truth normals to see how well our network can deal with the limitations of the normal generation. All tests were performed with models that were exclusively trained on the data generated with BlenderProc on the SUNCG dataset. For the training, we used around 130,000 image pairs. We did not finetune on the Replica-dataset, nor did we finetune with the generated normals, in order to show the lower bound of this approach. The reconstruction network was evaluated on 500 image-scene pairs from the SUNCG dataset. For the Replica-dataset, we sampled ten cameras per scene, as in SUNCG, which resulted in 180 image pairs. The creation of the encoded scene from a color and normal image takes around 0.11 seconds, while the reconstruction of the full scene with the decoder takes around 5.1 seconds.
|
| 186 |
+
|
| 187 |
+
# 7.2 Qualitative results
|
| 188 |
+
|
| 189 |
+
In Fig. 10, we show some qualitative results on the real-world Replica-dataset. As in previous figures, the areas in pink are invisible to the camera and were not assigned a color. For failed reconstructions, these areas are too far away from the true surface to receive the correct color. The scene in the lower left corner, for example, was reconstructed well, without the network ever having seen this room before or being able to recognize that this texture belongs to a bed. This also indicates that it learned some kind of semantic understanding of this object type, without us providing the additional label "bed". The lower right corner of Fig. 10, in contrast, shows a failed reconstruction, as the network could not reconstruct the surface of the thin chair and the nearly hidden table.
|
| 190 |
+
|
| 191 |
+
# 7.3 Quantitative results
|
| 192 |
+
|
| 193 |
+
We evaluate the precision, recall, and IOU over the occupied voxels on both datasets. This directly shows how much of the space was correctly classified as occupied; for this, the predicted TSDF volumes are converted into binary occupancy grids. This process means that some of the resolution is lost. Because of that, we also evaluate the mean and RMS Hausdorff distance (HD) [9] between
|
| 194 |
+
|
| 195 |
+

|
| 196 |
+
|
| 197 |
+

|
| 198 |
+
|
| 199 |
+

|
| 200 |
+
|
| 201 |
+

|
| 202 |
+
|
| 203 |
+

|
| 204 |
+
|
| 205 |
+

|
| 206 |
+
|
| 207 |
+

|
| 208 |
+
|
| 209 |
+

|
| 210 |
+
|
| 211 |
+

|
| 212 |
+
2D input image
|
| 213 |
+
|
| 214 |
+

|
| 215 |
+
3D output
|
| 216 |
+
|
| 217 |
+

|
| 218 |
+
2D input image
|
| 219 |
+
|
| 220 |
+

|
| 221 |
+
3D output
|
| 222 |
+
Fig. 10. Results on the Replica-dataset for six scenes. Only the generated normal and color images were provided to create the full scene reconstruction. In the top left corner, our network could separate the commode from the wall and also detect its end. Areas in pink are too far away from the true reconstruction to be assigned a color. The red ellipse highlights the failed reconstruction of the thin chair and table.
|
| 223 |
+
|
| 224 |
+
the true and the predicted mesh. This mean is calculated by averaging over the distances of each true mesh vertex to the closest point in the predicted mesh.
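For clarity, a small numpy sketch of these metrics is given below; thresholding the TSDF at zero for occupancy and the brute-force nearest-point search are simplifications of the evaluation, not the exact scripts we used.

```python
import numpy as np

def occupancy_metrics(pred_tsdf, true_tsdf):
    """Precision, recall and IOU over occupied voxels (TSDF < 0)."""
    pred_occ, true_occ = pred_tsdf < 0, true_tsdf < 0
    tp = np.logical_and(pred_occ, true_occ).sum()
    precision = tp / max(pred_occ.sum(), 1)
    recall = tp / max(true_occ.sum(), 1)
    iou = tp / max(np.logical_or(pred_occ, true_occ).sum(), 1)
    return precision, recall, iou

def mean_distance(true_vertices, pred_points):
    """Mean over true mesh vertices of the distance to the closest predicted point."""
    d = np.linalg.norm(true_vertices[:, None, :] - pred_points[None, :, :], axis=-1)
    return d.min(axis=1).mean()
```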
|
| 225 |
+
|
| 226 |
+
Table 1 shows the results for four different cases: two on SUNCG and two on the Replica-dataset. Both are tested with the normals from BlenderProc (woNG) and with the ones generated by the U-Net (wNG). We tested four different configurations: first, we alter the number of layers in the tree from four to six, where five is our default, by copying or removing the second layer in Fig. 2; we also report results when no loss shaping is used.
|
| 227 |
+
|
| 228 |
+
Even though our network has never seen real scenes, the performance on the real-world Replica-dataset is better than on the SUNCG dataset. Our network performs particularly well in predicting large structures, which are more commonly found in Replica, whereas many SUNCG scenes are cluttered with thin, small objects; hence the performance on Replica is higher. This also relates to the fact that the scenes in the Replica-dataset are more structured than in SUNCG: we observed that unusual combinations of objects are sometimes randomly placed in a SUNCG scene. It is also interesting to see that not using the loss shaping introduced in Sec. 6 increases the IOU, but strongly decreases the HD performance, so that rough shapes can still be reconstructed while the finer details are mostly lost.
|
| 229 |
+
|
| 230 |
+
In order to demonstrate the relative performance of our approach, we included the results from Firman et al. [7] and Song et al. [21]. Note that they use output spaces with lower resolution and other datasets. They did not report
|
| 231 |
+
|
| 232 |
+
Table 1. Comparison on the synthetic SUNCG dataset and the real-world Replica-dataset, tested with the ground-truth normals (woNG) and the generated normals (wNG).
|
| 233 |
+
|
| 234 |
+
<table><tr><td>Dataset</td><td>Method</td><td>Precision</td><td>Recall</td><td>∅IOU</td><td>∅HD</td><td>RMS HD</td></tr><tr><td>SUNCG+NYU</td><td>SSCNet joint [21]</td><td>75.0</td><td>96.0</td><td>73.0</td><td>-</td><td>-</td></tr><tr><td></td><td>Voxlets [7]</td><td>58.5</td><td>79.3</td><td>65.8</td><td>-</td><td>-</td></tr><tr><td>SUNCG woNG</td><td>default</td><td>85.05</td><td>72.96</td><td>65.10</td><td>0.0416</td><td>0.0670</td></tr><tr><td>SUNCG woNG</td><td>height 4</td><td>84.47</td><td>76.59</td><td>68.05</td><td>0.0395</td><td>0.0644</td></tr><tr><td>SUNCG woNG</td><td>height 6</td><td>81.08</td><td>78.06</td><td>66.58</td><td>0.0390</td><td>0.0620</td></tr><tr><td>SUNCG woNG</td><td>no loss sh.</td><td>83.51</td><td>81.21</td><td>70.49</td><td>0.0745</td><td>0.1117</td></tr><tr><td>SUNCG wNG</td><td>default</td><td>83.65</td><td>69.65</td><td>61.56</td><td>0.0509</td><td>0.0794</td></tr><tr><td>SUNCG wNG</td><td>height 4</td><td>83.06</td><td>73.41</td><td>64.41</td><td>0.0488</td><td>0.0769</td></tr><tr><td>SUNCG wNG</td><td>height 6</td><td>80.93</td><td>74.19</td><td>63.48</td><td>0.0487</td><td>0.0750</td></tr><tr><td>SUNCG wNG</td><td>no loss sh.</td><td>82.70</td><td>77.80</td><td>67.61</td><td>0.0835</td><td>0.1232</td></tr><tr><td>Replica woNG</td><td>default</td><td>86.19</td><td>84.34</td><td>73.97</td><td>0.0387</td><td>0.0521</td></tr><tr><td>Replica woNG</td><td>height 4</td><td>85.31</td><td>91.40</td><td>78.76</td><td>0.0393</td><td>0.0518</td></tr><tr><td>Replica woNG</td><td>height 6</td><td>81.67</td><td>93.39</td><td>76.81</td><td>0.0457</td><td>0.0569</td></tr><tr><td>Replica woNG</td><td>no loss sh.</td><td>83.33</td><td>94.03</td><td>78.36</td><td>0.0614</td><td>0.0766</td></tr><tr><td>Replica wNG</td><td>default</td><td>87.75</td><td>72.94</td><td>65.86</td><td>0.0562</td><td>0.0745</td></tr><tr><td>Replica wNG</td><td>height 4</td><td>84.59</td><td>80.00</td><td>69.40</td><td>0.0518</td><td>0.0691</td></tr><tr><td>Replica wNG</td><td>height 6</td><td>83.46</td><td>78.33</td><td>67.27</td><td>0.0563</td><td>0.0735</td></tr><tr><td>Replica wNG</td><td>no loss sh.</td><td>83.37</td><td>88.38</td><td>73.49</td><td>0.0766</td><td>0.0959</td></tr></table>
|
| 235 |
+
|
| 236 |
+
the HD for their reconstructions. Nonetheless, the given precision, recall, and IOU values indicate that our approach performs on par, even though we use neither depth data nor semantic segmentation. Using our novel tree net architecture, we can thus reconstruct scenes well without the additional information provided by depth or semantic segmentation.
|
| 237 |
+
|
| 238 |
+
# 8 Conclusion
|
| 239 |
+
|
| 240 |
+
We have demonstrated that the difficult task of reconstructing a full indoor scene from just one single color image is possible. To achieve that, we introduced a tree net architecture that enables the splitting into different depth layers. We combined this with an autoencoder approach to increase the resolution of the used TSDF volumes. Furthermore, we showed the importance of loss shaping during training to focus the attention of the network on the relevant parts. For some applications, the quality of our results is likely to be sufficient, especially in the domain of map generation for mobile robot navigation.
|
| 241 |
+
|
| 242 |
+
We furthermore conclude that our 3D reconstruction approach, realized with a network trained solely on synthetic data, can still adapt to real scenarios. Finally, we showed that complete scene reconstruction is possible without depth data or any auxiliary task like semantic segmentation.
|
| 243 |
+
|
| 244 |
+
# References
|
| 245 |
+
|
| 246 |
+
1. Chang, A., Dai, A., Funkhouser, T., Halber, M., Niessner, M., Savva, M., Song, S., Zeng, A., Zhang, Y.: Matterport3d: Learning from rgb-d data in indoor environments. International Conference on 3D Vision (3DV) (2017)
|
| 247 |
+
2. Chollet, F.: Xception: Deep learning with depthwise separable convolutions. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1251-1258 (2017)
|
| 248 |
+
3. Dai, A., Ritchie, D., Bokeloh, M., Reed, S., Sturm, J., Nießner, M.: Scancomplete: Large-scale scene completion and semantic segmentation for 3d scans. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 4578-4587 (2018)
|
| 249 |
+
4. Dai, A., Ruizhongtai Qi, C., Nießner, M.: Shape completion using 3d-encoder-predictor cnns and shape synthesis. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 5868-5877 (2017)
|
| 250 |
+
5. Denninger, M., Sundermeyer, M., Winkelbauer, D., Zidan, Y., Olefir, D., Elbadrawy, M., Lodhi, A., Katam, H.: Blenderproc. arXiv:1911.01911 (2019)
|
| 251 |
+
6. Eigen, D., Puhrsch, C., Fergus, R.: Depth map prediction from a single image using a multi-scale deep network. In: Advances in neural information processing systems. pp. 2366-2374 (2014)
|
| 252 |
+
7. Firman, M., Mac Aodha, O., Julier, S., Brostow, G.J.: Structured prediction of unobserved voxels from a single depth image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 5431-5440 (2016)
|
| 253 |
+
8. Hane, C., Zach, C., Cohen, A., Angst, R., Pollefeys, M.: Joint 3d scene reconstruction and class segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 97-104 (2013)
|
| 254 |
+
9. Hausdorff, F.: Grundzüge der Mengenlehre, leipzig. de Gruyter & Co 1927, 1935 (1914)
|
| 255 |
+
10. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)
|
| 256 |
+
11. Izadinia, H., Shan, Q., Seitz, S.M.: Im2cad. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 5134-5143 (2017)
|
| 257 |
+
12. Kim, H., Moon, J., Lee, B.: RGB-to-tsdf: Direct tsdf prediction from a single rgb image for dense 3d reconstruction. In: 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 6714-6720. IEEE (2019)
|
| 258 |
+
13. Liu, F., Shen, C., Lin, G.: Deep convolutional neural fields for depth estimation from a single image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 5162-5170 (2015)
|
| 259 |
+
14. Liu, F., Shen, C., Lin, G., Reid, I.: Learning depth from single monocular images using deep convolutional neural fields. IEEE transactions on pattern analysis and machine intelligence 38(10), 2024-2039 (2015)
|
| 260 |
+
15. Ma, F., Karaman, S.: Sparse-to-dense: Depth prediction from sparse depth samples and a single image. In: 2018 IEEE International Conference on Robotics and Automation (ICRA). pp. 1-8. IEEE (2018)
|
| 261 |
+
16. Park, J.J., Florence, P., Straub, J., Newcombe, R., Lovegrove, S.: Deepsdf: Learning continuous signed distance functions for shape representation. arXiv:1901.05103 (2019)
|
| 262 |
+
17. Richter, S.R., Roth, S.: Matryoshka networks: Predicting 3d geometry via nested shape layers. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1936-1944 (2018)
|
| 263 |
+
|
| 264 |
+
18. Rock, J., Gupta, T., Thorsen, J., Gwak, J., Shin, D., Hoiem, D.: Completing 3d object shape from one depth image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2484-2493 (2015)
|
| 265 |
+
19. Ronneberger, O., Fischer, P., Brox, T.: U-net: Convolutional networks for biomedical image segmentation. In: Medical Image Computing and Computer-Assisted Intervention (MICCAI). LNCS, vol. 9351, pp. 234-241. Springer (2015), http://lmb.informatik.uni-freiburg.de/Publications/2015/RFB15a, (available on arXiv:1505.04597 [cs.CV])
|
| 266 |
+
20. Silberman, N., Shapira, L., Gal, R., Kohli, P.: A contour completion model for augmenting surface reconstructions. In: European Conference on Computer Vision. pp. 488-503. Springer (2014)
|
| 267 |
+
21. Song, S., Yu, F., Zeng, A., Chang, A.X., Savva, M., Funkhouser, T.: Semantic scene completion from a single depth image. arXiv:1611.08974 (2016)
|
| 268 |
+
22. Straub, J., Whelan, T., Ma, L., Chen, Y., Wijmans, E., Green, S., Engel, J.J., Mur-Artal, R., Ren, C., Verma, S., et al.: The replica dataset: A digital replica of indoor spaces. arXiv:1906.05797 (2019)
|
| 269 |
+
23. Szegedy, C., Liu, W., Jia, Y., Sermanet, P., Reed, S., Anguelov, D., Erhan, D., Vanhoucke, V., Rabinovich, A.: Going deeper with convolutions. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1-9 (2015)
|
| 270 |
+
24. Tatarchenko, M., Dosovitskiy, A., Brox, T.: Octree generating networks: Efficient convolutional architectures for high-resolution 3d outputs. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 2088-2096 (2017)
|
| 271 |
+
25. Thanh Nguyen, D., Hua, B.S., Tran, K., Pham, Q.H., Yeung, S.K.: A field model for repairing 3d shapes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 5676-5684 (2016)
|
| 272 |
+
26. Varley, J., DeChant, C., Richardson, A., Ruales, J., Allen, P.: Shape completion enabled robotic grasping. In: 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 2442-2447. IEEE (2017)
|
| 273 |
+
27. Wu, J., Zhang, C., Zhang, X., Zhang, Z., Freeman, W.T., Tenenbaum, J.B.: Learning shape priors for single-view 3d completion and reconstruction. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 646-662 (2018)
|
| 274 |
+
28. Wu, Y., Man, J., Xie, Z.: A double layer method for constructing signed distance fields from triangle meshes. Graphical models 76(4), 214-223 (2014)
|
| 275 |
+
29. Wu, Z., Song, S., Khosla, A., Yu, F., Zhang, L., Tang, X., Xiao, J.: 3d shapenets: A deep representation for volumetric shapes. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1912-1920 (2015)
|
| 276 |
+
30. Yu, F., Koltun, V.: Multi-scale context aggregation by dilated convolutions. arXiv:1511.07122 (2015)
|
| 277 |
+
31. Zhang, Y., Funkhouser, T.: Deep depth completion of a single rgb-d image. arXiv:1803.09326 (2018)
|
3dscenereconstructionfromasingleviewport/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d2c91a8571d520b9d015f15c1b289143cb6199530d6f17d5e937e0fda744e09e
|
| 3 |
+
size 439574
|
3dscenereconstructionfromasingleviewport/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5608cdf8698455a10c6976b08345eae90b9dc0378b9c83b0a2d47ebdb0a99eaa
|
| 3 |
+
size 386373
|
3pointtmfastermeasurementofhighdimensionaltransmissionmatrices/23bed598-2c66-4536-b2f6-7daecde70142_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fd1b959fca77958fe77a8fb26bb8a133c212e16fec1da9582789e7aa8e21dec5
|
| 3 |
+
size 74855
|
3pointtmfastermeasurementofhighdimensionaltransmissionmatrices/23bed598-2c66-4536-b2f6-7daecde70142_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0e14b2374c8c12d378f69f426203d5125a2e32d8046e8f4cc011fafdd8f84b7b
|
| 3 |
+
size 93701
|