Add Batch 01eef8e9-0a32-4b51-9ba2-dad1b944f35a
This view is limited to 50 files because it contains too many changes. See raw diff.
- 1lipschitzlayerscomparedmemoryspeedandcertifiablerobustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_content_list.json +3 -0
- 1lipschitzlayerscomparedmemoryspeedandcertifiablerobustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_model.json +3 -0
- 1lipschitzlayerscomparedmemoryspeedandcertifiablerobustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_origin.pdf +3 -0
- 1lipschitzlayerscomparedmemoryspeedandcertifiablerobustness/full.md +302 -0
- 1lipschitzlayerscomparedmemoryspeedandcertifiablerobustness/images.zip +3 -0
- 1lipschitzlayerscomparedmemoryspeedandcertifiablerobustness/layout.json +3 -0
- 2sudfanoveltwostageudflearningmethodforrobustnonwatertightmodelreconstructionfrommultiviewimages/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_content_list.json +3 -0
- 2sudfanoveltwostageudflearningmethodforrobustnonwatertightmodelreconstructionfrommultiviewimages/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_model.json +3 -0
- 2sudfanoveltwostageudflearningmethodforrobustnonwatertightmodelreconstructionfrommultiviewimages/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_origin.pdf +3 -0
- 2sudfanoveltwostageudflearningmethodforrobustnonwatertightmodelreconstructionfrommultiviewimages/full.md +306 -0
- 2sudfanoveltwostageudflearningmethodforrobustnonwatertightmodelreconstructionfrommultiviewimages/images.zip +3 -0
- 2sudfanoveltwostageudflearningmethodforrobustnonwatertightmodelreconstructionfrommultiviewimages/layout.json +3 -0
- 360dvdcontrollablepanoramavideogenerationwith360degreevideodiffusionmodel/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_content_list.json +3 -0
- 360dvdcontrollablepanoramavideogenerationwith360degreevideodiffusionmodel/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_model.json +3 -0
- 360dvdcontrollablepanoramavideogenerationwith360degreevideodiffusionmodel/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_origin.pdf +3 -0
- 360dvdcontrollablepanoramavideogenerationwith360degreevideodiffusionmodel/full.md +295 -0
- 360dvdcontrollablepanoramavideogenerationwith360degreevideodiffusionmodel/images.zip +3 -0
- 360dvdcontrollablepanoramavideogenerationwith360degreevideodiffusionmodel/layout.json +3 -0
- 360locadatasetandbenchmarkforomnidirectionalvisuallocalizationwithcrossdevicequeries/15eb225d-3032-419c-84b0-35d6ec576cbc_content_list.json +3 -0
- 360locadatasetandbenchmarkforomnidirectionalvisuallocalizationwithcrossdevicequeries/15eb225d-3032-419c-84b0-35d6ec576cbc_model.json +3 -0
- 360locadatasetandbenchmarkforomnidirectionalvisuallocalizationwithcrossdevicequeries/15eb225d-3032-419c-84b0-35d6ec576cbc_origin.pdf +3 -0
- 360locadatasetandbenchmarkforomnidirectionalvisuallocalizationwithcrossdevicequeries/full.md +295 -0
- 360locadatasetandbenchmarkforomnidirectionalvisuallocalizationwithcrossdevicequeries/images.zip +3 -0
- 360locadatasetandbenchmarkforomnidirectionalvisuallocalizationwithcrossdevicequeries/layout.json +3 -0
- 360xapanopticmultimodalsceneunderstandingdataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_content_list.json +3 -0
- 360xapanopticmultimodalsceneunderstandingdataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_model.json +3 -0
- 360xapanopticmultimodalsceneunderstandingdataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_origin.pdf +3 -0
- 360xapanopticmultimodalsceneunderstandingdataset/full.md +251 -0
- 360xapanopticmultimodalsceneunderstandingdataset/images.zip +3 -0
- 360xapanopticmultimodalsceneunderstandingdataset/layout.json +3 -0
- 3dawarefaceeditingviawarpingguidedlatentdirectionlearning/d47f630a-17d8-4298-a368-699d1959d603_content_list.json +3 -0
- 3dawarefaceeditingviawarpingguidedlatentdirectionlearning/d47f630a-17d8-4298-a368-699d1959d603_model.json +3 -0
- 3dawarefaceeditingviawarpingguidedlatentdirectionlearning/d47f630a-17d8-4298-a368-699d1959d603_origin.pdf +3 -0
- 3dawarefaceeditingviawarpingguidedlatentdirectionlearning/full.md +313 -0
- 3dawarefaceeditingviawarpingguidedlatentdirectionlearning/images.zip +3 -0
- 3dawarefaceeditingviawarpingguidedlatentdirectionlearning/layout.json +3 -0
- 3dbuildingreconstructionfrommonocularremotesensingimageswithmultilevelsupervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_content_list.json +3 -0
- 3dbuildingreconstructionfrommonocularremotesensingimageswithmultilevelsupervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_model.json +3 -0
- 3dbuildingreconstructionfrommonocularremotesensingimageswithmultilevelsupervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_origin.pdf +3 -0
- 3dbuildingreconstructionfrommonocularremotesensingimageswithmultilevelsupervisions/full.md +310 -0
- 3dbuildingreconstructionfrommonocularremotesensingimageswithmultilevelsupervisions/images.zip +3 -0
- 3dbuildingreconstructionfrommonocularremotesensingimageswithmultilevelsupervisions/layout.json +3 -0
- 3dfacereconstructionwiththegeometricguidanceoffacialpartsegmentation/29911afb-57cf-4105-bc3b-b432a117add8_content_list.json +3 -0
- 3dfacereconstructionwiththegeometricguidanceoffacialpartsegmentation/29911afb-57cf-4105-bc3b-b432a117add8_model.json +3 -0
- 3dfacereconstructionwiththegeometricguidanceoffacialpartsegmentation/29911afb-57cf-4105-bc3b-b432a117add8_origin.pdf +3 -0
- 3dfacereconstructionwiththegeometricguidanceoffacialpartsegmentation/full.md +381 -0
- 3dfacereconstructionwiththegeometricguidanceoffacialpartsegmentation/images.zip +3 -0
- 3dfacereconstructionwiththegeometricguidanceoffacialpartsegmentation/layout.json +3 -0
- 3dfacetrackingfrom2dvideothroughiterativedenseuvtoimageflow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_content_list.json +3 -0
- 3dfacetrackingfrom2dvideothroughiterativedenseuvtoimageflow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_model.json +3 -0
1lipschitzlayerscomparedmemoryspeedandcertifiablerobustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7eb68a19e4bb0ea52f7f48b068dc3a06f0dad52685e3a659a3f512854c4c2ac7
+size 76819
1lipschitzlayerscomparedmemoryspeedandcertifiablerobustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cd37b3b7ce9c3d2083d5fe380e6c86d30d5d3de84199a1deaad35eabcb64b42
+size 93643
1lipschitzlayerscomparedmemoryspeedandcertifiablerobustness/dc5df91e-4a9d-43cc-9e47-6667f322a373_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52a23d2d9ef5277b4ae4843ca0b046cf3f854f103b0222acfd0e07579881958d
+size 358709
1lipschitzlayerscomparedmemoryspeedandcertifiablerobustness/full.md
ADDED
@@ -0,0 +1,302 @@
# 1-Lipschitz Layers Compared: Memory, Speed, and Certifiable Robustness

Bernd Prach,$^{1,*}$ Fabio Brau,$^{2,*}$ Giorgio Buttazzo,$^{2}$ Christoph H. Lampert$^{1}$

$^{1}$ ISTA, Klosterneuburg, Austria
$^{2}$ Scuola Superiore Sant'Anna, Pisa, Italy

{bprach, chl}@ist.ac.at, {fabio.brau, giorgio.buttazzo}@santannapisa.it

# Abstract

The robustness of neural networks against input perturbations with bounded magnitude represents a serious concern in the deployment of deep learning models in safety-critical systems. Recently, the scientific community has focused on enhancing certifiable robustness guarantees by crafting 1-Lipschitz neural networks that leverage Lipschitz-bounded dense and convolutional layers. Different methods have been proposed in the literature to achieve this goal; however, comparing the performance of such methods is not straightforward, since different metrics can be relevant (e.g., training time, memory usage, accuracy, certifiable robustness) for different applications. Therefore, this work provides a thorough comparison between different methods, covering theoretical aspects such as computational complexity and memory requirements, as well as empirical measurements of time per epoch, required memory, accuracy, and certifiable robust accuracy. The paper also provides some guidelines and recommendations to support the user in selecting the methods that work best depending on the available resources. We provide code at github.com/berndprach/1LipschitzLayersCompared.

# 1. Introduction

Modern artificial neural networks achieve high accuracy and sometimes superhuman performance in many different tasks, but it is widely recognized that they are not robust to tiny and imperceptible input perturbations [4, 39] that, if properly crafted, can cause a model to produce the wrong output. Such inputs, known as Adversarial Examples, represent a serious concern for the deployment of machine learning models in safety-critical systems [26]. To overcome this issue, adversarial training has been proposed in [14, 30]; however, even though it empirically improves robustness, it does not provide any guarantees of robustness.



Figure 1. Evaluation of 1-Lipschitz methods on different metrics. Scores are assigned from 1 (worst) to 5 (best) to every method based on the results reported in Sections 3 and 5.

<table><tr><td colspan="2">Legend</td></tr><tr><td>RA</td><td>Robust Accuracy</td></tr><tr><td>A</td><td>Accuracy</td></tr><tr><td>TT</td><td>Training Time</td></tr><tr><td>IT</td><td>Inference Time</td></tr><tr><td>TM</td><td>Train Memory</td></tr><tr><td>IM</td><td>Inference Memory</td></tr></table>

However, for many applications a guarantee of robustness is desired. Roughly speaking, a model $f$ is said to be $\varepsilon$-robust for a given input $x$ if no perturbation of magnitude bounded by $\varepsilon$ can change its prediction. Recently, in the context of image classification, various approaches have been proposed to achieve certifiable robustness, including Verification, Randomized Smoothing, and Lipschitz Bounded Neural Networks.

Verification strategies aim to establish, for any given model, whether all samples contained in an $l_{2}$-ball with radius $\varepsilon$ centered at the tested input $x$ are classified with the same class as $x$. In the exact formulation, verification strategies involve the solution of an NP-hard problem [20]. Nevertheless, even in a relaxed formulation [44], these strategies require a huge computational effort [43].

Randomized smoothing strategies, initially presented in [10], represent an effective way of crafting a certifiably robust classifier $g$ based on a base classifier $f$. If combined with an additional denoising step, they can achieve state-of-the-art levels of robustness [7]. However, since they require multiple evaluations of the base model (up to 100k evaluations) for the classification of a single input, they cannot be used for real-time applications.

Finally, Lipschitz Bounded Neural Networks [6, 9, 24, 27, 29, 34, 40] represent a valid alternative for producing certifiable classifiers, since they only require a single forward pass of the model at inference time to deduce guarantees of robustness. Indeed, for such models, a lower bound of the minimal adversarial perturbation capable of fooling the classifier can be evaluated by considering the difference between the two largest class scores predicted by the model.

Lipschitz-bounded neural networks can be obtained by the composition of 1-Lipschitz layers [2]. The process of parameterizing 1-Lipschitz layers is fairly straightforward for fully connected layers. However, for convolutions, whose kernels overlap, deducing an effective parameterization is a hard problem. Indeed, the Lipschitz condition can essentially be thought of as a condition on the Jacobian of the layer. However, the Jacobian matrix cannot be efficiently computed.

In order to avoid the explicit computation of the Jacobian, various methods have been proposed, including parameterizations that cause the Jacobian to be (very close to) orthogonal [27, 36, 40, 46] and methods that rely on an upper bound on the Jacobian instead [34]. These methods differ drastically in training and validation requirements (in particular time and memory) as well as in empirical performance. Furthermore, increasing the training time or the model size very often also increases the empirical performance. This makes it hard to judge from the existing literature which methods are the most promising. This becomes even worse when working under specific computation requirements, such as restrictions on the available memory. In this case, it is important to choose the method that best suits the characteristics of the system in terms of evaluation time, memory usage, and certifiable robust accuracy.

This work aims at giving a comprehensive comparison of different strategies for crafting 1-Lipschitz layers from both a theoretical and a practical perspective. For the sake of fairness, we consider several metrics such as Time and Memory requirements for both training and inference, Accuracy, as well as Certified Robust Accuracy. The main contributions are the following:

- An empirical comparison of 1-Lipschitz layers based on six different metrics, four different datasets, four architecture sizes, and three training-time budgets.
- A theoretical comparison of the runtime complexity and the memory usage of existing methods.
- A review of the most recent methods in the literature, including implementations with revised code that we release publicly for other researchers to build on.

# 2. Existing Works and Background

In recent years, various methods have been proposed for creating artificial neural networks with a bounded Lipschitz constant. The Lipschitz constant of a function $f: \mathbb{R}^n \to \mathbb{R}^m$ with respect to the $l_2$ norm is the smallest $L$ such that for all $x, y \in \mathbb{R}^n$

$$
\|f(x) - f(y)\|_2 \leq L \|x - y\|_2. \tag{1}
$$

We also extend this definition to networks and layers, by considering the $l_{2}$ norms of the flattened input and output tensors in Equation (1). A layer is called 1-Lipschitz if its Lipschitz constant is at most 1. For linear layers, the Lipschitz constant is equal to the spectral norm of the weight matrix, which is given as

$$
\|M\|_2 = \sup_{\mathbf{v} \neq 0} \frac{\|M\mathbf{v}\|_2}{\|\mathbf{v}\|_2}. \tag{2}
$$

A particular class of linear 1-Lipschitz layers are those with an orthogonal Jacobian matrix. The Jacobian matrix of a layer is the matrix of partial derivatives of the flattened outputs with respect to the flattened inputs. A matrix $M$ is orthogonal if $MM^{\top} = I$, where $I$ is the identity matrix. For layers with an orthogonal Jacobian, Equation (1) always holds with equality and, because of this, many methods aim at constructing such 1-Lipschitz layers.

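As a concrete illustration of Equation (2), the spectral norm can be estimated numerically by power iteration on $W^{\top}W$, the same primitive that several of the layers below rely on (see Appendix A). The following is a minimal PyTorch sketch, not the implementation used by any of the compared methods; the function name and iteration count are our own choices.

```python
import torch

def spectral_norm_estimate(W: torch.Tensor, iterations: int = 100) -> torch.Tensor:
    """Estimate ||W||_2 from Equation (2) by power iteration on W^T W."""
    v = torch.randn(W.shape[1], dtype=W.dtype)
    for _ in range(iterations):
        v = W.T @ (W @ v)        # one power-iteration step on W^T W
        v = v / v.norm()         # re-normalize to avoid overflow
    return (W @ v).norm()        # ||W v||_2 with ||v||_2 = 1

W = torch.randn(64, 128, dtype=torch.float64)
print(float(spectral_norm_estimate(W)),
      float(torch.linalg.matrix_norm(W, ord=2)))  # the two values agree closely
```
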
All the neural networks analyzed in this paper consist of 1-Lipschitz parameterized layers and 1-Lipschitz activation functions, with no skip connections and no batch normalization. Even though the commonly used ReLU activation function is 1-Lipschitz, Anil et al. [2] showed that it reduces the expressive capability of the model. Hence, we adopt the MaxMin activation proposed by the authors and commonly used in 1-Lipschitz models. Compositions of 1-Lipschitz functions are 1-Lipschitz, so the networks analyzed are 1-Lipschitz by construction.

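For reference, a minimal sketch of the MaxMin activation is given below. Pairing the two halves of the channel dimension is one common implementation choice, and the helper name is ours.

```python
import torch

def maxmin(x: torch.Tensor) -> torch.Tensor:
    """MaxMin activation [2]: pair up channels and return the (max, min)
    of each pair; it is 1-Lipschitz and gradient-norm preserving."""
    a, b = x.chunk(2, dim=1)   # pair channel i with channel i + c/2
    return torch.cat([torch.maximum(a, b), torch.minimum(a, b)], dim=1)

x = torch.randn(8, 16, 32, 32)          # (batch, channels, height, width)
print(maxmin(x).shape)                  # unchanged: torch.Size([8, 16, 32, 32])
```
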
# 2.1. Parameterized 1-Lipschitz Layers

This section provides an overview of the existing methods for providing 1-Lipschitz layers. We discuss fundamental methods for estimating the spectral norms of linear and convolutional layers, i.e., the power method [32] and Fantastic 4 [35], and for crafting orthogonal matrices, i.e., Björck & Bowie [5], in Appendix A. The rest of this section describes 7 methods from the literature that construct 1-Lipschitz convolutions: BCOP, Cayley, SOC, AOL, LOT, CPL, and SLL. Further 1-Lipschitz methods [19, 42, 47], and the reasons why they were not included in our main comparison, can be found in Appendix B.

BCOP Block Orthogonal Convolution Parameterization (BCOP) was introduced by Li et al. in [27] to extend a previous work by Xiao et al. [45] that focused on the importance of orthogonal initialization of the weights. For a $k \times k$ convolution, BCOP uses a set of $(2k - 1)$ parameter matrices. Each of these matrices is orthogonalized using the algorithm by Björck & Bowie [5] (see also Appendix A). Then, a $k \times k$ kernel is constructed from those matrices in a way that guarantees that the resulting layer is orthogonal.

Cayley Another family of orthogonal convolutional and fully connected layers has been proposed by Trockman and Kolter [40] by leveraging the Cayley Transform [8], which maps a skew-symmetric matrix $A$ into an orthogonal matrix $Q$ using the relation

$$
Q = (I - A)(I + A)^{-1}. \tag{3}
$$

The transformation can be used to parameterize orthogonal weight matrices for linear layers in a straightforward way. For convolutions, the authors make use of the fact that circularly padded convolutions are vector-matrix products in the Fourier domain. As long as all those vector-matrix products have orthogonal matrices, the full convolution will have an orthogonal Jacobian. For Cayley Convolutions, those matrices are orthogonalized using the Cayley transform.

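To make the parameterization concrete, the following sketch applies Equation (3) to a square parameter matrix, as in the fully connected case. It is an illustration under our own naming, not the authors' implementation, which additionally handles the convolutional case in the Fourier domain.

```python
import torch

def cayley_orthogonal(P: torch.Tensor) -> torch.Tensor:
    """Map an unconstrained square matrix P to an orthogonal matrix Q
    via the Cayley transform of Equation (3)."""
    A = P - P.T                                   # skew-symmetric part, A^T = -A
    I = torch.eye(P.shape[0], dtype=P.dtype)
    return (I - A) @ torch.linalg.inv(I + A)      # Q = (I - A)(I + A)^{-1}

P = torch.randn(32, 32, dtype=torch.float64)
Q = cayley_orthogonal(P)
print(torch.allclose(Q @ Q.T, torch.eye(32, dtype=torch.float64), atol=1e-8))  # True
```

Note that $I + A$ is always invertible for skew-symmetric $A$, since the eigenvalues of $A$ are purely imaginary.
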
SOC Skew Orthogonal Convolution is an orthogonal convolutional layer presented by Singla et al. [36], obtained by leveraging the exponential convolution [15]. Analogously to the matrix case, given a kernel $L \in \mathbb{R}^{c\times c\times k\times k}$, the exponential convolution can be defined as

$$
\exp(L)(x) := x + \frac{L \star x}{1!} + \frac{L \star^{2} x}{2!} + \dots + \frac{L \star^{k} x}{k!} + \dots, \tag{4}
$$

where $\star^k$ denotes a convolution applied $k$ times. The authors proved that any exponential convolution has an orthogonal Jacobian matrix as long as $L$ is skew-symmetric, providing a way of parameterizing 1-Lipschitz layers. In their work, the sum of the infinite series is approximated by computing only the first 5 terms during training and the first 12 terms during inference, and $L$ is normalized to have unitary spectral norm following the method presented in [35] (see Appendix A).

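A minimal sketch of a truncated exponential convolution in the spirit of Equation (4) is shown below. The kernel skew-symmetrization follows SOC's condition (transpose the channel dimensions and flip the kernel spatially), while the crude rescaling is our own stand-in for the Fantastic 4 normalization [35]; names and constants are assumptions.

```python
import torch
import torch.nn.functional as F

def exp_conv(x: torch.Tensor, L: torch.Tensor, terms: int = 5) -> torch.Tensor:
    """Truncated series of Equation (4): x + L*x/1! + (L*^2 x)/2! + ..."""
    out, term = x, x
    for i in range(1, terms + 1):
        term = F.conv2d(term, L, padding=L.shape[-1] // 2) / i  # (L *^i x) / i!
        out = out + term
    return out

c, k = 8, 3
L = torch.randn(c, c, k, k)
L = (L - L.transpose(0, 1).flip(2, 3)) / 2   # skew-symmetric kernel: J^T = -J
L = L / (4 * L.norm())                        # crude rescaling (stand-in for [35])
x = torch.randn(2, c, 16, 16)
y = exp_conv(x, L)
print(float(x.norm()), float(y.norm()))       # approximately equal (orthogonal Jacobian)
```
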
AOL Prach and Lampert [34] introduced Almost Orthogonal Lipschitz (AOL) layers. For any matrix $P$, they defined a diagonal rescaling matrix $D$ with

$$
D_{ii} = \Big(\sum_{j} \big| P^{\top} P \big|_{ij}\Big)^{-1/2} \tag{5}
$$

and proved that the spectral norm of $PD$ is bounded by 1. This result was used to show that the linear layer given by $l(x) = PDx + b$ (where $P$ is the learnable matrix and $D$ is given by Eq. (5)) is 1-Lipschitz. Furthermore, the authors extended the idea so that it can also be efficiently applied to convolutions. This is done by calculating the rescaling in Equation (5) with the Jacobian $J$ of a convolution instead of $P$. In order to evaluate it efficiently, the authors express the elements of $J^{\top}J$ explicitly in terms of the kernel values.

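The rescaling of Equation (5) is cheap to reproduce for the fully connected case. The sketch below (our own illustration, not the authors' code) rescales a parameter matrix so that the resulting weight has spectral norm at most 1.

```python
import torch

def aol_rescale(P: torch.Tensor) -> torch.Tensor:
    """Return W = P D with D from Equation (5), so that ||W||_2 <= 1."""
    d = torch.abs(P.T @ P).sum(dim=1).pow(-0.5)   # D_jj = (sum_i |P^T P|_ji)^(-1/2)
    return P * d                                   # scale column j of P by D_jj

P = torch.randn(128, 64, dtype=torch.float64)
W = aol_rescale(P)
print(float(torch.linalg.matrix_norm(W, ord=2)))   # <= 1 by the AOL bound
```
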
LOT The layer presented by Xu et al. [46] extends the idea of [19] to use the inverse square root of a matrix in order to orthogonalize it. Indeed, for any matrix $V$, the matrix $Q = V(V^T V)^{-\frac{1}{2}}$ is orthogonal. Similarly to the Cayley method, for layer-wise orthogonal training (LOT) the convolution is applied in the Fourier frequency domain. To find the inverse square root, the authors rely on an iterative Newton method. In detail, defining $Y_0 = V^T V$, $Z_0 = I$, and

$$
Y_{i+1} = \frac{1}{2} Y_{i} \left(3I - Z_{i} Y_{i}\right), \qquad Z_{i+1} = \frac{1}{2} \left(3I - Z_{i} Y_{i}\right) Z_{i}, \tag{6}
$$

it can be shown that $Z_{i}$ converges to $(V^{T}V)^{-\frac{1}{2}}$. In their proposed layer, the authors apply 10 iterations of the method for both training and evaluation.

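The iteration of Equation (6) is easy to reproduce for a plain matrix. The sketch below is our own illustration, with a normalization we chose so that the spectrum lies in the iteration's convergence region; it is not the authors' implementation, which operates on the Fourier-domain matrices.

```python
import torch

def newton_inverse_sqrt(A: torch.Tensor, iterations: int = 10) -> torch.Tensor:
    """Iterate Equation (6); Z_i converges to A^(-1/2) for symmetric
    positive-definite A with spectrum in (0, 1]."""
    I = torch.eye(A.shape[0], dtype=A.dtype)
    Y, Z = A, I
    for _ in range(iterations):
        T = 0.5 * (3 * I - Z @ Y)
        Y, Z = Y @ T, T @ Z        # Y_{i+1} = Y_i T,  Z_{i+1} = T Z_i
    return Z

V = torch.randn(64, 8, dtype=torch.float64)
A = V.T @ V
s = torch.linalg.matrix_norm(A)                    # Frobenius norm bounds the spectrum
Q = V @ (newton_inverse_sqrt(A / s) / s.sqrt())    # Q = V (V^T V)^{-1/2}
print(torch.allclose(Q.T @ Q, torch.eye(8, dtype=torch.float64), atol=1e-6))  # True
```
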
CPL Meunier et al. [31] proposed the Convex Potential Layer. Given a non-decreasing 1-Lipschitz function $\sigma$ (usually ReLU), the layer is constructed as

$$
l(x) = x - \frac{2}{\|W\|_2^2} W^{\top} \sigma(Wx + b), \tag{7}
$$

which is 1-Lipschitz by design. The spectral norm required to calculate $l(x)$ is approximated using the power method (see Appendix A).

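For the fully connected case, Equation (7) can be written in a few lines. In this sketch (our own, with the spectral norm computed exactly rather than by the power method) the 1-Lipschitz property can be spot-checked on random input pairs.

```python
import torch

def cpl(x: torch.Tensor, W: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Convex Potential Layer, Equation (7); 1-Lipschitz by construction."""
    s = torch.linalg.matrix_norm(W, ord=2)         # ||W||_2
    return x - (2.0 / s**2) * W.T @ torch.relu(W @ x + b)

W, b = torch.randn(64, 32), torch.zeros(64)
x, y = torch.randn(32), torch.randn(32)
# Sanity check of the Lipschitz bound on a random pair (ratio is <= 1):
print(float((cpl(x, W, b) - cpl(y, W, b)).norm() / (x - y).norm()))
```
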
SLL The SDP-based Lipschitz Layers (SLL) proposed by Araujo et al. [3] combine the CPL layer with the upper bound on the spectral norm from AOL. The layer can be written as

$$
l(x) = x - 2 W^{\top} Q^{-2} D^{2} \sigma(Wx + b), \tag{8}
$$

where $Q$ is a learnable diagonal matrix with positive entries and $D$ is deduced by applying Equation (5) to $P = W^{\top}Q^{-1}$.

Remark 1. Both CPL and SLL are non-linear by construction, so they can be used to construct a network without any further use of activation functions. However, in preliminary experiments we empirically found that alternating CPL (and SLL) layers with MaxMin activation layers achieves better performance.

# 3. Theoretical Comparison

As illustrated in the previous section, various ideas and methods have been proposed to parameterize 1-Lipschitz layers. As a result, the different methods have very different properties and requirements. This section aims at highlighting the properties of the different algorithms, focusing on the algorithmic complexity and the required memory.

Table 1 provides an overview of the computational complexity and memory requirements for the different layers considered in the previous section. For the sake of clarity, the analysis considers separately the transformations applied to the input of the layers and those applied to the weights to ensure the 1-Lipschitz constraint. Each of the two sides of the table contains three columns: i) Operations lists the most costly transformations applied to the input or to the parameters of the different layers; ii) MACS reports the computational complexity, expressed in multiply-accumulate operations (MACS), involved in the transformations (only leading terms are presented); iii) Memory reports the memory required by the transformation during the training phase.

At training time, both input and weight transformations are required, thus the training complexity of the forward pass can be computed as the sum of the two corresponding MACS columns of the table. Similarly, the training memory requirements can be computed as the sum of the two corresponding Memory columns of the table. For the considered operations, the cost of the backward pass during training has the same computational complexity as the forward pass, and therefore increases the overall complexity by a constant factor. At inference time, all the parameter transformations can be computed just once and cached afterward. Therefore, the inference complexity is equal to the complexity due to the input transformation (column 3 in the table). At inference time, the intermediate variables are not stored in memory; hence, the memory requirements are much lower than during training. Since these values cannot be inferred directly from Table 1, we report them separately in Appendix C.1.

Note that all the terms reported in Table 1 depend on the batch size $b$, the input size $s \times s \times c$, the number of inner iterations of a method $t$, and the kernel size $k \times k$. (Often, $t$ is different at training and inference time.) For the sake of clarity, the MACS of a naive convolution implementation is denoted by $C$ ($C = bs^2c^2k^2$), the number of inputs of a layer is denoted by $M$ ($M = bs^2c$), and the size of the kernel of a standard convolution is denoted by $P$ ($P = c^2k^2$). Only the leading terms of the computations are reported in Table 1. In order to simplify some terms, we assume that $c > \log_2(s)$ and that rescaling a tensor (by a scalar) as well as adding two tensors does not require any memory in order to do backpropagation. We also assume that each additional activation does require extra memory. All these assumptions have been verified to hold within PyTorch [33]. Also, when the algorithm described in the paper and the version provided in the supplied code differed, we considered the algorithm implemented in the code.

The transformations reported in the table are convolutions (CONV), Fast Fourier Transformations (FFT), matrix-vector multiplications (MV), matrix-matrix multiplications (MM), matrix inversions (INV), as well as applications of an activation function (ACT). The application of algorithms such as Björck & Bowie (BnB), the power method, and Fantastic 4 (F4) is also reported (see Appendix A for descriptions).

# 3.1. Analysis of the computational complexity

It is worth noting that the complexity of the input transformations (in Table 1) is similar for all methods. This implies that a similar scaling behaviour is expected at inference time for the models. Cayley and LOT apply an FFT-based convolution and have computational complexity independent of the kernel size. CPL and SLL require two convolutions, which makes them slightly more expensive at inference time. Notably, SOC requires multiple convolutions, making this method more expensive at inference time.

At training time, parameter transformations need to be applied in addition to the input transformations during every forward pass. For SOC and CPL, the input transformations always dominate the parameter transformations in terms of computational complexity. This means the complexity scales like $c^2$, just like a regular convolution, with a further factor of 2 and 5, respectively. All other methods require parameter transformations that scale like $c^3$, making them more expensive for larger architectures. In particular, we expect Cayley and LOT to require long training times for larger models, since the complexity of their parameter transformations further depends on the input size.

Table 1. Computational complexity and memory requirements of different methods. We report multiply-accumulate operations (MACS) as well as memory requirements (per layer) for batch size $b$, image size $s \times s \times c$, kernel size $k \times k$, and number of inner iterations $t$. We use $C = bs^2c^2k^2$, $M = bs^2c$, and $P = c^2k^2$. For a detailed explanation of what is reported, see Section 3. For an explanation of how the entries of this table were derived, see Appendix C.

<table><tr><td rowspan="2">Method</td><td colspan="3">Input Transformations</td><td colspan="3">Parameter Transformations</td></tr><tr><td>Operations</td><td>MACS $\mathcal{O}(\cdot)$</td><td>Memory $\mathcal{O}(\cdot)$</td><td>Operations</td><td>MACS $\mathcal{O}(\cdot)$</td><td>Memory $\mathcal{O}(\cdot)$</td></tr><tr><td>Standard</td><td>CONV</td><td>$C$</td><td>$M$</td><td>-</td><td>-</td><td>$P$</td></tr><tr><td>AOL</td><td>CONV</td><td>$C$</td><td>$M$</td><td>CONV</td><td>$c^3k^4$</td><td>$5P$</td></tr><tr><td>BCOP</td><td>CONV</td><td>$C$</td><td>$M$</td><td>BnB &amp; MMs</td><td>$c^3kt + c^3k^3$</td><td>$c^2kt + c^2k^3$</td></tr><tr><td>Cayley</td><td>FFTs &amp; MVs</td><td>$bs^2c^2$</td><td>$\frac{5}{2}M$</td><td>FFTs &amp; INVs</td><td>$s^2c^3$</td><td>$\frac{3}{2}s^2c^2$</td></tr><tr><td>CPL</td><td>CONVs &amp; ACT</td><td>$2C$</td><td>$3M$</td><td>power method</td><td>$s^2c^2k^2$</td><td>$P + s^2c$</td></tr><tr><td>LOT</td><td>FFTs &amp; MVs</td><td>$bs^2c^2$</td><td>$3M$</td><td>FFTs &amp; MMs</td><td>$4s^2c^3t$</td><td>$4s^2c^2t$</td></tr><tr><td>SLL</td><td>CONVs &amp; ACT</td><td>$2C$</td><td>$3M$</td><td>CONVs</td><td>$c^3k^4$</td><td>$5P$</td></tr><tr><td>SOC</td><td>CONVs</td><td>$Ct_1$</td><td>$Mt_1$</td><td>F4</td><td>$c^2k^2t_2$</td><td>$P$</td></tr></table>

# 3.2. Analysis of the training memory requirements

The memory requirements of the different layers are important, since they determine the maximum batch size and the type of models we can train on a particular infrastructure. At training time, typically all intermediate results are kept in memory to perform backpropagation. This includes intermediate results for both input and parameter transformations. The input transformations usually preserve the size, and therefore the memory required is usually of order $\mathcal{O}(M)$. Therefore, for the input transformations, all methods require memory at most a constant factor more than standard convolutions, with the worst method being SOC, with a constant $t_1$ typically equal to 5.

In addition to the input transformation, we also need to store intermediate results of the parameter transformations in memory in order to evaluate the gradients. Again, most methods approximately preserve the sizes during the parameter transformations, and therefore the memory required is usually of order $\mathcal{O}(P)$. Exceptions to this rule are Cayley and LOT, with a larger $\mathcal{O}(s^2 c^2)$ term, as well as BCOP.

# 4. Experimental Setup

This section presents an experimental study aimed at comparing the performance of the considered layers with respect to different metrics. Before presenting the results, we first summarize the setup used in our experiments; for a detailed description see Appendix E. To have a fair and meaningful comparison among the various models, all the proposed layers have been evaluated using the same architecture, loss function, and optimizer. Since, according to the data reported in Table 1, different layers may have different throughput, for a fair comparison on the tested metrics we limited the total training time instead of fixing the number of training epochs. Results are reported for training times of 2h, 10h, and 24h on one A100 GPU.

Our architecture is a standard convolutional network that doubles the number of channels whenever the resolution is reduced [6, 40]. For each method, we tested architectures of different sizes. We denote them as XS, S, M, and L, depending on the number of parameters, according to the criteria in Table 7, ranging from 1.5M to 100M parameters.

Since different methods benefit from different learning rates and weight decay, for each setting (model size, method, and dataset) we used the best values resulting from a random search on a validation set composed of $10\%$ of the original training set. More specifically, for each setting, 16 training runs with randomly sampled hyperparameters were performed, and we selected the configuration maximizing the certified robust accuracy w.r.t. $\epsilon = 36 / 255$ (see Appendix E.4 for details).

The evaluation was carried out using four different datasets: CIFAR-10, CIFAR-100 [21], Tiny ImageNet [23], and Imagenette [16] for larger images. Augmentation was used during training (random crops and flips on CIFAR-10 and CIFAR-100, RandAugment [11] on Tiny ImageNet, and random crops as well as RandAugment on Imagenette); see Appendix E.5 for details. We use the loss function proposed by [34], with temperature 0.25, where we tuned the margin to maximize the robust accuracy for $\epsilon = \frac{36}{255}$. In detail, we considered a margin of $2\sqrt{2}\epsilon$, where the $\sqrt{2}$ factor comes from the $L_{2}$ norm [41], and the factor 2 has been added to help with generalization.

# 4.1. Metrics

All the considered models were evaluated based on three main metrics: the throughput, the required memory, and the certified robust accuracy.

Throughput and epoch time The throughput of a model is the average number of examples that the model can process per second. It determines how many epochs are processed in a given time frame. The evaluation of the throughput was performed on an 80 GB A100 GPU, based on the average time of 100 mini-batches. We measured the inference throughput with cached parameter transformations.

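A sketch of how such a throughput measurement can be implemented is shown below. The helper and its warm-up and iteration counts are our own choices, not the paper's benchmarking code; note that CUDA kernels launch asynchronously, so the clock must be read after explicit synchronization.

```python
import time
import torch

@torch.no_grad()
def inference_throughput(model: torch.nn.Module, batch: torch.Tensor,
                         n_batches: int = 100) -> float:
    """Average examples per second over n_batches mini-batches."""
    model(batch)                                  # warm-up pass
    if batch.is_cuda:
        torch.cuda.synchronize()                  # wait for queued kernels
    start = time.perf_counter()
    for _ in range(n_batches):
        model(batch)
    if batch.is_cuda:
        torch.cuda.synchronize()
    return n_batches * batch.shape[0] / (time.perf_counter() - start)

model = torch.nn.Conv2d(3, 16, 3, padding=1)      # any model works here
print(inference_throughput(model, torch.randn(256, 3, 32, 32)))
```
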
Memory required Layers that require less memory allow for larger batch sizes, and the memory requirements also determine the type of hardware we can train a model on. For each model, we measured and reported the maximal GPU memory occupied by tensors using the function torch.cuda.max_memory_allocated() provided by the PyTorch framework. This is not exactly equal to the overall GPU memory requirement but gives a fairly good approximation of it. Note that the model memory measured in this way also includes additional memory required by the optimizer (e.g., to store the momentum term) as well as by the activation layers in the forward pass. However, this additional memory should be at most of order $\mathcal{O}(M + P)$. As for the throughput, we evaluated and cached all calculations independent of the input at inference time.

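A corresponding sketch for the memory measurement, our own illustration built around the PyTorch function cited above (requires a CUDA device), is:

```python
import torch

def peak_training_memory_bytes(model, batch, labels, loss_fn) -> int:
    """Peak GPU memory occupied by tensors during one training step."""
    torch.cuda.reset_peak_memory_stats()           # clear previous peak
    loss = loss_fn(model(batch), labels)           # forward pass
    loss.backward()                                # backward pass stores gradients
    return torch.cuda.max_memory_allocated()
```
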
Certified robust accuracy In order to evaluate the performance of a 1-Lipschitz network, the standard metric is the certified robust accuracy. An input is classified certifiably robustly with radius $\epsilon$ by a model if no perturbation of the input with norm bounded by $\epsilon$ can change the prediction of the model. Certified robust accuracy measures the proportion of examples that are classified correctly as well as certifiably robustly. For 1-Lipschitz models, a lower bound of the certified $\epsilon$-robust accuracy is the portion of correctly classified inputs such that $\mathcal{M}_f(x_i, l_i) > \epsilon \sqrt{2}$, where the margin $\mathcal{M}_f(x, l)$ of a model $f$ at input $x$ with label $l$, given as $\mathcal{M}_f(x, l) = f(x)_l - \max_{j \neq l} f_j(x)$, is the difference between the target class score and the highest score of a different class. For details, see [41].

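This criterion translates directly into code. The sketch below (the function name and the use of raw class scores are our own choices) computes the fraction of inputs that are both correctly classified and certifiably robust:

```python
import torch

def certified_robust_accuracy(scores: torch.Tensor, labels: torch.Tensor,
                              eps: float = 36 / 255) -> float:
    """Fraction of inputs a 1-Lipschitz model classifies correctly
    with margin M_f(x, l) > eps * sqrt(2), as described above."""
    target = scores.gather(1, labels[:, None]).squeeze(1)          # f(x)_l
    other = scores.scatter(1, labels[:, None], float("-inf"))      # mask target class
    margin = target - other.max(dim=1).values                      # M_f(x, l)
    return (margin > eps * 2 ** 0.5).float().mean().item()

scores = torch.randn(100, 10)           # class scores from a 1-Lipschitz model
labels = torch.randint(0, 10, (100,))
print(certified_robust_accuracy(scores, labels))
```

A positive margin already implies correct classification, so the single comparison covers both conditions.
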
# 5. Experimental Results

This section presents the results of the comparison performed by applying the methodology discussed in Section 4. The results related to the different metrics are discussed in dedicated subsections, and the key takeaways are summarized in the radar plot illustrated in Figure 1.

# 5.1. Training and inference times

Figure 2 plots the training time per epoch of the different models as a function of their size, and Figure 3 plots the corresponding inference throughput for the various sizes, as described in Section 4. As described in Table 5, the model base width, referred to as $w$, is doubled from one model size to the next. We expect the training and inference time to scale with $w$ similarly to how individual layers scale with their number of channels, $c$ (in Table 1). This is because the width of each of the 5 blocks of our architecture is a constant multiple of the base width, $w$.

The training time increases (at most) about linearly with $w$ for standard convolutions, whereas the computational complexity of each single convolution scales like $c^2$. This suggests that parallelism on the GPU and the overhead from other operations (activations, parameter updates, etc.) are important factors determining the training time. This also explains why CPL (doing two convolutions, with identical kernel parameters) is only slightly slower than a standard convolution, and SOC (doing 5 convolutions) is only about 3 times slower than the standard convolution. The AOL and SLL methods also require times comparable to a standard convolution for small models, although eventually the $c^3$ term in the computation of the rescaling makes them slower for larger models. Finally, the Cayley, LOT, and BCOP methods take much longer training times per epoch. For Cayley and LOT this behavior was expected, as they have a large $\mathcal{O}(s^2c^3)$ term in their computational complexity. See Table 1 for further details.



Figure 2. Training time per epoch (on CIFAR-10) for different methods and different model sizes.



Figure 3. Inference throughput for different methods as a function of their size, for CIFAR-10-sized input images. All parameter transformations have been evaluated and cached beforehand.

At inference time, transformations of the weights are cached; therefore, some methods (AOL, BCOP) do not have any overhead compared to a standard convolution. As expected, other methods (CPL, SLL, and SOC) that apply additional convolutions to the input suffer from a corresponding overhead. Finally, Cayley and LOT have a slightly different throughput due to their FFT-based convolution. Among them, Cayley is about twice as fast because it involves a real-valued FFT rather than a complex-valued one. From Figure 3, it can be noted that cached Cayley and CPL have the same inference time, even though CPL uses twice the number of convolutions. We believe this is due to the fact that the conventional FFT-based convolution is quite efficient for large kernel sizes, but for smaller ones PyTorch implements a faster algorithm, i.e., Winograd [22], which can be up to 2.5 times faster.

# 5.2. Training memory requirements

The training and inference memory requirements of the various models (measured as described in Section 4.1) are reported in Figure 4 as a function of the model size. The results of the theoretical analysis reported in Table 1 suggest that the training memory requirements always have a term linear in the number of channels $c$ (usually the activations from the forward pass), as well as a term quadratic in $c$ (usually the weights and all transformations applied to the weights during the forward pass). This behavior can also be observed in Figure 4. For some of the models, the memory required approximately doubles from one model size to the next, just like the width. This means that the linear term dominates (for those sizes), which makes those models relatively cheap to scale up. For the BCOP, LOT, and Cayley methods, the larger coefficients in the $c^2$ term (for LOT and Cayley the coefficient even depends on the input size, $s^2$) cause this term to dominate. This makes it much harder to scale those methods to more parameters. The LOT method requires huge amounts of memory; in particular, LOT-L is too large to fit into 80 GB of GPU memory.

Note that at test time the memory requirements are much lower, because the intermediate activation values do not need to be stored, as there is no backward pass. Therefore, at inference time, most methods require a very similar amount of memory as a standard convolution. The Cayley and LOT methods require more memory since they perform the calculation in the Fourier space, creating an intermediate representation of the weight matrices of size $\mathcal{O}(s^2 c^2)$.



Figure 4. Memory required at training and inference time for input size $32 \times 32$.

# 5.3. Certified robust accuracy

The results related to the accuracy and the certified robust accuracy for the different methods, model sizes, and datasets, measured with a 24h training budget, are summarized in Table 2. The differences among the various model sizes are also highlighted in Figure 6 by reporting the sorted values of the certified robust accuracy. Further tables and plots for different training budgets can be found in Appendix G. The reader can compare our results with the state-of-the-art certified robust accuracy summarized in Appendix D. However, it is worth noting that, to reach state-of-the-art performance, authors often carry out experiments using large model sizes and long training times, which makes it hard to compare the methods themselves. On the other hand, the evaluation proposed in this paper allows a fairer comparison among the different methods, since it also considers timing and memory aspects. This restriction based on time, rather than the number of epochs, ensures that merely enlarging the model size does not lead to improved performance, as bigger models typically process fewer epochs of data. Indeed, in our results in Figure 6 it is usually the M (and not the L) model that performs best.

Table 2. Certified robust accuracy for radius $\epsilon = 36 / 255$ on the evaluated datasets. Training is performed for 24 hours.

<table><tr><td rowspan="2">Methods</td><td colspan="4">Accuracy [%]</td><td colspan="4">Robust Accuracy [%]</td></tr><tr><td>XS</td><td>S</td><td>M</td><td>L</td><td>XS</td><td>S</td><td>M</td><td>L</td></tr><tr><td colspan="9">CIFAR-10</td></tr><tr><td>AOL</td><td>71.7</td><td>73.6</td><td>73.4</td><td>73.7</td><td>59.1</td><td>60.8</td><td>61.0</td><td>61.5</td></tr><tr><td>BCOP</td><td>71.7</td><td>73.1</td><td>74.0</td><td>74.6</td><td>58.5</td><td>59.3</td><td>60.5</td><td>61.5</td></tr><tr><td>CPL</td><td>74.9</td><td>76.1</td><td>76.6</td><td>76.8</td><td>62.5</td><td>64.2</td><td>65.1</td><td>65.2</td></tr><tr><td>Cayley</td><td>73.1</td><td>74.2</td><td>74.4</td><td>73.6</td><td>59.5</td><td>61.1</td><td>61.0</td><td>60.1</td></tr><tr><td>LOT</td><td>75.5</td><td>76.6</td><td>72.0</td><td>-</td><td>63.4</td><td>64.6</td><td>58.7</td><td>-</td></tr><tr><td>SLL</td><td>73.7</td><td>74.2</td><td>75.3</td><td>74.3</td><td>61.0</td><td>62.0</td><td>62.8</td><td>62.3</td></tr><tr><td>SOC</td><td>74.1</td><td>75.0</td><td>76.9</td><td>76.9</td><td>61.3</td><td>62.9</td><td>66.3</td><td>65.4</td></tr><tr><td colspan="9">CIFAR-100</td></tr><tr><td>AOL</td><td>40.3</td><td>43.4</td><td>44.3</td><td>41.9</td><td>27.9</td><td>31.0</td><td>31.4</td><td>29.7</td></tr><tr><td>BCOP</td><td>41.4</td><td>42.8</td><td>43.7</td><td>42.2</td><td>28.4</td><td>30.1</td><td>31.2</td><td>29.2</td></tr><tr><td>CPL</td><td>42.3</td><td>-</td><td>45.2</td><td>44.3</td><td>30.1</td><td>-</td><td>33.2</td><td>32.1</td></tr><tr><td>Cayley</td><td>42.3</td><td>43.9</td><td>43.5</td><td>42.9</td><td>29.2</td><td>30.5</td><td>30.5</td><td>29.5</td></tr><tr><td>LOT</td><td>43.5</td><td>45.2</td><td>42.8</td><td>-</td><td>30.8</td><td>32.5</td><td>29.6</td><td>-</td></tr><tr><td>SLL</td><td>41.4</td><td>42.8</td><td>42.4</td><td>42.1</td><td>28.9</td><td>30.5</td><td>29.9</td><td>29.6</td></tr><tr><td>SOC</td><td>43.1</td><td>45.2</td><td>47.3</td><td>46.2</td><td>30.6</td><td>32.6</td><td>34.9</td><td>33.5</td></tr><tr><td colspan="9">Tiny ImageNet</td></tr><tr><td>AOL</td><td>26.6</td><td>29.3</td><td>30.3</td><td>30.0</td><td>18.1</td><td>19.7</td><td>21.0</td><td>20.6</td></tr><tr><td>BCOP</td><td>22.4</td><td>26.2</td><td>27.6</td><td>27.0</td><td>13.8</td><td>16.9</td><td>17.2</td><td>16.8</td></tr><tr><td>CPL</td><td>28.3</td><td>29.3</td><td>29.8</td><td>30.3</td><td>18.9</td><td>19.7</td><td>20.3</td><td>20.1</td></tr><tr><td>Cayley</td><td>27.8</td><td>29.6</td><td>30.1</td><td>27.2</td><td>17.9</td><td>19.5</td><td>19.3</td><td>16.7</td></tr><tr><td>LOT</td><td>30.7</td><td>32.5</td><td>28.8</td><td>-</td><td>20.8</td><td>21.9</td><td>18.1</td><td>-</td></tr><tr><td>SLL</td><td>25.1</td><td>27.0</td><td>26.5</td><td>27.9</td><td>16.6</td><td>18.4</td><td>17.7</td><td>18.8</td></tr><tr><td>SOC</td><td>28.9</td><td>28.8</td><td>32.1</td><td>32.1</td><td>18.9</td><td>18.8</td><td>21.2</td><td>21.1</td></tr><tr><td colspan="9">Imagenette</td></tr><tr><td>AOL</td><td></td><td>80.8</td><td>83.7</td><td>82.8</td><td></td><td>76.8</td><td>79.9</td><td>78.5</td></tr><tr><td>BCOP</td><td></td><td>81.2</td><td>84.5</td><td>9.8</td><td></td><td>75.6</td><td>80.1</td><td>9.8</td></tr><tr><td>CPL</td><td></td><td>85.5</td><td>86.5</td><td>86.4</td><td></td><td>80.8</td><td>82.4</td><td>82.3</td></tr><tr><td>Cayley</td><td></td><td>81.2</td><td>77.9</td><td>-</td><td></td><td>75.8</td><td>71.7</td><td>-</td></tr><tr><td>SLL</td><td></td><td>80.8</td><td>83.4</td><td>79.3</td><td></td><td>75.4</td><td>78.0</td><td>72.8</td></tr><tr><td>SOC</td><td></td><td>80.6</td><td>83.6</td><td>79.0</td><td></td><td>74.7</td><td>78.4</td><td>73.5</td></tr></table>

Experiments show that SOC performs best, reaching the highest certified robust accuracy on two datasets. CPL models consistently rank among the top-10 positions across the three datasets. LOT performed well, in particular on the Tiny ImageNet dataset, where it performs best. AOL did not reach high accuracy on CIFAR-10, but reached more competitive results on Tiny ImageNet. An opposite effect can be observed for SLL, whose performance seems to depend strongly on the number of classes. BCOP only reaches the top 10 once, while Cayley is consistently outperformed by the other methods. The very same analysis applies to the clean accuracy, whose sorted bar plots are reported in Appendix G; the main difference is that Cayley performs slightly better on that metric. Furthermore, it is worth highlighting that CPL is sensitive to weight initialization: we faced numerical errors during the 10h and 24h training of the small model on CIFAR-100. On Imagenette, CPL clearly performs best, followed by BCOP and AOL. Note that these methods all construct a kernel so that the convolution is 1-Lipschitz, which seems to be a good strategy for higher-resolution datasets. For example, SOC, which instead applies multiple convolutions, shows a drop in performance compared to the other datasets.

# 5.3.1 Interpretation of the results

We confirm empirically what was suspected in [46]: layers that naturally include a skip connection (CPL, SLL, SOC) generally perform better than layers that do not. Furthermore, we noticed that layers with an identity initialization (AOL, LOT) perform better than layers that have neither property (BCOP, Cayley). Presumably this is due to the MaxMin activation reducing the variance in the forward pass when alternated with non-identity layers.

Our results also allow ruling out some other possible explanations. One might suspect that purely contractive layers (AOL, CPL, and SLL) would suffer from vanishing gradients, differently from orthogonal ones; however, our experiments do not show any evidence of this. Furthermore, one might suspect that slower methods perform worse because they allow fewer epochs for a given time budget; however, our experiments do not support this either, since two relatively slow methods (SOC, LOT) are among the best ones.

# 6. Conclusions and Guidelines

This work presented a comparative study of state-of-the-art 1-Lipschitz layers under the lens of different metrics, such as time and memory requirements, accuracy, and certified robust accuracy, all evaluated at training and inference time. A theoretical comparison of the methods in terms of time and memory complexity was also presented and validated by experiments.

Taking all metrics into account (summarized in Figure 1), the results are in favor of CPL, due to its high performance and low consumption of computational resources. When large computational resources are available and the application does not impose stringent timing constraints during inference and training, the SOC layer could be used, due to its slightly better performance. Finally, those applications in which the inference time is crucial may take advantage of AOL or BCOP, which do not introduce additional runtime overhead (during inference) compared to a standard convolution. For higher-resolution images, CPL also seems to be the most promising method.

# References

[1] Thomas Altstidl, David Dobre, Björn Eskofier, Gauthier Gidel, and Leo Schwinn. Raising the bar for certified adversarial robustness with diffusion models. arXiv preprint arXiv:2305.10388, 2023. 14, 22
[2] Cem Anil, James Lucas, and Roger Grosse. Sorting out Lipschitz function approximation. In International Conference on Machine Learning (ICML), 2019. 2, 3, 11
[3] Alexandre Araujo, Aaron J Havens, Blaise Delattre, Alexandre Allauzen, and Bin Hu. A unified algebraic perspective on Lipschitz neural networks. In International Conference on Learning Representations (ICLR), 2023. 4, 14, 22
[4] Battista Biggio, Igino Corona, Davide Maiorca, Blaine Nelson, Nedim Šrndić, Pavel Laskov, Giorgio Giacinto, and Fabio Roli. Evasion attacks against machine learning at test time. In Machine Learning and Knowledge Discovery in Databases, 2013. 1
[5] Å. Björck and C. Bowie. An iterative algorithm for computing the best estimate of an orthogonal matrix. SIAM Journal on Numerical Analysis, 1971. 3, 11
[6] Fabio Brau, Giulio Rossolini, Alessandro Biondi, and Giorgio Buttazzo. Robust-by-design classification via unitary-gradient neural networks. In Proceedings of the AAAI Conference on Artificial Intelligence, 2023. 2, 5
[7] Nicholas Carlini, Florian Tramer, Krishnamurthy Dj Dvijotham, Leslie Rice, Mingjie Sun, and J Zico Kolter. (Certified!!) adversarial robustness for free! In International Conference on Learning Representations (ICLR), 2023. 2
[8] Arthur Cayley. About the algebraic structure of the orthogonal group and the other classical groups in a field of characteristic zero or a prime characteristic. Journal für die reine und angewandte Mathematik, 1846. 3
[9] Moustapha Cisse, Piotr Bojanowski, Edouard Grave, Yann Dauphin, and Nicolas Usunier. Parseval networks: Improving robustness to adversarial examples. In International Conference on Machine Learning (ICML), 2017. 2, 11
[10] Jeremy Cohen, Elan Rosenfeld, and Zico Kolter. Certified adversarial robustness via randomized smoothing. In International Conference on Machine Learning (ICML), 2019. 2
[11] Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. RandAugment: Practical automated data augmentation with a reduced search space. In Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2020. 5, 16
[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In Conference on Computer Vision and Pattern Recognition (CVPR), 2009. 16
[13] Farzan Farnia, Jesse Zhang, and David Tse. Generalizable adversarial training via spectral normalization. In International Conference on Learning Representations (ICLR), 2018. 11
[14] Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. stat, 2015. 1
[15] Emiel Hoogeboom, Victor Garcia Satorras, Jakub Tomczak, and Max Welling. The convolution exponential and generalized Sylvester flows. In Conference on Neural Information Processing Systems (NeurIPS), 2020. 3
[16] Jeremy Howard. Imagenette. https://github.com/fastai/imagenette/. Accessed: 01.02.2024. 5, 16
[17] Kai Hu, Klas Leino, Zifan Wang, and Matt Fredrikson. Effectively leveraging capacity for improved deterministic robustness certification. In International Conference on Learning Representations (ICLR), 2024. 14, 22
[18] Kai Hu, Andy Zou, Zifan Wang, Klas Leino, and Matt Fredrikson. Unlocking deterministic robustness certification on ImageNet. In Conference on Neural Information Processing Systems (NeurIPS), 2024. 14
[19] Lei Huang, Li Liu, Fan Zhu, Diwen Wan, Zehuan Yuan, Bo Li, and Ling Shao. Controllable orthogonalization in training DNNs. In Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 3, 11, 12
[20] Guy Katz, Clark Barrett, David L Dill, Kyle Julian, and Mykel J Kochenderfer. Reluplex: An efficient SMT solver for verifying deep neural networks. In International Conference on Computer Aided Verification, 2017. 2
[21] Alex Krizhevsky. Learning multiple layers of features from tiny images. Technical report, 2009. 5, 16
[22] Andrew Lavin and Scott Gray. Fast algorithms for convolutional neural networks. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 7
[23] Ya Le and Xuan Yang. Tiny ImageNet visual recognition challenge. CS 231N, 2015. 5, 16
[24] Klas Leino, Zifan Wang, and Matt Fredrikson. Globally-robust neural networks. In International Conference on Machine Learning (ICML), 2021. 2, 11
[25] Mario Lezcano-Casado and David Martínez-Rubio. Cheap orthogonal constraints in neural networks: A simple parametrization of the orthogonal and unitary group. In International Conference on Machine Learning (ICML), 2019. 11
[26] Linyi Li, Tao Xie, and Bo Li. SoK: Certified robustness for deep neural networks. In IEEE Symposium on Security and Privacy (SP), 2023. 1
[27] Qiyang Li, Saminul Haque, Cem Anil, James Lucas, Roger B Grosse, and Joern-Henrik Jacobsen. Preventing gradient attenuation in Lipschitz constrained convolutional networks. In Conference on Neural Information Processing Systems (NeurIPS), 2019. 2, 3, 11, 14
[28] Shuai Li, Kui Jia, Yuxin Wen, Tongliang Liu, and Dacheng Tao. Orthogonal deep neural networks. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 11
[29] Max Losch, David Stutz, Bernt Schiele, and Mario Fritz. Certified robust models with slack control and large Lipschitz constants. arXiv preprint arXiv:2309.06166, 2023. 2
[30] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In International Conference on Learning Representations (ICLR), 2018. 1
[31] Laurent Meunier, Blaise J Delattre, Alexandre Araujo, and Alexandre Allauzen. A dynamical system perspective for Lipschitz neural networks. In International Conference on Machine Learning (ICML), 2022. 3, 11, 14, 22
[32] Takeru Miyato, Toshiki Kataoka, Masanori Koyama, and Yuichi Yoshida. Spectral normalization for generative adversarial networks. In International Conference on Learning Representations (ICLR), 2018. 3, 11
[33] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. PyTorch: An imperative style, high-performance deep learning library. In Conference on Neural Information Processing Systems (NeurIPS), 2019. 4
[34] Bernd Prach and Christoph H Lampert. Almost-orthogonal layers for efficient general-purpose Lipschitz networks. In European Conference on Computer Vision (ECCV), 2022. 2, 3, 5, 14
[35] S Singla and S Feizi. Fantastic four: Differentiable bounds on singular values of convolution layers. In International Conference on Learning Representations (ICLR), 2021. 3, 11
[36] Sahil Singla and Soheil Feizi. Skew orthogonal convolutions. In International Conference on Machine Learning (ICML), 2021. 2, 3, 14
[37] Sahil Singla and Soheil Feizi. Improved techniques for deterministic l2 robustness. In Conference on Neural Information Processing Systems (NeurIPS), 2022. 14
[38] Leslie N Smith and Nicholay Topin. Super-convergence: Very fast training of neural networks using large learning rates. In Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications, 2019. 15
[39] Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In International Conference on Learning Representations (ICLR), 2014. 1
[40] Asher Trockman and J Zico Kolter. Orthogonalizing convolutional layers with the Cayley transform. In International Conference on Learning Representations (ICLR), 2021. 2, 3, 5, 14, 23
[41] Yusuke Tsuzuku, Issei Sato, and Masashi Sugiyama. Lipschitz-margin training: Scalable certification of perturbation invariance for deep neural networks. In Conference on Neural Information Processing Systems (NeurIPS), 2018. 6
[42] Ruigang Wang and Ian Manchester. Direct parameterization of Lipschitz-bounded deep networks. In International Conference on Machine Learning (ICML), 2023. 3, 11, 12
[43] Lily Weng, Huan Zhang, Hongge Chen, Zhao Song, Cho-Jui Hsieh, Luca Daniel, Duane Boning, and Inderjit Dhillon. Towards fast computation of certified robustness for ReLU networks. In International Conference on Machine Learning (ICML), 2018. 2
[44] Eric Wong and Zico Kolter. Provable defenses against adversarial examples via the convex outer adversarial polytope. In International Conference on Machine Learning (ICML), 2018. 2
[45] Lechao Xiao, Yasaman Bahri, Jascha Sohl-Dickstein, Samuel Schoenholz, and Jeffrey Pennington. Dynamical isometry and a mean field theory of CNNs: How to train 10,000-layer vanilla convolutional neural networks. In International Conference on Machine Learning (ICML), 2018. 3
|
| 299 |
+
[46] Xiaojun Xu, Linyi Li, and Bo Li. Lot: Layer-wise orthogonal training on improving 12 certified robustness. Conference on
|
| 300 |
+
|
| 301 |
+
Neural Information Processing Systems (NeurIPS), 2022. 2, 3, 8, 14, 23
|
| 302 |
+
[47] Tan Yu, Jun Li, Yunfeng Cai, and Ping Li. Constructing orthogonal convolutions in an explicit manner. In International Conference on Learning Representations (ICLR), 2021. 3, 11, 12
|
1lipschitzlayerscomparedmemoryspeedandcertifiablerobustness/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:958410ac9ac178a74e8bc0a75648bea1017353c76d4972b484835351d2f4a4d9
size 431983
1lipschitzlayerscomparedmemoryspeedandcertifiablerobustness/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b4850cea6986035f04d900485dd4e3182ef58e50c494a950ca8fbcc4cb91a53
size 386152
2sudfanoveltwostageudflearningmethodforrobustnonwatertightmodelreconstructionfrommultiviewimages/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:808558429822606c9527fee0639ba4ec68048d4aba63306aba1b77f7aac5777d
size 78328
2sudfanoveltwostageudflearningmethodforrobustnonwatertightmodelreconstructionfrommultiviewimages/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8515bdfb58a7fce83824ed18098f66603b3b6768fd3119306e2bd0eb89071955
size 94387
2sudfanoveltwostageudflearningmethodforrobustnonwatertightmodelreconstructionfrommultiviewimages/42a5d7da-3ecd-41f5-9527-9d0c2dfbf201_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7227de63ce0cfea56439fffcbe428ec5228a39572541bc1bdc66a4cd8f433e97
size 7094836
2sudfanoveltwostageudflearningmethodforrobustnonwatertightmodelreconstructionfrommultiviewimages/full.md
ADDED
@@ -0,0 +1,306 @@
# 2S-UDF: A Novel Two-stage UDF Learning Method for Robust Non-watertight Model Reconstruction from Multi-view Images

Junkai Deng$^{1,2}$ Fei Hou$^{1,2*}$ Xuhui Chen$^{1,2}$ Wencheng Wang$^{1,2}$ Ying He$^{3}$

$^{1}$ State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences

$^{2}$ University of Chinese Academy of Sciences

$^{3}$ School of Computer Science and Engineering, Nanyang Technological University

{dengjk,houfei,chenxh,whn}@ios.ac.cn yhe@ntu.edu.sg

# Abstract
Recently, building on the foundation of neural radiance fields, various techniques have emerged to learn unsigned distance fields (UDFs) for reconstructing 3D non-watertight models from multi-view images. Yet, a central challenge in UDF-based volume rendering is formulating a proper way to convert unsigned distance values into volume density, ensuring that the resulting weight function remains unbiased and sensitive to occlusions. Falling short of these requirements often results in incorrect topology or large reconstruction errors in the resulting models. This paper addresses this challenge by presenting a novel two-stage algorithm, 2S-UDF, for learning a high-quality UDF from multi-view images. Initially, the method applies an easily trainable density function that, while slightly biased and transparent, aids in coarse reconstruction. The subsequent stage then refines the geometry and appearance of the object to achieve a high-quality reconstruction by directly adjusting the weight function used in volume rendering, ensuring that it is unbiased and occlusion-aware. Decoupling density and weight in two stages makes our training stable and robust, distinguishing our technique from existing UDF learning approaches. Evaluations on the DeepFashion3D, DTU, and BlendedMVS datasets validate the robustness and effectiveness of our approach. In both quantitative metrics and visual quality, the results indicate our superior performance over other UDF learning techniques in reconstructing 3D non-watertight models from multi-view images. Our code is available at https://bitbucket.org/jkdeng/2sudf/.

# 1. Introduction
Following the success of neural radiance fields (NeRF) [29], numerous volume rendering based 3D modeling methods have been proposed to learn signed distance fields (SDF) for 3D model reconstruction from multi-view images [7, 34, 36, 40]. These approaches map signed distance values to a density function, thereby enabling the use of volume rendering to learn an implicit SDF representation. To calculate pixel colors, they compute the weighted sum of radiances along each light ray. Achieving an accurate surface depiction requires the density function to meet three essential criteria. Firstly, the weights, which are derived from the density function, must reach their maximum value when the distance is zero, ensuring unbiasedness. Secondly, as a ray traverses the surface, the accumulated density should tend towards infinity, rendering the surface opaque, a property referred to as occlusion-awareness. Finally, the density function should be bounded to prevent numerical issues. Popular SDF approaches, such as NeuS [34] and VolSDF [40], adopt an S-shaped density function that meets all these requirements.

Figure 1 (panels: GT, Ours, NeuralUDF, NeUDF). We learn a UDF from multi-view images for non-watertight model reconstruction. As illustrated in the cross sections of the learned UDFs, ours closely approximates the ground truth. In contrast, the learned UDF of NeuralUDF [25] is choppy, leading to significant artifacts, e.g., unexpected pits. The learned UDF of NeUDF [23] is almost closed, struggling to generate open surfaces.
While SDF-based methods excel at reconstructing watertight models, they have limitations in representing open models. This is due to the intrinsic nature of SDFs, which differentiate between the interior and exterior of a model, thus failing to accommodate open boundaries. Recent advances have attempted to mitigate this constraint by employing unsigned distance fields (UDF) [23, 25, 27]. Unlike signed distance fields, UDFs have non-negative distance values, making them suitable for representing non-watertight models. However, learning a UDF from multi-view images is a challenging task, since the gradients of the UDF are unstable due to directional changes near the zero level-set, making the neural network difficult to train. Another major challenge lies in formulating a UDF-induced density function that can simultaneously meet the three requirements above. Unlike SDFs, UDFs cannot distinguish between the front and back of a surface based on distance values, so directly using an S-shaped density function is off the table. Opting for a bell-shaped density function brings its own issues: its line integral cannot approach infinity, as occlusion-awareness requires, unless the density becomes unbounded at zero distance values. These conflicting requirements make UDF learning a non-trivial task, forcing existing methods to sacrifice at least one of these conditions. As shown in Figure 1, the existing methods NeuralUDF [25] and NeUDF [23] result in either choppy or nearly closed UDFs.

As designing a UDF-induced density function that simultaneously fulfills the three aforementioned conditions remains an unresolved challenge, we propose a novel approach that learns a UDF from multi-view images in two separate stages. In the first stage, we apply an easily trainable but slightly biased and transparent density function for coarse reconstruction. Such a UDF, although approximate, provides an important clue for determining where to truncate the light rays. This accounts for the occlusion effect, where points behind the surface are not visible and should not contribute to the output color. With truncated light rays, we are able to derive the weights directly from the UDF, bypassing the density function, to further refine the geometry and appearance in the second stage. Our two-stage learning method, called 2S-UDF, leads to an unbiased and occlusion-aware weight function. Furthermore, by sidestepping density function learning in Stage 2, we effectively bypass the challenges associated with ensuring its boundedness. This strategy enhances the numerical stability of our method. Evaluations on the benchmark datasets DeepFashion3D [43] and DTU [19] show that 2S-UDF outperforms existing UDF learning methods in terms of both reconstruction accuracy and visual quality. Additionally, we observe that the training stability of 2S-UDF is notably superior to that of other UDF learning networks.

# 2. Related Work
3D Reconstruction from Multi-View Images. Surface reconstruction from multi-view images has been a subject of study for several decades, and methods can generally be classified into two categories: voxel-based and point-based. Voxel-based methods [3, 8, 20, 21, 33] divide the 3D space into voxels and determine which ones belong to the object. These methods can be computationally expensive and may not be suitable for reconstructing complex surfaces. Point-based methods [13, 31, 38] use structure-from-motion [16] to calibrate the images and generate a dense point cloud using multi-view stereo [12]. Finally, surface reconstruction methods (e.g., [2, 17, 22]) are used to generate a mesh. Since multi-view stereo requires dense correspondences, which are often difficult to compute, its results often contain various types of artifacts, such as noise, holes, and incomplete structures.

Neural Volume Rendering. Neural network-based 3D surface reconstruction has received attention in recent years with the emergence of neural rendering [29]. Several methods have been proposed for volume rendering and surface reconstruction using neural networks. VolSDF [40] uses the cumulative distribution function of the Laplacian distribution to evaluate the density function from an SDF for volume rendering and surface reconstruction. NeuS [34] adopts a density function unbiased to the first-order approximation of SDFs for more accurate reconstruction. SparseNeuS [24] extends NeuS to use fewer images for reconstruction. HF-NeuS [36] improves NeuS by proposing a simplified and unbiased density function and using hierarchical multilayer perceptrons (MLPs) for detail reconstruction. Geo-NeuS [10] incorporates structure-from-motion to add more constraints. NeuralWarp [7] improves accuracy by optimizing consistency between warped views of different images. PET-NeuS [37] further improves accuracy by introducing tri-planes, combined with an MLP, into the SDF prediction module. All these methods learn SDFs, which can only reconstruct watertight models. Recently, Long et al. proposed NeuralUDF [25] to learn UDFs for reconstructing open models. It adapts the S-shaped density function used for SDF learning to UDFs by introducing an indicator function. However, the indicator function is complicated to learn and also introduces bias. Liu et al. proposed NeUDF [23], adopting a bell-shaped density. However, to make it occlusion-aware, the density has to be unbounded, resulting in an improper integral, which reduces accuracy. Meng et al. proposed NeAT [27] to learn an SDF with validity so as to reconstruct open models from the SDF. However, it needs foreground masks for supervision.

3D Reconstruction from Point Clouds. There has been recent interest in surface representation using signed distance fields (SDFs) and occupancy fields. Several methods have been proposed for learning SDFs [4, 26, 30, 32, 35], while occupancy fields have been used in methods such as [5, 28]. However, both SDFs and occupancy fields can only represent watertight models. To represent non-watertight models, some methods have been proposed to learn UDFs from 3D point clouds [6, 41, 42]. Our method also uses a UDF to represent non-watertight models, but we learn it directly from multi-view images, which is a more challenging problem.

# 3. Method
At the foundation of UDF-based learning approaches is the task of crafting a density function that converts unsigned distance values into volume density, ensuring that the resulting weight function is unbiased and responsive to occlusions. None of the existing UDF learning methods [23, 25] can simultaneously meet the three critical requirements, i.e., ensuring that the density function is bounded and that the weight function remains both unbiased and occlusion-aware.

We tackle these challenges by decoupling the density function and the weight function across two stages. In the initial stage (Section 3.1), we utilize an easy-to-train, bell-shaped density function (which is inherently bounded) to learn a coarse UDF. While the resulting weight function is not theoretically unbiased or occlusion-aware, we can make it practically usable by choosing a proper parameter. Moving into the second stage (Section 3.2), we sidestep the density function entirely, focusing instead on refining the UDF by directly adjusting the weight function within the neural volume rendering framework. Specifically, we truncate light rays after they hit the front side of the object and obtain a weight function that is both unbiased and sensitive to occlusions, without any concerns about density function boundedness. Finally, Section 3.3 presents the training details.

# 3.1. Stage 1: Coarse UDF Learning via a Simple Density Function

We consider the scenario of a single planar surface $\mathcal{M}$ and a single ray-plane intersection. Inspired by HF-NeuS [36], we propose an easy-to-learn density function $\sigma_{1}$ that maps the unsigned distance $f$ to density
$$
\sigma_1(f(t)) = \frac{c\,s\,e^{-sf(t)}}{1+e^{-sf(t)}}, \quad s>0,\; c>0, \tag{1}
$$
where $c > 0$ is a fixed, user-specified parameter and $s > 0$ is a learnable parameter controlling the width of the bell-shaped curve. A straightforward calculation shows that the weight function $w_{1}(f(t)) = e^{-\int_{0}^{t}\sigma_{1}(f(u))\,\mathrm{d}u}\,\sigma_{1}(f(t))$ is monotonically decreasing behind the plane $\mathcal{M}$, and its maximum occurs at a point $t^*$ in front of $\mathcal{M}$ with unsigned distance value $f(t^{*}) = \frac{1}{s}\ln \frac{c}{|\cos(\theta)|}$ if $c > |\cos(\theta)|$, or $f(t^{*}) = 0$ if $0 < c \leq |\cos(\theta)|$, where $\theta$ is the incident angle between the light ray and the surface normal. This means that the weight function $w_{1}$ is not unbiased. Furthermore, the line integral $\int_0^t\sigma_1(f(u))\,\mathrm{d}u$ does not approach infinity when a light ray passes through the front-most layer of the surface, indicating that $w_{1}$ is only partially occlusion-aware.

While the density function $\sigma_{1}$ is not perfect in theory, by selecting an appropriate $c$ we can practically minimize bias and enhance opacity. Clearly, a smaller $c$ decreases $f(t^{*})$, thereby reducing bias. To gauge the effect of $c$ on opacity, we consider the most extreme scenario, where the incident light ray is perpendicular to the planar surface $\mathcal{M}$, and assume that the intersection point is located at $t = 1$. In this situation, the unsigned distance function is $f(t) = 1 - t$ for points in front of $\mathcal{M}$. Since $\sigma_{1}$ is symmetric on either side of $\mathcal{M}$, the surface transparency is the square of the transparency of the front side. The theoretical transparency is
$$
\left(e^{-\int_{0}^{1} \sigma_{1}(f(t))\,\mathrm{d}t}\right)^{2} = \left[\exp\left(-\int_{0}^{1} \frac{c\,s\,e^{-s(1-t)}}{1+e^{-s(1-t)}}\,\mathrm{d}t\right)\right]^{2} = \left(\frac{1+e^{-s}}{2}\right)^{2c}.
$$
Therefore, we should choose a relatively large $c$ to reduce transparency. In our implementation, we set the constant $c = 5$ based on the typical value of the learned parameter $s$, which usually ranges between 1000 and 2000. Calculations of bias and translucency show that this setting offers a good balance between occlusion-awareness and unbiasedness in the first-stage training. Please refer to the supplementary material for a detailed analysis.
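As a sanity check on the closed-form transparency above, the following is a minimal numeric sketch (ours, not the authors' code); `s` is set lower than the typical learned value of 1000-2000 only so that the quadrature resolves the sharply peaked integrand:

```python
import numpy as np
from scipy.integrate import quad

def sigma1(t, s, c):
    # Bell-shaped Stage-1 density (Eq. 1) along a normal-incidence ray
    # that hits the plane at t = 1, so f(t) = 1 - t in front of the plane.
    f = 1.0 - t
    return c * s * np.exp(-s * f) / (1.0 + np.exp(-s * f))

s, c = 50.0, 5.0
integral, _ = quad(lambda t: sigma1(t, s, c), 0.0, 1.0, points=[1.0])
numeric = np.exp(-integral) ** 2               # transparency of both sides
closed_form = ((1.0 + np.exp(-s)) / 2.0) ** (2 * c)
print(numeric, closed_form)                    # both ~ (1/2)^(2c) ~ 9.8e-4
```

With $c = 5$ the surface transmits only about $0.1\%$ of the radiance, which is why a relatively large $c$ keeps the coarse stage practically opaque.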
# 3.2. Stage 2: UDF Refinement through Weight Adjustment

In this stage, we refine the UDF learned in Stage 1 to improve the quality of geometry and appearance. Unlike Stage 1 and all other UDF-learning methods, inspired by [1], we truncate light rays based on the approximate UDF learned in Stage 1 and learn the weight function $w(t)$ directly, instead of the density function $\sigma(t)$, to refine the UDF.

Ideally, for a single ray-plane intersection, we want a bell-shaped function $w(t)$ that attains its maximum at the points with zero distance values and satisfies the partition of unity. Therefore, we adopt the derivative of the sigmoid function as the weight function [1], defined as
$$
w_2(f(t)) = \frac{s\,e^{-sf(t)}}{\left(1+e^{-sf(t)}\right)^{2}} \cdot |\cos(\theta)|, \tag{2}
$$

where $\theta$ is the incident angle between the light ray and the surface normal.
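To make the behavior of $w_2$ concrete, a small numeric check (our illustration, with arbitrary $s$ and $\theta$) confirms that the weight peaks exactly at zero distance, with peak value $\frac{s}{4}|\cos(\theta)|$:

```python
import numpy as np

s, cos_theta = 1000.0, 0.8
f = np.linspace(0.0, 0.02, 2001)                  # unsigned distances
w2 = s * np.exp(-s * f) / (1 + np.exp(-s * f))**2 * abs(cos_theta)
print(f[np.argmax(w2)])                           # 0.0 -> unbiased peak
print(w2[0], s / 4 * abs(cos_theta))              # both equal 200.0
```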
Intuitively speaking, learning such a weight function $w_{2}$ in Stage 2 of our UDF method is similar to learning an S-shaped density function in SDF-based approaches, such as [36]. As a result, the learning process in Stage 2 is as stable as in those SDF approaches. Furthermore, it entirely avoids the visibility indicator function, which is necessary in NeuralUDF [25].

Calculation shows that the weight $w_{2}$ attains its maximum at zero distance values; therefore, it is unbiased. However, if we naively predicted the weight function directly, it would not be occlusion-aware, so we introduce ray truncation. To make $w_{2}$ occlusion-aware, we truncate the light rays after they pass through the frontmost layer of the surface, thereby preventing the interior of the object from being rendered. Note that we do not expect the truncation to fall exactly on the frontmost layer of the surface. In fact, as long as it occurs between the frontmost layer and the second layer, we consider the truncation valid. This means that the approximate UDF learned in the first stage, which captures the main topological features (such as boundaries) and provides a fairly good representation of the target object, is sufficient for determining where to cut off the light rays.
In our implementation, we adopt a simple strategy to determine the truncation point for each light ray (see the code sketch after Figure 2). Specifically, the truncation point of ray $\mathbf{r}$ is the first sample point along $\mathbf{r}$ such that

- the unsigned distance value at the point is a local maximum; to avoid interference from small oscillations of the distance field, it must be the maximum within a window centered at the point; and
- the accumulated weight up to this point is greater than $\delta_{\text{thres}}$.

The accumulated weight threshold $\delta_{\text{thres}}$ is intuitively set to 0.5. This choice rests on the assumption that, if the Stage 1 training has been performed well enough, the accumulated weight at each sample point along the ray is close to either 0 (before reaching a surface) or 1 (after intersecting a surface); we therefore select the midpoint 0.5 for $\delta_{\text{thres}}$. With this cutoff mechanism, only the first ray-surface intersection contributes to the color of the ray, effectively achieving occlusion-awareness. Given these properties, we conclude that,
Theorem 1 The weight $w_{2}$ with ray truncation is unbiased and occlusion-aware.
Figure 2 gives an intuitive illustration of our Stage 2 weight learning and truncation strategy. The UDF maximum at point $A$, in front of the intersection surface, does not affect the cutting point selection, because the accumulated weight there is below $\delta_{\text{thres}}$ (0.5). The local maximum at $B$, caused by UDF oscillation, does not affect it either, since it is not the maximum in a large enough neighborhood. The light ray is cut at the maximum point $C$, and thus the weight of point $D$ is zero, contributing nothing to the rendering. As illustrated in Figure 2, the cutting process is robust against UDF oscillation, open boundaries, and local maxima in front of the intersection surface.

Figure 2. An intuitive illustration of our ray cutting algorithm, best viewed in color and magnified. A ray shoots from left to right, approaching the boundary of the first surface and going through another two surfaces (gray boxes). The violet solid line represents the UDF values along the ray; the orange dashed line represents the corresponding color weight.
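The selection rule above can be summarized in a short sketch (a simplification under our own assumptions about the inputs, not the authors' released code):

```python
import numpy as np

def truncation_index(udf, acc_weight, window=5, delta_thres=0.5):
    """Return the index of the cut-off sample along one ray.

    udf        : (N,) numpy array of unsigned distances at the samples
    acc_weight : (N,) numpy array of accumulated weight up to each sample
    Picks the first sample that is (a) the maximum of the UDF within a
    window centred on it and (b) has accumulated weight > delta_thres;
    returns N (no cut) if no sample qualifies.
    """
    n, half = len(udf), window // 2
    for i in range(n):
        lo, hi = max(0, i - half), min(n, i + half + 1)
        if udf[i] >= udf[lo:hi].max() and acc_weight[i] > delta_thres:
            return i
    return n
```

Samples past the returned index receive zero weight, so points behind the first intersected surface (like point $D$ above) never contribute to the rendered color, while points $A$ and $B$ fail criteria (b) and (a), respectively.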
# 3.3. Training

Differentiable UDFs. NeuS uses an MLP network to learn the signed distance function $f$, which is a differentiable function. In contrast, a UDF is not differentiable at the zero level set, making it difficult for the network to learn the values and gradients of the UDF close to the zero level set.

Another crucial requirement is to ensure non-negative values for the computed distances, which seems like a trivial task, as one may simply apply the absolute value or a normalization such as ReLU [11] to the MLP output. However, applying the absolute value to the distance is not viable due to its non-differentiability at zero. Similarly, normalizing the output value using ReLU is not feasible, as it is also non-differentiable at zero and its gradient vanishes for negative inputs. This is particularly problematic for learning UDFs: when the MLP returns a negative distance value, the ReLU gradient vanishes, hindering the update of the distance to a positive value in subsequent iterations.

We add a softplus [9] function after the output layer of the MLP [23]. The softplus function is a smooth and differentiable approximation of ReLU, defined as $\mathrm{softplus}(x) = \frac{1}{\beta}\ln (1 + e^{\beta x})$. Softplus has the same shape as ReLU, but it is continuous and differentiable at every point, and its gradient does not vanish anywhere. Using the softplus function ensures that the output of the MLP is non-negative and differentiable, making it suitable for learning the UDF. Similar to NeUDF [23], we set $\beta = 100$ in our experiments.
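A minimal PyTorch sketch of this output activation follows; the layer sizes are our placeholders, since the paper only specifies the softplus with $\beta = 100$:

```python
import torch.nn as nn

class UDFHead(nn.Module):
    # An MLP whose output passes through softplus(beta=100), so predicted
    # distances stay non-negative, smooth, and keep nonzero gradients even
    # when the raw pre-activation output is negative.
    def __init__(self, dim_in=3, dim_hidden=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(dim_in, dim_hidden),
            nn.Softplus(beta=100),
            nn.Linear(dim_hidden, 1),
        )
        self.out_act = nn.Softplus(beta=100)

    def forward(self, x):
        return self.out_act(self.mlp(x))
```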
Loss functions. Following NeuralUDF [25], we adopt an iso-surface regularizer that penalizes near-zero UDF values at non-surface points, thereby encouraging smooth and clean UDFs. The regularization loss is defined as [25]

$$
\mathcal{L}_{reg} = \frac{1}{MN}\sum_{i,k}\exp\left(-\tau \cdot f(t_{i,k})\right),
$$
where $\tau$ is a constant scalar that scales the learned UDF values, $M$ is the total number of sampled rays per training iteration, and $N$ is the number of sampled points on a single ray. $\tau$ is set to 5.0 in the first stage and 50.0 in the second stage.

The value of $s$, which is learnable in our method, significantly affects the quality of the reconstruction. When $s$ is small, it introduces a larger bias and leads to a more blurred output. We observe that $s$ typically converges to a relatively large value between 1000 and 2000, leading to visually pleasing results. However, in rare cases where $s$ stops increasing during training, we apply a penalty to force it to increase. The penalty is defined as follows:
$$
\mathcal{L}_{s} = \frac{1}{M}\sum_{i,k}\frac{1}{s_{i,k}},
$$
where $M$ is the number of sampled rays per training iteration. The term $\mathcal{L}_s$ aggregates the reciprocals of all $s$ values used for the points $t_{i,k}$ on the rays $r_i$. Intuitively, it encourages a larger $s$ during the early stage of training. In our implementation, we make this term optional, since $s$ generally increases (at a decreasing rate) during training, and the penalty is only necessary in rare cases where $s$ stalls at a relatively low value.
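Both regularizers reduce to one-liners; here is a sketch under our own assumptions about the tensor shapes (flattened per-sample values):

```python
import torch

def iso_surface_reg(udf_vals, tau):
    # L_reg: penalizes near-zero UDF values at sampled points;
    # tau = 5.0 in Stage 1 and 50.0 in Stage 2, per the paper.
    return torch.exp(-tau * udf_vals).mean()

def s_penalty(s_vals):
    # Optional L_s: mean reciprocal of the sharpness s, nudging s
    # upward when it stalls at a small value during training.
    return (1.0 / s_vals).mean()
```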
As in other SDF- and UDF-based methods [25, 34, 36], we adopt a color loss and an Eikonal loss in our approach. Specifically, the color loss $\mathcal{L}_{color}$ is the $L_{1}$ loss between the predicted color and the ground-truth color of a single pixel, as used in [34]. The Eikonal loss $\mathcal{L}_{eik}$ regularizes the learned distance field to have unit gradient [14]. Users may also choose to adopt object masks for supervision, as introduced in other SDF- and UDF-based methods [25, 34]. Putting it all together, we define the combined loss function as a weighted sum,

$$
\mathcal{L} = \mathcal{L}_{\text{color}} + \lambda_{1} \mathcal{L}_{\text{eik}} + \lambda_{2} \mathcal{L}_{\text{reg}} + \lambda_{3} \mathcal{L}_{s}\ (+\ \lambda_{m} \mathcal{L}_{\text{mask}}),
$$

where $\lambda_1, \lambda_2, \lambda_3$ and the optional $\lambda_m$ are hyperparameters that control the weight of each loss term.
# 4. Experiments

Datasets. To evaluate our method, we use three datasets: DeepFashion3D [43], DTU [19] and BlendedMVS [39]. The DeepFashion3D dataset consists of clothing models, which are open models with boundaries. As only 3D points are available, we render 72 images of resolution $1024 \times 1024$ with a white background from different viewpoints for each model. In addition to the DeepFashion3D images rendered by us, most of which are texture-less, we also include the image data from NeuralUDF [25], most of which is texture-rich, in our experiments. We call these sets DF3D#Ours and DF3D#NeuralUDF, respectively. The DTU dataset consists of models captured in a studio, all of which are watertight. We use this dataset to validate that our method also works well for watertight models. These datasets have been widely used in previous works such as [34, 36, 40]. In our experiments, open models such as those in DeepFashion3D are trained without mask supervision; DTU is trained with mask supervision.

Baselines. To validate the effectiveness of our method, we compare it with the state-of-the-art UDF learning methods NeuralUDF [25], NeUDF [23] and NeAT [27], and the SDF learning methods VolSDF [40] and NeuS [34].
# 4.1. Comparisons on Open Models

<table><tr><td>Method</td><td>#1</td><td>#2</td><td>#3</td><td>#4</td><td>#5</td><td>#6</td><td>#7</td><td>#8</td><td>#9</td><td>Mean</td></tr><tr><td>NeuS</td><td>6.69</td><td>13.50</td><td>10.32</td><td>15.01</td><td>8.99</td><td>12.92</td><td>12.94</td><td>9.93</td><td>9.49</td><td>11.09</td></tr><tr><td>VolSDF</td><td>6.36</td><td>9.44</td><td>11.87</td><td>16.03</td><td>10.78</td><td>14.91</td><td>15.06</td><td>11.34</td><td>8.96</td><td>11.64</td></tr><tr><td>NeAT</td><td>10.54</td><td>13.89</td><td>7.30</td><td>13.12</td><td>13.18</td><td>12.44</td><td>8.22</td><td>10.30</td><td>11.33</td><td>11.15</td></tr><tr><td>NeuralUDF</td><td>6.07</td><td>11.58</td><td>7.68</td><td>10.96</td><td>11.16</td><td>9.76</td><td>6.98</td><td>6.13</td><td>6.41</td><td>8.53</td></tr><tr><td>NeUDF</td><td>4.39</td><td>8.29</td><td>4.94</td><td>19.56</td><td>7.52</td><td>8.18</td><td>3.81</td><td>3.81</td><td>5.76</td><td>7.36</td></tr><tr><td>Ours</td><td>4.55</td><td>5.77</td><td>4.27</td><td>7.43</td><td>6.59</td><td>4.77</td><td>2.88</td><td>3.21</td><td>5.73</td><td>5.02</td></tr><tr><td>Method</td><td>LS-C0</td><td>SS-D0</td><td>LS-D0</td><td>NS-D1</td><td>LS-C1</td><td>Skirt1</td><td>SS-C0</td><td colspan="3">Mean</td></tr><tr><td>NeuS</td><td>3.18</td><td>4.82</td><td>5.71</td><td>2.21</td><td>3.60</td><td>2.44</td><td>5.13</td><td colspan="3">3.87</td></tr><tr><td>VolSDF</td><td>5.92</td><td>4.79</td><td>5.96</td><td>4.36</td><td>8.73</td><td>7.74</td><td>8.84</td><td colspan="3">6.62</td></tr><tr><td>NeAT</td><td>3.06</td><td>4.33</td><td>5.92</td><td>3.52</td><td>8.84</td><td>3.91</td><td>4.30</td><td colspan="3">4.84</td></tr><tr><td>NeuralUDF</td><td>1.92</td><td>2.05</td><td>4.11</td><td>1.50</td><td>2.47</td><td>2.16</td><td>2.15</td><td colspan="3">2.34</td></tr><tr><td>NeUDF</td><td>1.95</td><td>2.93</td><td>N.A.</td><td>1.48</td><td>2.66</td><td>2.74</td><td>1.77</td><td colspan="3">2.26</td></tr><tr><td>Ours</td><td>1.92</td><td>1.97</td><td>2.46</td><td>1.47</td><td>2.14</td><td>1.84</td><td>1.91</td><td colspan="3">1.96</td></tr></table>

Table 1. Chamfer distances $(\times 10^{-3})$ on DF3D#Ours (top) and DF3D#NeuralUDF (bottom). NeAT requires mask supervision; the other methods do not.

We evaluate our method and compare it with the baselines using the garments from DeepFashion3D [43], where the models have multiple open boundaries. VolSDF and NeuS always close the boundaries, since they learn SDFs.
NeuralUDF, NeUDF and NeAT are designed to learn non-watertight models. NeAT learns SDFs for open models and requires mask supervision to produce reasonable results; the other methods do not require mask supervision for DeepFashion3D. The released codebase of NeuralUDF indicates that it also has a two-stage training process. We evaluate the results of NeuralUDF at the end of both stages and report whichever is better.

In contrast, NeuralUDF, NeUDF and our method learn UDFs, which can generate open models. Table 1 shows the Chamfer distances of the results on DeepFashion3D. Some of the Chamfer distances of the compared methods are large because the open holes are closed or the model is over-smoothed, resulting in significant errors.

As demonstrated in Figure 3, we test various types of garments, some of which have rich textures while others are nearly a single color. Learning UDFs for textureless models is more challenging, since different regions of a model are ambiguous without clear color differences. However, our 2S-UDF generates satisfactory results even without masks. Even with mask supervision, the results of NeAT [27] are over-smoothed and miss details, resulting in large Chamfer distance errors. NeuralUDF [25] is unable to properly reconstruct most textureless models, possibly due to its complex density function, which is difficult to converge. Some of the NeUDF [23] models become watertight.

Figure 3. Visual comparisons on selected models of the DeepFashion3D [43] dataset. The surfaces produced by NeuS and VolSDF are closed watertight models, so post-processing is required to remove the unnecessary parts. NeAT can produce open models by learning an SDF and predicting which surfaces in the extracted meshes should be removed, but it needs masks for supervision. NeuralUDF can generate open surfaces, but struggles with textureless inputs, leading to double-layered regions and large reconstruction errors. NeUDF generally performs well, but its training is unstable and may stumble on less distinctive, darker models like LS-D0. In contrast, our 2S-UDF consistently delivers effective reconstructions of non-watertight models. See the supplementary material for additional results.

Figure 4. Visualization of the learned UDFs on cross sections. Among our method, NeuralUDF, and NeUDF, our learned UDFs most closely resemble the ground truth. NeAT is omitted from this visualization because it learns SDFs in lieu of UDFs. Note that for LS-D0, NeUDF completely collapses without learning a reasonable UDF.

To analyze the reasons, we visualize cross sections of these UDFs in Figure 4. To compute the ground-truth UDFs, we sample 30,000 points from every input point model and compute the distance to the nearest sample point for every point in a 3D grid of resolution $512 \times 512 \times 512$. All other UDFs are extracted by querying the distance network on a 3D grid of the same resolution. Our learned UDFs resemble the ground truth with little difference, while the UDFs of NeuralUDF deviate from the ground truth significantly, explaining its difficulty in converging. The UDFs of NeUDF are better, but the distances approach zero around open holes; as a result, it is challenging to generate non-watertight models, and some of them are even closed. NeAT learns an SDF, so we do not show its distance fields.
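The ground-truth UDF computation is a nearest-neighbor query over the sampled points; a sketch using a KD-tree (the normalization of the grid to $[-1, 1]^3$ is our assumption):

```python
import numpy as np
from scipy.spatial import cKDTree

def ground_truth_udf(points, res=512):
    # points: (30000, 3) samples from the input model.
    # For res = 512 the full grid is large; query it in chunks in practice.
    axis = np.linspace(-1.0, 1.0, res)
    grid = np.stack(np.meshgrid(axis, axis, axis, indexing="ij"), axis=-1)
    dists, _ = cKDTree(points).query(grid.reshape(-1, 3))
    return dists.reshape(res, res, res)
```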
As illustrated in Figure 5, perhaps due to taking the absolute value of an MLP output for the UDF representation, NeuralUDF may generate two layers of zero level-sets on both sides of the surface, resulting in double-layered regions after its Stage 1 learning. In its Stage 2 refinement, however, the surface is crushed into pieces and the Chamfer distance errors surge suddenly.

In Figure 6, we conduct additional experiments on open-model data provided by NeUDF [23]. For the rack model, the thin structures reconstructed by NeuralUDF [25] and NeUDF [23] appear eroded, whereas ours do not. The thin structures reconstructed by NeAT [27] are the closest to the reference image, but the surface is dented inward with visible artifacts due to imperfect SDF validity learning.

Figure 5 (panels: Stage 1 NeuralUDF, Stage 1 2S-UDF). Plots of the Chamfer distance throughout the training process. Our method consistently reduces the Chamfer distance across both stages. In contrast, NeuralUDF, which also adopts a two-stage learning strategy, exhibits instability and yields a fragmented output following the second stage. The first-stage output of NeuralUDF, however, contains double-layered regions as marked above. In this figure, both methods start their Stage 2 training at 250k iterations.
<table><tr><td>Method</td><td>37</td><td>55</td><td>65</td><td>69</td><td>97</td><td>105</td><td>106</td><td>114</td><td>118</td><td>122</td><td>Mean</td></tr><tr><td>NeuralUDF</td><td>1.18</td><td>0.44</td><td>0.66</td><td>0.67</td><td>0.94</td><td>0.95</td><td>0.57</td><td>0.37</td><td>0.56</td><td>0.55</td><td>0.69</td></tr><tr><td>NeAT</td><td>1.18</td><td>0.47</td><td>0.82</td><td>0.84</td><td>1.09</td><td>0.75</td><td>0.76</td><td>0.38</td><td>0.56</td><td>0.55</td><td>0.74</td></tr><tr><td>NeUDF</td><td>0.90</td><td>0.65</td><td>0.73</td><td>0.97</td><td>1.07</td><td>0.63</td><td>0.94</td><td>0.59</td><td>0.72</td><td>0.62</td><td>0.78</td></tr><tr><td>Ours</td><td>0.89</td><td>0.55</td><td>0.68</td><td>0.88</td><td>1.15</td><td>0.70</td><td>0.74</td><td>0.41</td><td>0.61</td><td>0.51</td><td>0.71</td></tr></table>

Table 2. Chamfer distances on the DTU dataset.
The plant model does not have an object mask, making NeAT [27] impractical to train. NeuralUDF [25] completely fails to reconstruct a reasonable surface. Between our method and NeUDF [23], both of which reconstruct a sensible model, the flower-pot region marked in red is missing in NeUDF but not in ours. These results show that our method reconstructs non-watertight models more robustly than the other methods.

Figure 6. Qualitative comparisons with NeAT [27], NeuralUDF [25] and NeUDF [23] on example data released by NeUDF [23]. Note that NeAT cannot reconstruct the "plant" dataset because the ground truth mask for "plant" is unavailable.
# 4.2. Comparisons on Watertight Models

Other methods can also be used as the first stage of our 2S-UDF. We use NeUDF for the first-stage training on the DTU dataset [19]. As detailed in Table 2, we compare the Chamfer distances of the reconstruction results with NeuralUDF, NeAT and NeUDF without our second-stage training. SDFs generally excel at learning watertight models, and it is worth pointing out that NeuralUDF takes the absolute value of the MLP output as the UDF value of a given point; therefore, for closed models, it can easily learn an SDF and take its absolute value to produce a UDF. NeAT, on the other hand, explicitly learns an SDF. NeUDF and our method truly learn UDFs. Although UDF learning is much more complicated than SDF learning, because the UDF gradient near 0 is blurry and the gradient is unavailable at 0, our method still improves the reconstruction quality of NeUDF by around $10\%$, as shown in Figure 7. We further provide close-up views of specific parts of the models for detailed comparison in Figure 7. These local callouts show ground-truth points located on both sides of our surfaces, whereas most of the points lie on only one side of the NeUDF surfaces. This illustrates that our reconstructed surfaces are closer to the ground-truth points, improving the resulting quality over NeUDF, on a par with NeuralUDF and NeAT.

Figure 7. Qualitative comparisons with NeAT, NeuralUDF and NeUDF on the DTU [19] dataset, and close-up comparisons against NeUDF. Our method reconstructs surfaces closer to the ground-truth point clouds in various places, such as the marked region, generally improving the reconstruction accuracy of NeUDF by around $10\%$, on a par with NeuralUDF and NeAT in the bottom two rows.
# 4.3. Ablation Studies

In this section, we present the main ablation studies. We refer interested readers to the supplementary material for additional ablation studies.

Effect of the two-stage training. We conduct an ablation study on the effect of the two-stage learning. We compare the Chamfer distances among training with both stages, only Stage 1, and only Stage 2, as shown in Table 3. Our results show that two-stage training improves the Chamfer distance (lower is better) compared to training with only Stage 1 or Stage 2 under most circumstances.

<table><tr><td>Method</td><td>#1</td><td>#7</td><td>#8</td><td>LS-D0</td></tr><tr><td>S1 & S2</td><td>4.55</td><td>2.88</td><td>3.21</td><td>2.46</td></tr><tr><td>S1</td><td>7.22</td><td>2.46</td><td>3.38</td><td>6.04</td></tr><tr><td>S2</td><td>5.75</td><td>4.00</td><td>5.96</td><td>3.65</td></tr><tr><td>Method</td><td>NS-D1</td><td>LS-C1</td><td>DTU 114</td><td>DTU 122</td></tr><tr><td>S1 & S2</td><td>1.47</td><td>2.14</td><td>0.41</td><td>0.51</td></tr><tr><td>S1</td><td>1.46</td><td>6.23</td><td>0.59</td><td>0.62</td></tr><tr><td>S2</td><td>1.64</td><td>2.98</td><td>0.63</td><td>0.60</td></tr></table>

Table 3. Chamfer distances of models learned by both Stage 1 and 2 (S1 & S2), only Stage 1 (S1), and only Stage 2 (S2) on selected datasets. Models learned with both stages yield the lowest Chamfer distances; when trained with only Stage 1 or Stage 2, the Chamfer distances generally become significantly higher.
It should be noted that training with only the second stage from scratch is also capable of generating a generally reasonable result. However, the Chamfer distances shown in Table 3 indicate that its learning ability is limited. Therefore, the second, refinement stage should cooperate with the first, coarse stage to generate the best results.

Choice of accumulated weight threshold $\delta_{thres}$. In Stage 2, a ray truncation point requires the accumulated weight up to that point to be greater than $\delta_{thres}$, where we intuitively select $\delta_{thres} = 0.5$. Figure 8 shows the reconstruction results for other choices of $\delta_{thres}$, namely 0.3 and 0.7. We observe that all threshold choices successfully reconstruct the model. Setting the threshold up to 0.7 produces visually similar results. Setting it down to 0.3 also works fine in general, although it may introduce more holes into the reconstructed meshes. We deduce that a lower threshold increases the possibility that a ray is truncated prematurely, leading to less desirable results. Nevertheless, a considerable range of $\delta_{thres}$ from 0.3 to 0.7 causes no major regression, indicating that our Stage 2 training is robust to the choice of $\delta_{thres}$.
# 4.4. Limitations

Since light rays are cut off after passing through one layer of surface, our method relinquishes the ability to model transparent surfaces. Occasionally, due to learning uncertainty, the Chamfer distance may increase slightly in the second stage, but the difference is quite small and has no visual impact. Overall, the two-stage learning improves quality significantly. For watertight models, SDF learning is more suitable than UDF learning, since UDF learning is more complicated. We still advise using SDF methods, e.g., NeuS [34], HF-NeuS [36] or PET-NeuS [37], for watertight model reconstruction. Also, the mesh extraction of MeshUDF [15] tends to generate holes and "staircase" artifacts, affecting the mesh reconstruction quality. Adopting a more robust extraction method, e.g., DoubleCoverUDF [18], could alleviate the problem, but we use MeshUDF for all methods here for a fair comparison.

Figure 8. Qualitative comparisons of different choices of the accumulated weight threshold $\delta_{\text{thres}}$. Setting a higher threshold works well with few visual differences; setting a lower threshold generally works fine but may introduce more holes in the reconstructed meshes.
# 5. Conclusions

Overall, 2S-UDF offers a promising approach to the problem of reconstructing both open and watertight models from multi-view images. Its advantages over existing methods lie in the use of a simple and more accurate density function and a smooth, differentiable UDF representation, so that the learned UDF approximates the ground truth as closely as possible. A two-stage learning strategy further eliminates bias and improves UDF accuracy. Results from our experiments on the DeepFashion3D, DTU and BlendedMVS datasets demonstrate the effectiveness of our method, particularly in learning smooth and stably open UDFs, revealing the robustness of 2S-UDF. Moreover, our method does not rely on object masks for open model reconstruction, making it more practical in real-world applications.

# Acknowledgments

This project was supported in part by the National Natural Science Foundation of China under Grants (61872347, 62072446), in part by the National Key R&D Program of China under Grant 2023YFB3002901, in part by the Basic Research Project of ISCAS under Grant ISCAS-JCMS-202303 and in part by the Ministry of Education, Singapore, under its Academic Research Fund Grants (MOET2EP20220-0005, RG20/20 & RT19/22).

# References
[1] Dejan Azinović, Ricardo Martin-Brualla, Dan B Goldman, Matthias Nießner, and Justus Thies. Neural RGB-D Surface Reconstruction. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6280-6291, 2022. 3

[2] Fausto Bernardini, Joshua Mittleman, Holly Rushmeier, Cláudio Silva, and Gabriel Taubin. The ball-pivoting algorithm for surface reconstruction. IEEE Trans. Vis. Comput. Graph., 5(4):349-359, 1999. 2

[3] A. Broadhurst, T.W. Drummond, and R. Cipolla. A probabilistic framework for space carving. In Int. Conf. Comput. Vis., pages 388-393 vol.1, 2001. 2

[4] Rohan Chabra, Jan E. Lenssen, Eddy Ilg, Tanner Schmidt, Julian Straub, Steven Lovegrove, and Richard Newcombe. Deep Local Shapes: Learning Local SDF Priors for Detailed 3D Reconstruction. In Eur. Conf. Comput. Vis., pages 608-625, Cham, 2020. Springer International Publishing. 2

[5] Julian Chibane, Thiemo Alldieck, and Gerard Pons-Moll. Implicit Functions in Feature Space for 3D Shape Reconstruction and Completion. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6968-6979, 2020. 2

[6] Julian Chibane, Mohamad Aymen Mir, and Gerard Pons-Moll. Neural Unsigned Distance Fields for Implicit Function Learning. In Adv. Neural Inform. Process. Syst., pages 21638-21652. Curran Associates, Inc., 2020. 3

[7] François Darmon, Bénédicte Bascle, Jean-Clement Devaux, Pascal Monasse, and Mathieu Aubry. Improving neural implicit surfaces geometry with patch warping. In IEEE Conf. Comput. Vis. Pattern Recog., pages 6250-6259, 2022. 1, 2

[8] J. De Bonet and P. Viola. Roxels: responsibility weighted 3D volume reconstruction. In Int. Conf. Comput. Vis., pages 418-425 vol.1, 1999. 2

[9] Charles Dugas, Yoshua Bengio, François Bélisle, Claude Nadeau, and René Garcia. Incorporating Second-Order Functional Knowledge for Better Option Pricing. In Adv. Neural Inform. Process. Syst. MIT Press, 2000. 4

[10] Qiancheng Fu, Qingshan Xu, Yew Soon Ong, and Wenbing Tao. Geo-Neus: Geometry-Consistent Neural Implicit Surfaces Learning for Multi-view Reconstruction. In Adv. Neural Inform. Process. Syst., pages 3403-3416. Curran Associates, Inc., 2022. 2

[11] Kunihiko Fukushima. Cognitron: a self-organizing multilayered neural network. Biological Cybernetics, 20(3-4):121-136, 1975. 4

[12] Yasutaka Furukawa and Carlos Hernández. Multi-View Stereo: A Tutorial. Found. Trends. Comput. Graph. Vis., 9(1-2):1-148, 2015. 2

[13] Silvano Galliani, Katrin Lasinger, and Konrad Schindler. Massively Parallel Multiview Stereopsis by Surface Normal Diffusion. In Int. Conf. Comput. Vis., pages 873-881, 2015. 2

[14] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit Geometric Regularization for Learning Shapes. In Proceedings of the 37th International Conference on Machine Learning, pages 3789-3799. PMLR, 2020. 5

[15] Benoit Guillard, Federico Stella, and Pascal Fua. MeshUDF: Fast and Differentiable Meshing of Unsigned Distance Field Networks. In Eur. Conf. Comput. Vis., pages 576-592, Cham, 2022. Springer Nature Switzerland. 8

[16] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, 2 edition, 2004. 2

[17] Fei Hou, Chiyu Wang, Wencheng Wang, Hong Qin, Chen Qian, and Ying He. Iterative Poisson surface reconstruction (iPSR) for unoriented points. ACM Trans. Graph., 41(4), 2022. 2

[18] Fei Hou, Xuhui Chen, Wencheng Wang, Hong Qin, and Ying He. Robust Zero Level-Set Extraction from Unsigned Distance Fields Based on Double Covering. ACM Trans. Graph., 42(6), 2023. 8

[19] Rasmus Jensen, Anders Dahl, George Vogiatzis, Engin Tola, and Henrik Aanæs. Large Scale Multi-view Stereopsis Evaluation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 406-413, 2014. 2, 5, 7

[20] Mengqi Ji, Jinzhi Zhang, Qionghai Dai, and Lu Fang. SurfaceNet+: An End-to-end 3D Neural Network for Very Sparse Multi-View Stereopsis. IEEE Trans. Pattern Anal. Mach. Intell., 43(11):4078-4093, 2021. 2

[21] Abhishek Kar, Christian Hane, and Jitendra Malik. Learning a Multi-View Stereo Machine. In Adv. Neural Inform. Process. Syst. Curran Associates, Inc., 2017. 2

[22] Michael Kazhdan and Hugues Hoppe. Screened Poisson surface reconstruction. ACM Trans. Graph., 32(3), 2013. 2

[23] Yu-Tao Liu, Li Wang, Jie Yang, Weikai Chen, Xiaoxu Meng, Bo Yang, and Lin Gao. NeUDF: Learning Neural Unsigned Distance Fields with Volume Rendering. In IEEE Conf. Comput. Vis. Pattern Recog., pages 237-247, 2023. 1, 2, 3, 4, 5, 6, 7

[24] Xiaoxiao Long, Cheng Lin, Peng Wang, Taku Komura, and Wenping Wang. SparseNeuS: Fast Generalizable Neural Surface Reconstruction from Sparse Views. In Eur. Conf. Comput. Vis., pages 210-227, Cham, 2022. Springer Nature Switzerland. 2

[25] Xiaoxiao Long, Cheng Lin, Lingjie Liu, Yuan Liu, Peng Wang, Christian Theobalt, Taku Komura, and Wenping Wang. NeuralUDF: Learning Unsigned Distance Fields for Multi-View Reconstruction of Surfaces with Arbitrary Topologies. In IEEE Conf. Comput. Vis. Pattern Recog., pages 20834-20843, 2023. 1, 2, 3, 4, 5, 6, 7

[26] Baorui Ma, Zhizhong Han, Yu-Shen Liu, and Matthias Zwicker. Neural-Pull: Learning Signed Distance Function from Point Clouds by Learning to Pull Space onto Surface. In Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, pages 7246-7257. PMLR, 2021. 2

[27] Xiaoxu Meng, Weikai Chen, and Bo Yang. NeAT: Learning Neural Implicit Surfaces with Arbitrary Topologies from Multi-View Images. In IEEE Conf. Comput. Vis. Pattern Recog., pages 248-258, 2023. 2, 5, 6, 7

[28] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy Networks: Learning 3D Reconstruction in Function Space. In IEEE Conf. Comput. Vis. Pattern Recog., pages 4455-4465, 2019. 2

[29] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In Eur. Conf. Comput. Vis., pages 405-421, Cham, 2020. Springer International Publishing. 1, 2

[30] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning Continuous Signed Distance Functions for Shape Representation. In IEEE Conf. Comput. Vis. Pattern Recog., pages 165-174, 2019. 2

[31] Johannes L. Schonberger, Enliang Zheng, Jan-Michael Frahm, and Marc Pollefeys. Pixelwise View Selection for Unstructured Multi-View Stereo. In Eur. Conf. Comput. Vis., pages 501-518, Cham, 2016. Springer International Publishing. 2

[32] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit Neural Representations with Periodic Activation Functions. In Adv. Neural Inform. Process. Syst., pages 7462-7473. Curran Associates, Inc., 2020. 2

[33] Jiaming Sun, Yiming Xie, Linghao Chen, Xiaowei Zhou, and Hujun Bao. NeuralRecon: Real-Time Coherent 3D Reconstruction from Monocular Video. In IEEE Conf. Comput. Vis. Pattern Recog., pages 15593-15602, 2021. 2

[34] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. NeuS: Learning Neural Implicit Surfaces by Volume Rendering for Multi-view Reconstruction. In Adv. Neural Inform. Process. Syst., pages 27171-27183. Curran Associates, Inc., 2021. 1, 2, 5, 8

[35] Yifan Wang, Lukas Rahmann, and Olga Sorkine-Hornung. Geometry-Consistent Neural Shape Representation with Implicit Displacement Fields. In Int. Conf. Learn. Represent. OpenReview.net, 2022. 2

[36] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. HF-NeuS: Improved Surface Reconstruction Using High-Frequency Details. In Adv. Neural Inform. Process. Syst., pages 1966-1978. Curran Associates, Inc., 2022. 1, 2, 3, 5, 8

[37] Yiqun Wang, Ivan Skorokhodov, and Peter Wonka. PET-NeuS: Positional Encoding Tri-Planes for Neural Surfaces. In IEEE Conf. Comput. Vis. Pattern Recog., pages 12598-12607, 2023. 2, 8

[38] Yao Yao, Zixin Luo, Shiwei Li, Tianwei Shen, Tian Fang, and Long Quan. Recurrent MVSNet for High-Resolution Multi-View Stereo Depth Inference. In IEEE Conf. Comput. Vis. Pattern Recog., pages 5520-5529, 2019. 2

[39] Yao Yao, Zixin Luo, Shiwei Li, Jingyang Zhang, Yufan Ren, Lei Zhou, Tian Fang, and Long Quan. BlendedMVS: A Large-Scale Dataset for Generalized Multi-View Stereo Networks. In IEEE Conf. Comput. Vis. Pattern Recog., pages 1787-1796, 2020. 5

[40] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume Rendering of Neural Implicit Surfaces. In Adv. Neural Inform. Process. Syst., pages 4805-4815. Curran Associates, Inc., 2021. 1, 2, 5

[41] Fang Zhao, Wenhao Wang, Shengcai Liao, and Ling Shao. Learning Anchored Unsigned Distance Functions with Gradient Direction Alignment for Single-view Garment Reconstruction. In Int. Conf. Comput. Vis., pages 12654-12663, 2021. 3

[42] Junsheng Zhou, Baorui Ma, Yu-Shen Liu, Yi Fang, and Zhizhong Han. Learning Consistency-Aware Unsigned Distance Functions Progressively from Raw Point Clouds. In Adv. Neural Inform. Process. Syst., pages 16481-16494. Curran Associates, Inc., 2022. 3

[43] Heming Zhu, Yu Cao, Hang Jin, Weikai Chen, Dong Du, Zhangye Wang, Shuguang Cui, and Xiaoguang Han. Deep Fashion3D: A Dataset and Benchmark for 3D Garment Reconstruction from Single Images. In Eur. Conf. Comput. Vis., pages 512-530, Cham, 2020. Springer International Publishing. 2, 5, 6
2sudfanoveltwostageudflearningmethodforrobustnonwatertightmodelreconstructionfrommultiviewimages/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:00da6e4b7182e1d9fb570f675cf1222b4845baeca2e2a7efb1bd7dd43bf29d2a
size 466709
2sudfanoveltwostageudflearningmethodforrobustnonwatertightmodelreconstructionfrommultiviewimages/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2998861887a784b313cff8188aff700d652c4ce5eef7b8888011f9de9383d0db
size 377439
360dvdcontrollablepanoramavideogenerationwith360degreevideodiffusionmodel/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:867af28cc4ffa09bf929dde74012eabdee9469e1896130c84f1beb8476af640b
size 74137
360dvdcontrollablepanoramavideogenerationwith360degreevideodiffusionmodel/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:30d7903ade326cd10cff6c1e2ef004e9c8ef09b4f168b35c6213201210fc6501
size 94334
360dvdcontrollablepanoramavideogenerationwith360degreevideodiffusionmodel/b8d3f2a2-b423-4011-8c8e-f2db936aeb7d_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:01d96ce145bbf44f2adf1c5d78e08990550d928553a9f19f322e401d5c9e2fd8
size 3434502
360dvdcontrollablepanoramavideogenerationwith360degreevideodiffusionmodel/full.md
ADDED
@@ -0,0 +1,295 @@
# 360DVD: Controllable Panorama Video Generation with 360-Degree Video Diffusion Model

Qian Wang$^{1,2}$, Weiqi Li$^{1}$, Chong Mou$^{1,2}$, Xinhua Cheng$^{1,2}$, Jian Zhang$^{1,2}$
$^{1}$School of Electronic and Computer Engineering, Peking University
$^{2}$Peking University Shenzhen Graduate School-Rabbitpre AIGC Joint Research Laboratory
{qianwang, liweiqi, eechongm, chengxinhua}@stu.pku.edu.cn, zhangjian.sz@pku.edu.cn

# Abstract

Panorama video has recently attracted growing interest in both research and application thanks to its immersive experience. Because capturing $360^{\circ}$ panoramic videos is expensive, generating desirable panorama videos from prompts is urgently needed. Recently, emerging text-to-video (T2V) diffusion methods have demonstrated notable effectiveness in standard video generation. However, due to the significant gap in content and motion patterns between panoramic and standard videos, these methods encounter challenges in yielding satisfactory $360^{\circ}$ panoramic videos. In this paper, we propose a pipeline named 360-Degree Video Diffusion model (360DVD) for generating $360^{\circ}$ panoramic videos based on given prompts and motion conditions. Specifically, we introduce a lightweight 360-Adapter accompanied by 360 Enhancement Techniques to transform pre-trained T2V models for panorama video generation. We further propose a new panorama dataset named WEB360, consisting of panoramic video-text pairs, for training 360DVD, addressing the absence of captioned panoramic video datasets. Extensive experiments demonstrate the superiority and effectiveness of 360DVD for panorama video generation. Our project page is at https://akaneqwq.github.io/360DVD/.
# 1. Introduction

With the recent advancements in VR technology, 360-degree panoramic videos have been gaining increasing popularity. This video format, which offers audiences an immersive experience, is helpful for various applications, including entertainment, education, and communication. To capture details of the entire scene, $360^{\circ}$ videos are typically recorded using an array of high-resolution fisheye cameras that yields a $360^{\circ} \times 180^{\circ}$ field-of-view (FoV) [1], which is quite costly in both time and resources. Therefore, the generation of $360^{\circ}$ panoramic videos is urgently required for broader applications, while panoramic video generation has received little attention in studies to date.

This work was supported by National Natural Science Foundation of China under Grant 62372016. (Corresponding author: Jian Zhang)

Thanks to emerging theory and training strategies, text-to-image (T2I) diffusion models [26, 27, 31, 32, 35] demonstrate remarkable image generation capacity from prompts given by users, and this impressive achievement in image generation has been further extended to text-to-video (T2V) generation. Various T2V diffusion models [3, 16, 37, 46, 52, 60] have recently been proposed that adopt space-time separable architectures, wherein spatial operations are inherited from pre-trained T2I models to reduce the complexity of constructing space-time models from scratch. Among these, AnimateDiff [16] enables various personalized T2I models to generate animated images, which alleviates the requirement for model-specific tuning and achieves compelling content consistency over time.

Although T2V methods for standard videos are widely studied, no method has been proposed for panorama video generation. One potential approach is to leverage existing powerful T2V models, e.g., AnimateDiff, to directly generate the equirectangular projection (ERP) of panoramic videos. Since ERP is a commonly adopted format for storing and transmitting panoramic videos, each frame is represented in ERP as a rectangular image with an aspect ratio of 1:2, which aligns well with the output format of existing standard T2V models. However, due to the significant differences between panoramic videos and standard videos, existing methods struggle to directly produce satisfactory $360^{\circ}$ panoramic videos. Concretely, the main challenges include three aspects: (1) The content distribution of ERPs differs from standard videos; ERPs cover a much wider FoV, reaching $360^{\circ} \times 180^{\circ}$. (2) The motion patterns of ERPs differ from standard videos, with movements often following curves rather than straight lines. (3) The left and right ends of ERPs should exhibit continuity since they correspond to the same meridian on the sphere.
Figure 1. Main results. Our 360DVD creates text-aligned, coherent, and high-quality $360^{\circ}$ panorama videos. Furthermore, 360DVD can cooperate with multiple personalized text-to-image models and consistently generate stylized panorama videos.

Therefore, we propose a specifically designed method named 360-Degree Video Diffusion (360DVD) for generating panorama videos. We first introduce a plug-and-play module named 360-Adapter to address the challenges mentioned above. Our 360-Adapter receives zero values or motion conditions (e.g., optical flow) as input and outputs motion features, which are fed into the frozen denoising U-Net at different levels of the encoder. This design aims to turn the T2V model into a panoramic video generator without altering its foundational generative capabilities. In addition, we introduce 360 Enhancement Techniques, including two mechanisms to enhance continuity at both ends of ERPs from both macro and micro perspectives, and a latitude-aware loss function that encourages the model to focus more on low-latitude regions. Equipped with these carefully designed techniques, our 360DVD generates text-aligned, coherent, high-quality $360^{\circ}$ panorama videos in various styles, as shown in Fig. 1.

Furthermore, we collect a panorama dataset named WEB360, including ERP-formatted videos from the internet and games, for training our method. WEB360 involves approximately 2,000 video clips, each consisting of 100 frames. Considering the domain gap between panoramic and standard images, we introduce a GPT-based 360 Text Fusion module to obtain detailed captions with improved accuracy and granularity. Our contributions can be summarized as follows:
- We introduce a controllable $360^{\circ}$ panorama video generation diffusion model named 360DVD, achieved by adapting a controllable standard T2V model with a trainable lightweight 360-Adapter. Our model can generate text-guided panorama videos conditioned on desired motions.
- We design 360 Enhancement Techniques, including a latitude-aware loss and two mechanisms, to enhance the content and motion quality of generated panorama videos.
- We propose a new high-quality dataset named WEB360 comprising approximately 2,000 panoramic videos, each accompanied by a detailed caption enhanced through 360 Text Fusion.
- Experiments demonstrate that our 360DVD is capable of generating high-quality, high-diversity, and more consistent $360^{\circ}$ panorama videos.

# 2. Related Works
# 2.1. Text-to-Image Diffusion Model

The Denoising Diffusion Probabilistic Model [9, 17, 39] has proven to be highly successful in generating high-quality images, outperforming previous approaches such as generative adversarial networks (GANs) [11, 57], variational autoencoders (VAEs) [20, 38], and flow-based methods [5]. With text guidance during training, users can generate images based on textual input; noteworthy examples include GLIDE [27], DALL-E 2 [31], and Imagen [35]. To address the computational burden of the iterative denoising process, LDM [32] conducts the diffusion process in a compressed latent space rather than the original pixel space. This accomplishment has prompted further exploration in extending customization [14, 34], image guidance [53, 55], precise control [25, 26, 58], and protection [56].
# 2.2. Text-to-Video Diffusion Model

Despite significant advancements in Text-to-Image (T2I) generation, Text-to-Video (T2V) generation faces challenges, including the absence of large-scale, high-quality paired text-video datasets, the inherent complexity of modeling temporal consistency, and the resource-intensive nature of training. To address these challenges, many works leverage the knowledge from pre-trained T2I models and manage training costs by executing the diffusion process in the latent space. Some methods [15, 29, 48, 49, 54] utilize T2I models in zero-shot or few-shot ways. However, these methods often suffer from suboptimal frame consistency due to insufficient training. To address this limitation, another category of T2V diffusion models typically adopts space-time separable architectures. These models [3, 37, 46, 60] inherit spatial operations from pre-trained T2I models, reducing the complexity of constructing space-time models from scratch. Given that most personalized T2I models are derived from the same base model (e.g., Stable Diffusion [32]), AnimateDiff [16] designs a motion modeling module that is trained with a base T2I model and can animate most derived personalized T2I models once and for all. There are also efforts focused on enhancing control in T2V models. Gen-1 [13], MCDiff [6], LaMD [18], and VideoComposer [47] introduce diverse conditions to T2V models. Despite these advancements, the aforementioned methods demand extensive training and lack a plug-and-play nature, making it challenging to apply them to a diverse range of personalized T2I models.
# 2.3. Panorama Generation

GAN-based methods for generating panoramic images have been widely studied [2, 4, 7, 10, 12, 23, 24, 28, 40, 41, 43, 50]. For instance, OmniDreamer [2] accepts a single NFoV image as an input condition and introduces a cyclic inference scheme to meet the inherent horizontal cyclicity of 360-degree images. ImmenseGAN [12] fine-tunes the generative model using a large-scale private text-image pair dataset, making the generation more controllable. Text2Light [7] introduces a zero-shot text-guided 360-image synthesis pipeline by utilizing the CLIP model. Very recently, diffusion models have achieved promising results in panoramic image generation. DiffCollage [59] uses semantic maps as conditions and generates images based on complex factor graphs using retrained diffusion models. PanoGen [21] employs a latent diffusion model and synthesizes new indoor panoramic images through recursive image drawing techniques based on multiple text descriptions. PanoDiff [45] achieves multi-NFoV synthesis of panoramic images through a two-stage pose estimation module. IPO-LDM [51] uses a dual-modal RGB-D diffusion structure to better learn the spatial distribution and patterns of panoramic images. StitchDiffusion [44] employs a T2I diffusion model, ensuring continuity at both ends through stitching. However, to date, panoramic video generation has received limited attention. To the best of our knowledge, we are the first to leverage diffusion models for panoramic video generation.
# 3. Method

In this section, we begin with a concise review of the latent diffusion model and AnimateDiff [16]. Following that, we introduce the construction of the WEB360 dataset. We then provide an overview of 360DVD and elaborate on the implementation details of the 360-Adapter. Finally, we describe the 360 Enhancement Techniques aimed at enriching the panoramic nature of the generated videos.
# 3.1. Preliminaries

Latent Diffusion Model. Given an input signal $\mathbf{x}_0$, the diffusion forward process in DDPM [17] is defined as:

$$
q\left(\mathbf{x}_{t} \mid \mathbf{x}_{t-1}\right)=\mathcal{N}\left(\mathbf{x}_{t} ; \sqrt{1-\beta_{t}} \mathbf{x}_{t-1}, \beta_{t} \mathbf{I}\right), \tag{1}
$$

for $t = 1,\ldots,T$, where $T$ is the total number of timesteps in the diffusion process. Noise with variance $\beta_{t}$ is gradually added to $\mathbf{x}_{t-1}$ to obtain $\mathbf{x}_t$ at the next timestep, finally reaching $\mathbf{x}_T \sim \mathcal{N}(0,\mathbf{I})$. The goal of the diffusion model is to learn to reverse this process (denoising). Starting from a random noise $\mathbf{x}_T$, the model predicts the signal at the previous timestep, $\mathbf{x}_{t-1}$, from $\mathbf{x}_t$, repeating until the original signal $\mathbf{x}_0$ is recovered:
$$
p_{\theta}\left(\mathbf{x}_{t-1} \mid \mathbf{x}_{t}\right)=\mathcal{N}\left(\mathbf{x}_{t-1} ; \boldsymbol{\mu}_{\theta}(\mathbf{x}_{t}, t), \boldsymbol{\Sigma}_{\theta}(\mathbf{x}_{t}, t)\right), \tag{2}
$$

for $t = T,\ldots,1$. The variance $\boldsymbol{\Sigma}_{\theta}(\mathbf{x}_t,t)$ is fixed, and the diffusion model with parameters $\theta$ predicts the mean of the reverse process, $\boldsymbol{\mu}_{\theta}(\mathbf{x}_t,t)$. The model can be simplified to a denoising model $\boldsymbol{\epsilon}_{\theta}(\mathbf{x}_t,t)$, which is trained to predict the noise in $\mathbf{x}_t$ with a noise prediction loss:

$$
\mathcal{L}=\mathbb{E}_{\mathbf{x}_{0}, \mathbf{y}, \boldsymbol{\epsilon} \sim \mathcal{N}(0, \mathbf{I}), t}\left[\left\|\boldsymbol{\epsilon}-\boldsymbol{\epsilon}_{\theta}\left(\mathbf{x}_{t}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y})\right)\right\|_{2}^{2}\right], \tag{3}
$$

where $\boldsymbol{\epsilon}$ is the noise added to the input image $\mathbf{x}_0$, $\mathbf{y}$ is the corresponding textual description, and $\boldsymbol{\tau}_{\theta}(\cdot)$ is a text encoder mapping the string to a sequence of vectors.
Latent Diffusion Model (LDM) [32] executes the denoising process in the latent space of an autoencoder, namely $\mathcal{E}(\cdot)$ and $\mathcal{D}(\cdot)$, implemented as a VQ-GAN [19] or VQ-VAE [42] pre-trained on large image datasets. During the training of the latent diffusion networks, an input image $\mathbf{x}_0$ is initially mapped to the latent space by the frozen encoder, yielding $\mathbf{z}_0 = \mathcal{E}(\mathbf{x}_0)$. Thus, the training objective can be formulated as follows:

$$
\mathcal{L}=\mathbb{E}_{\mathcal{E}(\mathbf{x}_{0}), \mathbf{y}, \boldsymbol{\epsilon} \sim \mathcal{N}(0, \mathbf{I}), t}\left[\left\|\boldsymbol{\epsilon}-\boldsymbol{\epsilon}_{\theta}\left(\mathbf{z}_{t}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y})\right)\right\|_{2}^{2}\right]. \tag{4}
$$

In the widely used LDM Stable Diffusion (SD), on which our method is based, $\boldsymbol{\epsilon}_{\theta}(\cdot)$ is implemented with a modified U-Net [33] that incorporates four downsample/upsample blocks and one middle block, resulting in four resolution levels within the network's latent space. Each resolution level integrates 2D convolution layers as well as self- and cross-attention mechanisms. The text model $\boldsymbol{\tau}_{\theta}(\cdot)$ is implemented using the CLIP [30] ViT-L/14 text encoder.
AnimateDiff. AnimateDiff inflates the base SD by adding temporal-aware structures and learning reasonable motion priors from large-scale video datasets. Since the original SD can only process 4D image batches, while the T2V task takes a 5D video tensor as input, it transforms each 2D convolution and attention layer in the original image model into a spatial-only pseudo-3D layer. The motion module is inserted at every resolution level of the U-shaped diffusion network, using vanilla temporal transformers consisting of several self-attention blocks operating along the temporal axis. The training objective of AnimateDiff can be written as:

$$
\mathcal{L}=\mathbb{E}_{\mathcal{E}\left(\mathbf{x}_{0}^{1:N}\right), \mathbf{y}, \boldsymbol{\epsilon} \sim \mathcal{N}(0, \mathbf{I}), t}\left[\left\|\boldsymbol{\epsilon}-\boldsymbol{\epsilon}_{\theta}\left(\mathbf{z}_{t}^{1:N}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y})\right)\right\|_{2}^{2}\right], \tag{5}
$$

where $\mathbf{x}_0^{1:N}$ is the sampled video data, $\mathbf{z}_0^{1:N}$ is the latent code into which $\mathbf{x}_0^{1:N}$ is encoded via the pre-trained autoencoder, and $\mathbf{z}_t^{1:N}$ is the latent code obtained by perturbing the initial latent code $\mathbf{z}_0^{1:N}$ with noise at timestep $t$. During training, the pre-trained weights of the base T2I model are frozen to keep its feature space unchanged.
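At its core, the motion module described above is self-attention along the frame axis. The following is a minimal sketch of such a temporal self-attention block (an illustration of the idea, not AnimateDiff's actual implementation); it reshapes the 5D video feature so that attention mixes information only across the $N$ frames at each spatial location.

```python
import torch
import torch.nn as nn

class TemporalSelfAttention(nn.Module):
    """Vanilla temporal transformer block: self-attention along the frame
    axis only. Sketch of an AnimateDiff-style motion module; widths and
    wiring are illustrative assumptions."""
    def __init__(self, channels, num_heads=8):
        super().__init__()
        self.norm = nn.LayerNorm(channels)
        self.attn = nn.MultiheadAttention(channels, num_heads, batch_first=True)

    def forward(self, x):
        # x: (batch, channels, frames, height, width)
        b, c, n, h, w = x.shape
        # Fold spatial positions into the batch so attention only mixes time.
        seq = x.permute(0, 3, 4, 2, 1).reshape(b * h * w, n, c)
        q = self.norm(seq)
        out, _ = self.attn(q, q, q)
        seq = seq + out  # residual connection
        return seq.reshape(b, h, w, n, c).permute(0, 4, 3, 1, 2)
```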
# 3.2. WEB360 Dataset

Diverse text-video pair datasets are essential for training open-domain text-to-video generation models. However, existing $360^{\circ}$ panorama video datasets lack corresponding textual annotations. Moreover, these datasets are often constrained in either scale or quality, thereby limiting the upper bound of high-quality video generation.

To address the aforementioned challenges and achieve high-quality 360 panorama video generation, we introduce a novel text-video dataset named WEB360. This dataset comprises 2114 text-video pairs sourced from open-domain content, presented in high-definition (720p) ERP format. Our dataset creation process involved extracting 210 high-resolution panoramic video clips from the ODV360 [4] training set. Additionally, we collected over 400 original videos from YouTube. Because the complex scene transitions present in the original videos pose challenges for models in learning temporal correlations, we performed a manual screening process to split the original videos into 1904 single-scene video clips. We employ BLIP [22] to annotate the first frame of the 2114 video clips. However, we observed that directly applying BLIP to ERP images often results in poor captions. Therefore, we propose a panoramic image captioning method named 360 Text Fusion, based on ChatGPT.

Figure 2. 360 Text Fusion. The captions of four images with a FoV of 90 are fed into ChatGPT to generate a new $360^{\circ}$ summarization. Compared to the caption of the ERP at the bottom right, 360 Text Fusion allows for more fine-grained captions.

360 Text Fusion. We find that directly using BLIP [22] to label ERPs has drawbacks. On one hand, errors may arise due to the distortion near the poles, leading to misidentifications such as labeling a "person" as a "dog". On the other hand, the captions generated by BLIP lack granularity, making them insufficient for providing a detailed description of the current scene. Thus, we propose the 360 Text Fusion (360TF) method, as shown in Fig. 2. To deal with the irregular distortion of ERPs, we turn to less-distorted perspective images. We first project the original ERP image to four non-overlapping perspective images at 0 degrees latitude, each with a FoV of 90. The four images are then fed into BLIP to be captioned. After pre-informing ChatGPT about the task and providing examples, these four captions are collectively input to ChatGPT, which then generates a summary of the scene as our final caption. In comparison to directly using BLIP to label the entire image, our 360TF demonstrates a significant advantage in granularity.
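For reference, the ERP-to-perspective projection used above can be sketched as a gnomonic reprojection. The paper does not publish its projection code, so everything below (function name, sampling scheme, nearest-neighbor interpolation) is an illustrative assumption; `yaw_deg` selects one of the four view centers around the equator.

```python
import numpy as np

def erp_to_perspective(erp, fov_deg=90.0, yaw_deg=0.0, out_hw=(512, 512)):
    """Sample a pinhole (perspective) view from an equirectangular image.
    Illustrative gnomonic reprojection with nearest-neighbor sampling."""
    H, W = erp.shape[:2]
    oh, ow = out_hw
    f = 0.5 * ow / np.tan(np.radians(fov_deg) / 2)   # focal length in pixels

    # Pixel grid of the target view, centered at the principal point.
    u, v = np.meshgrid(np.arange(ow) - ow / 2 + 0.5,
                       np.arange(oh) - oh / 2 + 0.5)
    # Camera rays, rotated by the desired yaw around the vertical axis.
    x, y, z = u, v, np.full_like(u, f)
    yaw = np.radians(yaw_deg)
    xr = x * np.cos(yaw) + z * np.sin(yaw)
    zr = -x * np.sin(yaw) + z * np.cos(yaw)

    # Ray direction -> spherical coordinates -> ERP pixel coordinates.
    lon = np.arctan2(xr, zr)                          # [-pi, pi]
    lat = np.arctan2(y, np.hypot(xr, zr))             # [-pi/2, pi/2]
    ex = ((lon / np.pi + 1) / 2 * W).astype(int) % W
    ey = np.clip(((lat / (np.pi / 2) + 1) / 2 * H).astype(int), 0, H - 1)
    return erp[ey, ex]

# Four non-overlapping 90-degree views around the equator:
# views = [erp_to_perspective(erp, 90, yaw) for yaw in (0, 90, 180, 270)]
```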
# 3.3. 360-Degree Video Diffusion Model

An overview of the 360-Degree Video Diffusion Model (360DVD) is presented in Fig. 3; it is composed of a pre-trained denoising U-Net and the 360-Adapter. The pre-trained denoising U-Net adopts a structure identical to that of AnimateDiff. At every resolution level of the U-Net, the spatial layer inherits pre-trained weights from SD, while the temporal layer incorporates the motion module of AnimateDiff trained on a large-scale text-video dataset.

Figure 3. Overview of 360DVD. 360DVD leverages a trainable 360-Adapter to extend standard T2V models to the panorama domain and is able to generate high-quality panorama videos with given prompts and optional motion conditions. In addition, 360 Enhancement Techniques are proposed for quality improvement in the panorama perspective.

During the training process, we first sample a video $\mathbf{x}_0^{1:N}$ from the dataset. The video is encoded into the latent code $\mathbf{z}_0^{1:N}$ through the pre-trained VAE encoder $\mathcal{E}(\cdot)$ and noised to $\mathbf{z}_t^{1:N}$. Simultaneously, the corresponding text $\mathbf{y}$ for the video is encoded using the text encoder $\boldsymbol{\tau}_{\theta}(\cdot)$ of CLIP. The video is also input into a motion estimation network to generate the corresponding motion conditions $\mathbf{c}$, which are then fed into the 360-Adapter $\mathcal{F}_{360}(\cdot)$. Finally, the noised latent code $\mathbf{z}_t^{1:N}$, timestep $t$, text embedding $\boldsymbol{\tau}_{\theta}(\mathbf{y})$, and the feature maps $\mathbf{f}_{360}$ generated by the 360-Adapter are collectively input into the U-Net $\boldsymbol{\epsilon}_{\theta}(\cdot)$ to predict the noise added to the latent code. As we aim to preserve the priors learned by SD and AnimateDiff on large datasets, we freeze their weights during the training process. With a simple L2 loss term, the training objective is given as follows:

$$
\mathcal{L}=\mathbb{E}_{\mathcal{E}(\mathbf{x}_{0}^{1:N}), \mathbf{y}, \boldsymbol{\epsilon} \sim \mathcal{N}(0, \mathbf{I}), t}\left[\left\|\boldsymbol{\epsilon}-\boldsymbol{\epsilon}_{\theta}\left(\mathbf{z}_{t}^{1:N}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y}), \mathbf{f}_{360}\right)\right\|_{2}^{2}\right]. \tag{6}
$$

To ensure satisfactory generation of $360^{\circ}$ panoramic videos without motion control input, we set the input of the 360-Adapter to zero with probability $P$ during training. This strategy encourages the model to learn representations that are not solely reliant on motion conditions, enhancing its ability to generate compelling panoramic videos without explicit motion guidance.

At inference, users can selectively provide text prompts and motion guidance to carry out denoising over a total of $T$ steps. Here, we employ DDIM [39] to accelerate the sampling process. The estimated latent code $\hat{\mathbf{z}}_0^{1:N}$ is then input into the pre-trained VAE decoder to decode the desired $360^\circ$ panoramic videos $\hat{\mathbf{x}}_0^{1:N}$. Due to constraints such as the resolution limitations of existing SD and GPU memory usage, the experimental results presented in this paper use a resolution of $512 \times 1024$. In practical applications, super-resolution methods [8, 40] can be employed to upscale the generated results to the desired size.
Figure 4. Overview of 360-Adapter. 360-Adapter is a simple but effective module in which intermediate features are fed into the U-Net encoder blocks for modulation.

360-Adapter. Our proposed 360-Adapter is simple and lightweight, as shown in Fig. 4. The original condition input has the same resolution as the video, $H \times W$. We first utilize the pixel unshuffle [36] operation to downsample it to $H/8 \times W/8$. Following that are four 360-Adapter blocks, of which we depict only one in Fig. 4 for simplicity. To maintain consistency with the U-Net architecture, the first three 360-Adapter blocks each include a downsampling block. In each 360-Adapter block, one 2D convolution layer and a residual block (RB) with pseudo-3D convolution layers are utilized to extract the condition feature $\mathbf{f}_{360}^{k}$. Finally, the multiscale condition features $\mathbf{f}_{360} = \{\mathbf{f}_{360}^{1}, \mathbf{f}_{360}^{2}, \mathbf{f}_{360}^{3}, \mathbf{f}_{360}^{4}\}$ are formed. Suppose the intermediate features in the U-Net encoder blocks are $\mathbf{f}_{enc} = \{\mathbf{f}_{enc}^{1}, \mathbf{f}_{enc}^{2}, \mathbf{f}_{enc}^{3}, \mathbf{f}_{enc}^{4}\}$; $\mathbf{f}_{360}$ is then added to $\mathbf{f}_{enc}$ at each scale. In summary, the condition feature extraction and conditioning operation of the 360-Adapter can be defined as follows:

$$
\mathbf{f}_{360}=\mathcal{F}_{360}(\mathbf{c}), \tag{7}
$$

$$
\hat{\mathbf{f}}_{enc}^{i}=\mathbf{f}_{enc}^{i}+\mathbf{f}_{360}^{i}, \quad i \in\{1,2,3,4\}. \tag{8}
$$

The description above omits some details. Our motion condition $\mathbf{c}$ is a 5D tensor of size batch $\times$ channels $\times$ frames $\times$ height $\times$ width. We first reshape it into a 4D tensor of size (batch $\times$ frames) $\times$ channels $\times$ height $\times$ width so it can be fed into the 2D convolution layer, and then restore it to 5D to go through the RB with pseudo-3D convolution layers. In the RB, we employ a $1 \times 3 \times 3$ pseudo-3D convolution to extract features in the spatial dimension, followed by a $3 \times 1 \times 1$ pseudo-3D convolution to model information along the temporal dimension. The resulting features are added to the output of the skip connection. Finally, the condition features are kept as a 5D tensor of size batch $\times$ channels $\times$ frames $\times$ height $\times$ width to align with the U-Net encoder intermediate features.
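A minimal sketch of one such block, following the reshaping scheme just described. The channel widths, activation, and residual wiring are assumptions for illustration (the downsampling block present in the first three adapter blocks is omitted); the paper does not publish these hyperparameters.

```python
import torch
import torch.nn as nn

class Adapter360Block(nn.Module):
    """One 360-Adapter block sketch: a 2D conv followed by a residual block
    with (1x3x3) spatial and (3x1x1) temporal pseudo-3D convolutions.
    Channel sizes and wiring are illustrative assumptions."""
    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.conv2d = nn.Conv2d(in_ch, out_ch, 3, padding=1)
        self.spatial = nn.Conv3d(out_ch, out_ch, (1, 3, 3), padding=(0, 1, 1))
        self.temporal = nn.Conv3d(out_ch, out_ch, (3, 1, 1), padding=(1, 0, 0))

    def forward(self, c):
        # c: (batch, channels, frames, height, width)
        b, _, n, h, w = c.shape
        # 5D -> 4D so the 2D conv treats every frame independently.
        x = c.permute(0, 2, 1, 3, 4).reshape(b * n, -1, h, w)
        x = self.conv2d(x)
        # 4D -> 5D for the pseudo-3D residual block.
        x = x.reshape(b, n, -1, h, w).permute(0, 2, 1, 3, 4)
        res = self.temporal(torch.relu(self.spatial(x)))
        return x + res   # skip connection around the pseudo-3D RB
```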
# 3.4. 360 Enhancement Techniques

Latitude-aware Loss. When projecting panoramic videos into ERPs, meridians are mapped to vertical lines with constant spacing, and parallels are mapped to horizontal lines with constant spacing. This projection establishes a straightforward mapping relationship, but it is neither equal-area nor conformal, introducing significant distortion, particularly in the polar regions. To make the denoiser pay more attention to the low-latitude regions with less distortion, which are more crucial for human visual perception, we introduce a latitude-aware loss:

$$
\mathcal{L}=\mathbb{E}_{\mathcal{E}\left(\mathbf{x}_{0}^{1:N}\right), \mathbf{y}, \boldsymbol{\epsilon} \sim \mathcal{N}(0, \mathbf{I}), t}\left[\left\|\mathbf{W} \odot\left(\boldsymbol{\epsilon}-\hat{\boldsymbol{\epsilon}}_{\theta}\right)\right\|_{2}^{2}\right], \tag{9}
$$

where $\hat{\boldsymbol{\epsilon}}_{\theta} = \boldsymbol{\epsilon}_{\theta}(\mathbf{z}_t^{1:N}, t, \boldsymbol{\tau}_{\theta}(\mathbf{y}), \mathbf{f}_{360})$, and $\mathbf{W}$ is a weight matrix applied via element-wise product, defined as:

$$
\mathbf{W}_{i, j}=\cos \left(\frac{2 i-H / 8+1}{H / 4} \pi\right), \tag{10}
$$

where $i \in [0, H/8)$, $j \in [0, W/8)$, and $H/8$ and $W/8$ are the height and width of the latent code $\mathbf{z}_t^{1:N}$. The visualization of $\mathbf{W}$ is shown in Fig. 5, where pixels at low and middle latitudes are given more weight during training.
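A short sketch of how Eq. (10) and Eq. (9) can be realized on a latent of height $H/8$ and width $W/8$ (direct transcription of the equations; variable names are ours):

```python
import math
import torch

def latitude_weight(lat_h, lat_w, device="cpu"):
    """Build the weight matrix W of Eq. (10) for a latent of size
    lat_h x lat_w (i.e., H/8 x W/8)."""
    i = torch.arange(lat_h, device=device, dtype=torch.float32)
    # (2i - H/8 + 1) / (H/4) * pi, with lat_h = H/8.
    w_col = torch.cos((2 * i - lat_h + 1) / (2 * lat_h) * math.pi)
    return w_col.view(-1, 1).expand(lat_h, lat_w)  # broadcast over columns

def latitude_aware_loss(eps, eps_pred):
    # eps, eps_pred: (batch, channels, frames, lat_h, lat_w)
    W = latitude_weight(eps.shape[-2], eps.shape[-1], eps.device)
    return ((W * (eps - eps_pred)) ** 2).mean()    # Eq. (9)
```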
Figure 5. Left: the visualization of the weight matrix $\mathbf{W}$; brighter colors indicate values closer to 1, while darker colors indicate values closer to 0. Right: a schematic diagram of the latent rotation mechanism. In each iteration, the far-left portion of angle $\theta$ is shifted to the far right.

Latent Rotation Mechanism. Because ERPs can be considered the unfolding of a spherical surface along a meridian, they are meant to be wraparound-consistent, implying that their left and right sides are continuous. However, during video generation, the left and right sides are physically separated. Inspired by PanoDiff [45], we employ a latent rotation mechanism to enhance the macroscopic coherence between the left and right ends of the video. During inference, we perform a horizontal rotation by an angle $\theta$ on $\mathbf{z}_t^{1:N}$ and the motion condition $\mathbf{c}$ at each denoising step. As illustrated in Fig. 5, the content on the far left is shifted to the far right (in the figure we show $\mathbf{x}_0^1$ instead of $\mathbf{z}_t^{1:N}$ for a better visual impression of the continuity). During training, we also randomly rotate the training videos along with the motion condition by a random angle as a data augmentation strategy.
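Because an ERP wraps around horizontally, this rotation is just a circular shift along the width axis. A minimal sketch (the mapping from the angle $\theta$ to a pixel shift is our reading of the mechanism):

```python
import math
import torch

def rotate_latent(z, theta):
    """Horizontally rotate an ERP latent/condition by angle theta (radians):
    a circular shift along the width axis. z: (..., height, width)."""
    shift = int(z.shape[-1] * theta / (2 * math.pi))
    return torch.roll(z, shifts=shift, dims=-1)

# Applied to both the latent and the motion condition at each denoising step:
# z_t = rotate_latent(z_t, theta); c = rotate_latent(c, theta)
```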
Circular Padding Mechanism. Although the latent rotation mechanism achieves semantic continuity at a macroscopic level, achieving pixel-level continuity remains challenging. Therefore, during inference we adopt a circular padding mechanism by modifying the padding method of the convolution layers. We observe that the early stages of $360^{\circ}$ video generation often involve layout modeling, while the later stages focus on detail completion. To maintain the stable video generation quality of 360DVD, we only apply the circular padding mechanism in the last $\left\lfloor \frac{T}{2} \right\rfloor$ of the total $T$ denoising steps.
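In PyTorch this can be approximated by switching the `padding_mode` of the denoiser's 2D convolutions to `'circular'` for the late denoising steps. Note one caveat: PyTorch's `'circular'` mode wraps both spatial dimensions, whereas an ERP is only periodic horizontally, so the snippet below is a simplified sketch rather than the paper's exact mechanism.

```python
import torch.nn as nn

def set_padding_mode(unet, mode):
    """Toggle padding for every 2D conv in the denoiser.
    mode: 'zeros' (default) or 'circular'. Simplified: wrapping only the
    horizontal (longitude) axis would need custom F.pad-based padding."""
    for m in unet.modules():
        if isinstance(m, nn.Conv2d):
            m.padding_mode = mode

# for step, t in enumerate(timesteps):   # T denoising steps in total
#     set_padding_mode(unet, 'circular' if step >= len(timesteps) // 2 else 'zeros')
#     ...
```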
# 4. Experiment

# 4.1. Implementation Details

Training Settings. We choose Stable Diffusion v1.5 and Motion Module v14 as our base model. We utilize the panoramic optical flow estimator PanoFlow [45] to generate motion conditions. We train the 360-Adapter on the proposed WEB360 dataset. The resolution is set to $512 \times 1024$, the number of frames to 16, the batch size to 1, the learning rate to $1 \times 10^{-5}$, the total number of training steps to $100k$, and the probability $P$ to 0.2. We use a linear beta schedule, as in AnimateDiff, with $\beta_{start} = 0.00085$ and $\beta_{end} = 0.012$.
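For concreteness, the stated schedule corresponds to something like the following (whether the betas are interpolated linearly or in sqrt-space is a configuration detail of the base model; this is the plain linear reading of the text, with 1000 steps assumed):

```python
import torch

# Plain linear beta schedule as stated: 1000 steps from 0.00085 to 0.012.
T = 1000
betas = torch.linspace(0.00085, 0.012, T)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # used by the forward process
```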
Inference Settings. We use DDIM with 25 sampling steps, a text guidance scale of 7.5, and the rotation angle $\theta = \pi/2$. We collect several personalized Stable Diffusion models from CivitAI to verify the effectiveness and generalizability of our method, including Realistic Vision, Lyriel, ToonYou, and RCNZ Cartoon.

Figure 6. Qualitative comparisons with baseline methods. 360DVD successfully produces stable and high-quality panorama videos over various prompts while the other methods fail.

Figure 7. Qualitative comparisons of optical flow. 360DVD generates panorama videos with reasonable motion patterns consistent with the conditioned optical flow.

# 4.2. Qualitative Results

Due to space limitations, we only display several frames of each video. We strongly recommend that readers refer to our project page for more results and better visual quality.

Prompt-guided Panorama Video Generation. We present several prompt-guided $360^{\circ}$ panorama video generation results across different personalized models in Fig. 1. The figure shows that our method successfully turns personalized T2I models into panorama video generators. Our method can produce impressive generation results ranging from realistic to cartoon styles, and from natural landscapes to cultural scenery. This success is attributed to the fact that our method preserves the image generation priors and temporal modeling priors learned by SD and AnimateDiff on large-scale datasets.

Motion-guided Panorama Video Generation. We showcase panoramic video generation results guided by three typical optical flow maps, as shown in Fig. 7. The optical flow maps in the first row indicate primary motion areas near the north pole of the panorama, where we can observe significant movement of clouds in the sky. The optical flow maps in the second and third rows indicate motion areas primarily near the south pole, where we can see the movement of trees and hot air balloons near the bottom of the panorama.
| Index | Methods | Graphics Quality | Frame Consistency | End Continuity | Content Distribution | Motion Pattern |
|---|---|---|---|---|---|---|
| A | AnimateDiff | 11.3% | 15.3% | 5.3% | 4.8% | 4.4% |
| B | A+LoRA | 14.1% | 10.5% | 6.0% | 12.1% | 6.5% |
| C | B+360ET | 23.0% | 9.7% | 16.9% | 16.1% | 14.5% |
| D | Ours | 51.6% | 64.5% | 71.8% | 67.0% | 74.6% |

Table 1. User preference studies. Graphics Quality and Frame Consistency are video criteria; End Continuity, Content Distribution, and Motion Pattern are panorama criteria. More raters prefer videos generated by our 360DVD, especially on the panorama criteria, i.e., whether the generated videos exhibit left-right continuity, a panoramic content distribution, and panoramic motion patterns.
# 4.3. Comparison

We compare our results with the native AnimateDiff (A), AnimateDiff equipped with a panorama image LoRA from CivitAI named LatentLabs360 (B), and the latter further combined with our proposed 360 Enhancement Techniques, excluding the latitude-aware loss (C). We observe that the results generated by the native AnimateDiff have a very narrow field of view, which does not match the content distribution of panoramic videos. When AnimateDiff is augmented with the panoramic LoRA, it produces videos with a broader field of view; however, the two ends of the videos lack continuity, and object movements are highly random. Our proposed 360ET significantly enhances the continuity between the two ends of the videos but fails to address issues such as non-compliance with panoramic motion patterns and poor cross-frame consistency. Notably, our 360DVD generates videos that best adhere to the content distribution and motion patterns of panoramic videos. We are pleased to find that, thanks to the high-quality training data provided by WEB360, the videos generated by 360DVD exhibit more realistic colors and nuanced lighting, providing an immersive experience.

# 4.4. Ablation Study

We primarily conducted ablation studies on the proposed 360 Text Fusion strategy, the pseudo-3D layer in the 360-Adapter, and the latitude-aware loss, as illustrated in Fig. 8. Given the prompt "a car driving down a street next to a forest", the first row, without 360TF, cannot generate the car because of the low-quality captions used during training. The second row, without the pseudo-3D layer, can generate a car, but due to the lack of temporal modeling, the results exhibit flickering. The third row, without the latitude-aware loss, produces relatively good results, but still falls slightly short in clarity, field of view, and other aspects compared to the last row with the complete 360DVD.

Figure 8. Ablation studies on 360 Text Fusion (360TF), the pseudo-3D layer in the 360-Adapter (Pseudo-3D), and the latitude-aware loss (Lat. Loss).

# 4.5. User Study

We surveyed 31 participants to evaluate the graphics quality, cross-frame consistency, left-right continuity, content distribution, and motion patterns of 8 sets of generated results. For each criterion, they selected the video they deemed most fitting for the theme of high-quality 360-degree panoramic videos. The data presented in Table 1 indicate that our model significantly outperforms the other three methods across all five dimensions. At the same time, our proposed 360ET remarkably improves video quality and left-right continuity even when applied only on top of the native AnimateDiff with the panoramic LoRA.

# 5. Conclusion

In this paper, we introduce 360DVD, a pipeline for controllable $360^{\circ}$ panorama video generation. Our framework leverages text prompts and motion guidance to animate personalized T2I models. Utilizing the proposed WEB360 dataset, 360-Adapter, and 360 Enhancement Techniques, our framework can generate videos that adhere to the content distribution and motion patterns of real captured panoramic videos. Extensive experiments demonstrate our effectiveness in creating high-quality panorama videos with various prompts and styles. We believe that our framework provides a simple but effective solution for panoramic video generation and offers inspiration for future work.
# References

[1] Hao Ai, Zidong Cao, Jinjing Zhu, Haotian Bai, Yucheng Chen, and Lin Wang. Deep learning for omnidirectional vision: A survey and new perspectives. arXiv preprint arXiv:2205.10468, 2022. 1
[2] Naofumi Akimoto, Yuhi Matsuo, and Yoshimitsu Aoki. Diverse plausible 360-degree image outpainting for efficient 3DCG background creation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11441-11450, 2022. 3
[3] Jie An, Songyang Zhang, Harry Yang, Sonal Gupta, Jia-Bin Huang, Jiebo Luo, and Xi Yin. Latent-Shift: Latent diffusion with temporal shift for efficient text-to-video generation. arXiv preprint arXiv:2304.08477, 2023. 1, 3
[4] Mingdeng Cao, Chong Mou, Fanghua Yu, Xintao Wang, Yinqiang Zheng, Jian Zhang, Chao Dong, Gen Li, Ying Shan, Radu Timofte, et al. NTIRE 2023 challenge on 360deg omnidirectional image and video super-resolution: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pages 1731-1745, 2023. 3, 4
[5] Ricky TQ Chen, Jens Behrmann, David K Duvenaud, and Jörn-Henrik Jacobsen. Residual flows for invertible generative modeling. Advances in Neural Information Processing Systems, 32, 2019. 2
[6] Tsai-Shien Chen, Chieh Hubert Lin, Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang. Motion-conditioned diffusion model for controllable video synthesis. arXiv preprint arXiv:2304.14404, 2023. 3
[7] Zhaoxi Chen, Guangcong Wang, and Ziwei Liu. Text2Light: Zero-shot text-driven HDR panorama generation. ACM Transactions on Graphics (TOG), 41(6):1-16, 2022. 3
[8] Ming Cheng, Haoyu Ma, Qiufang Ma, Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Xuhan Sheng, Shijie Zhao, Junlin Li, and Li Zhang. Hybrid transformer and CNN attention network for stereo image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1702-1711, 2023. 5
[9] Xinhua Cheng, Nan Zhang, Jiwen Yu, Yinhuai Wang, Ge Li, and Jian Zhang. Null-space diffusion sampling for zero-shot point cloud completion. In Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence (IJCAI), 2023. 2
[10] Yen-Chi Cheng, Chieh Hubert Lin, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, and Ming-Hsuan Yang. InOut: Diverse image outpainting via GAN inversion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11431-11440, 2022. 3
[11] Antonia Creswell, Tom White, Vincent Dumoulin, Kai Arulkumaran, Biswa Sengupta, and Anil A Bharath. Generative adversarial networks: An overview. IEEE Signal Processing Magazine, 35(1):53-65, 2018. 2
[12] Mohammad Reza Karimi Dastjerdi, Yannick Hold-Geoffroy, Jonathan Eisenmann, Siavash Khodadadeh, and Jean-François Lalonde. Guided co-modulated GAN for $360^{\circ}$ field of view extrapolation. In 2022 International Conference on 3D Vision (3DV), pages 475-485. IEEE, 2022. 3
[13] Patrick Esser, Johnathan Chiu, Parmida Atighehchian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7346-7356, 2023. 3
[14] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 3
[15] Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. TokenFlow: Consistent diffusion features for consistent video editing. arXiv preprint arXiv:2307.10373, 2023. 3
[16] Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. AnimateDiff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. 1, 3
[17] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems, 33:6840-6851, 2020. 2, 3
[18] Yaosi Hu, Zhenzhong Chen, and Chong Luo. LaMD: Latent motion diffusion for video generation. arXiv preprint arXiv:2304.11603, 2023. 3
[19] Xuhui Jia, Yang Zhao, Kelvin CK Chan, Yandong Li, Han Zhang, Boqing Gong, Tingbo Hou, Huisheng Wang, and Yu-Chuan Su. Taming encoder for zero fine-tuning image customization with text-to-image diffusion models. arXiv preprint arXiv:2304.02642, 2023. 3
[20] Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 2
[21] Jialu Li and Mohit Bansal. PanoGen: Text-conditioned panoramic environment generation for vision-and-language navigation. Advances in Neural Information Processing Systems, 36, 2024. 3
[22] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International Conference on Machine Learning, pages 12888-12900. PMLR, 2022. 4
[23] Chieh Hubert Lin, Chia-Che Chang, Yu-Sheng Chen, Da-Cheng Juan, Wei Wei, and Hwann-Tzong Chen. COCO-GAN: Generation by parts via conditional coordinating. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4512-4521, 2019. 3
[24] Chieh Hubert Lin, Hsin-Ying Lee, Yen-Chi Cheng, Sergey Tulyakov, and Ming-Hsuan Yang. InfinityGAN: Towards infinite-pixel image synthesis. arXiv preprint arXiv:2104.03963, 2021. 3
[25] Chong Mou, Xintao Wang, Jiechong Song, Ying Shan, and Jian Zhang. DragonDiffusion: Enabling drag-style manipulation on diffusion models. In The Twelfth International Conference on Learning Representations, 2024. 3
[26] Chong Mou, Xintao Wang, Liangbin Xie, Yanze Wu, Jian Zhang, Zhongang Qi, and Ying Shan. T2I-Adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 4296-4304, 2024. 1, 3
[27] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 1, 3
[28] Changgyoon Oh, Wonjune Cho, Yujeong Chae, Daehee Park, Lin Wang, and Kuk-Jin Yoon. BIPS: Bi-modal indoor panorama synthesis via residual depth-aided adversarial learning. In European Conference on Computer Vision, pages 352-371. Springer, 2022. 3
[29] Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, and Qifeng Chen. FateZero: Fusing attentions for zero-shot text-based video editing. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15932-15942, 2023. 3
[30] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pages 8748-8763. PMLR, 2021. 4
[31] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. arXiv preprint arXiv:2204.06125, 2022. 1, 3
[32] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 1, 3
[33] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In Medical Image Computing and Computer-Assisted Intervention (MICCAI), pages 234-241, 2015. 4
[34] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. DreamBooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. 3
[35] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 1, 3
[36] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P. Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 5
[37] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. Make-A-Video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792, 2022. 1, 3
[38] Kihyuk Sohn, Honglak Lee, and Xinchen Yan. Learning structured output representation using deep conditional generative models. Advances in Neural Information Processing Systems, 28, 2015. 2
[39] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502, 2020. 2, 5
[40] Xiaopeng Sun, Weiqi Li, Zhenyu Zhang, Qiufang Ma, Xuhan Sheng, Ming Cheng, Haoyu Ma, Shijie Zhao, Jian Zhang, Junlin Li, et al. OPDN: Omnidirectional position-aware deformable network for omnidirectional image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1293-1301, 2023. 3, 5
[41] Piotr Teterwak, Aaron Sarna, Dilip Krishnan, Aaron Maschinot, David Belanger, Ce Liu, and William T Freeman. Boundless: Generative adversarial networks for image extension. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10521-10530, 2019. 3
[42] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in Neural Information Processing Systems, 30, 2017. 3
[43] Guangcong Wang, Yinuo Yang, Chen Change Loy, and Ziwei Liu. StyleLight: HDR panorama generation for lighting estimation and editing. In European Conference on Computer Vision, pages 477-492. Springer, 2022. 3
[44] Hai Wang, Xiaoyu Xiang, Yuchen Fan, and Jing-Hao Xue. Customizing 360-degree panoramas through text-to-image diffusion models. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 4933-4943, 2024. 3
[45] Jionghao Wang, Ziyu Chen, Jun Ling, Rong Xie, and Li Song. 360-degree panorama generation from few unregistered NFoV images. arXiv preprint arXiv:2308.14686, 2023. 3, 6
[46] Wenjing Wang, Huan Yang, Zixi Tuo, Huiguo He, Junchen Zhu, Jianlong Fu, and Jiaying Liu. VideoFactory: Swap attention in spatiotemporal diffusions for text-to-video generation. arXiv preprint arXiv:2305.10874, 2023. 1, 3
[47] Xiang Wang, Hangjie Yuan, Shiwei Zhang, Dayou Chen, Jiuniu Wang, Yingya Zhang, Yujun Shen, Deli Zhao, and Jingren Zhou. VideoComposer: Compositional video synthesis with motion controllability. Advances in Neural Information Processing Systems, 36, 2024. 3
[48] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-A-Video: One-shot tuning of image diffusion models for text-to-video generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7623-7633, 2023. 3
[49] Ruiqi Wu, Liangyu Chen, Tong Yang, Chunle Guo, Chongyi Li, and Xiangyu Zhang. LAMP: Learn a motion pattern for few-shot-based video generation. arXiv preprint arXiv:2310.10769, 2023. 3
[50] Songsong Wu, Hao Tang, Xiao-Yuan Jing, Haifeng Zhao, Jianjun Qian, Nicu Sebe, and Yan Yan. Cross-view panorama image synthesis. IEEE Transactions on Multimedia, 2022. 3
[51] Tianhao Wu, Chuanxia Zheng, and Tat-Jen Cham. IPO-LDM: Depth-aided 360-degree indoor RGB panorama outpainting via latent diffusion model. arXiv preprint arXiv:2307.03177, 2023. 3
[52] Jinbo Xing, Menghan Xia, Yuxin Liu, Yuechen Zhang, Y He, H Liu, H Chen, X Cun, X Wang, Y Shan, et al. Make-Your-Video: Customized video generation using textual and structural guidance. IEEE Transactions on Visualization and Computer Graphics, 2024. 1
[53] Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, and Fang Wen. Paint by example: Exemplar-based image editing with diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18381-18391, 2023. 3
[54] Shuai Yang, Yifan Zhou, Ziwei Liu, and Chen Change Loy. Rerender A Video: Zero-shot text-guided video-to-video translation. In SIGGRAPH Asia 2023 Conference Papers, pages 1-11, 2023. 3
[55] Jiwen Yu, Yinhuai Wang, Chen Zhao, Bernard Ghanem, and Jian Zhang. FreeDoM: Training-free energy-guided conditional diffusion model. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 23174-23184, 2023. 3
[56] Jiwen Yu, Xuanyu Zhang, Youmin Xu, and Jian Zhang. CRoSS: Diffusion model makes controllable, robust and secure image steganography. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 3
[57] Han Zhang, Tao Xu, Hongsheng Li, Shaoting Zhang, Xiaogang Wang, Xiaolei Huang, and Dimitris N Metaxas. StackGAN: Text to photo-realistic image synthesis with stacked generative adversarial networks. In Proceedings of the IEEE International Conference on Computer Vision, pages 5907-5915, 2017. 2
[58] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836-3847, 2023. 3
[59] Qinsheng Zhang, Jiaming Song, Xun Huang, Yongxin Chen, and Ming-Yu Liu. DiffCollage: Parallel generation of large content with diffusion models. In 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 10188-10198. IEEE, 2023. 3
[60] Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. MagicVideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018, 2022. 1, 3
360dvdcontrollablepanoramavideogenerationwith360degreevideodiffusionmodel/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:29d46efbd71e8b7d19a9e69c68cff74baba9594d1d0bd1956842b632d438e409
size 838967
360dvdcontrollablepanoramavideogenerationwith360degreevideodiffusionmodel/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fcd8f315c6148d434d167afb87ccec64300cbbf01712eea31e3379debc118978
size 403758
360locadatasetandbenchmarkforomnidirectionalvisuallocalizationwithcrossdevicequeries/15eb225d-3032-419c-84b0-35d6ec576cbc_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c6cf89dbe5c3015e4c3811edae1380a21e9172547cb75a63266f844c216aff74
size 87164
360locadatasetandbenchmarkforomnidirectionalvisuallocalizationwithcrossdevicequeries/15eb225d-3032-419c-84b0-35d6ec576cbc_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b5e2b4ad1ec4e249675c22bc8135269b4103543631248da494f86b23d285d19b
size 107458
360locadatasetandbenchmarkforomnidirectionalvisuallocalizationwithcrossdevicequeries/15eb225d-3032-419c-84b0-35d6ec576cbc_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2abcc7cbf6a0fe4c6079ceeaf4fc07e39a5fffed2cfb3ef1a507f409d831f68a
size 8065437
360locadatasetandbenchmarkforomnidirectionalvisuallocalizationwithcrossdevicequeries/full.md
ADDED
@@ -0,0 +1,295 @@

# 360Loc: A Dataset and Benchmark for Omnidirectional Visual Localization with Cross-device Queries

Huajian Huang$^{1*}$ Changkun Liu$^{1*}$ Yipeng Zhu$^{1}$ Hui Cheng$^{2}$ Tristan Braud$^{1}$ Sai-Kit Yeung$^{1}$
$^{1}$The Hong Kong University of Science and Technology  $^{2}$Sun Yat-sen University  ($^{*}$equal contribution)

{hhuangbg, cliudg, yzhudg}@connect.ust.hk, chengh9@mail.sysu.edu.cn, {braudit, saikit}@ust.hk

# Abstract

Portable $360^{\circ}$ cameras are becoming a cheap and efficient tool to establish large visual databases. By capturing omnidirectional views of a scene, these cameras could expedite building environment models that are essential for visual localization. However, such an advantage is often overlooked due to the lack of valuable datasets. This paper introduces a new benchmark dataset, 360Loc, composed of $360^{\circ}$ images with ground truth poses for visual localization. We present a practical implementation of $360^{\circ}$ mapping combining $360^{\circ}$ images with lidar data to generate the ground truth 6DoF poses. 360Loc is the first dataset and benchmark that explores the challenge of cross-device visual positioning, involving $360^{\circ}$ reference frames, and query frames from pinhole, ultra-wide FoV fisheye, and $360^{\circ}$ cameras. We propose a virtual camera approach to generate lower-FoV query frames from $360^{\circ}$ images, which ensures a fair comparison of performance among different query types in visual localization tasks. We also extend this virtual camera approach to feature matching-based and pose regression-based methods to alleviate the performance loss caused by the cross-device domain gap, and evaluate its effectiveness against state-of-the-art baselines. We demonstrate that omnidirectional visual localization is more robust in challenging large-scale scenes with symmetries and repetitive structures. These results provide new insights into 360-camera mapping and omnidirectional visual localization with cross-device queries. Project Page and dataset: https://huajianup.github.io/research/360Loc/.

# 1. Introduction

Visual localization refers to predicting the 6DoF absolute pose (translation and rotation) of query images in a known scene. Accurate visual localization has wide applications in augmented reality (AR), navigation, and robotics.

Over the last decade, many visual localization methods have been proposed, including feature matching-based approaches [17, 33, 42, 45, 54], scene coordinate regression [5-7], and absolute pose regressors (APRs) [23, 24, 49]. Much of this progress has been driven by the availability of numerous datasets and benchmarks targeting different challenges, as shown in Table 1. However, existing methods and datasets focus on localization and mapping using pinhole images. Although the merits of $360^{\circ}$ cameras for visual perception have been recognized [22, 60, 62], the application of $360^{\circ}$ cameras to visual localization is still under-explored. Recently, SensLoc [61] started to apply $360^{\circ}$ cameras to facilitate data collection, but its pipeline cannot perform omnidirectional localization directly from the $360^{\circ}$ images.

This paper introduces 360Loc, a new challenging benchmark dataset to facilitate research on omnidirectional visual localization. The dataset contains $360^{\circ}$ images captured in diverse campus-scale indoor and outdoor environments, featuring highly symmetrical and repetitive structures as well as interference from dynamic objects. To capture this dataset, we present a practical pipeline using a portable 360-camera platform to obtain reliable pose estimates of $360^{\circ}$ cameras as ground truth. Although $360^{\circ}$ cameras present significant advantages for capturing reference data, real-life applications of visual localization often rely on traditional cameras. Examples include robots equipped with fisheye cameras and phone-based AR applications using the embedded pinhole camera. This raises the problem of cross-device visual localization on image databases captured with $360^{\circ}$ cameras. We thus supplement the reference database composed of $360^{\circ}$ images with query frames from pinhole, fisheye, and $360^{\circ}$ cameras.

We introduce the concept of a virtual camera to generate high-quality lower-FoV images with different camera parameters from $360^{\circ}$ images. This enables a fair comparison of performance among queries from different devices in cross-device visual localization. We adapt existing feature-matching-based methods and APRs to support $360^{\circ}$ image queries and benchmark these methods for 360-based cross-device visual localization.

Figure 1. Overview of dataset collection and ground truth generation: 1) use the platform to collect $360^{\circ}$ images and frame-by-frame point clouds, and obtain real-time camera poses; 2) apply the optimization methodology to achieve data registration, resulting in a globally reconstructed point cloud model, then align the daytime and nighttime models to get consistent poses; 3) perform cropping to get virtual camera images and generate corresponding depth images. As a result, 360Loc takes advantage of $360^{\circ}$ images for efficient mapping while providing query images from five different camera models in order to analyze the challenge of cross-domain visual localization.

Since different cameras present different imaging patterns, the cross-device domain gap is expected to cause performance loss. We extend the virtual camera approach to data augmentation for end-to-end solutions such as image retrieval (IR) and APRs.

By conducting exhaustive evaluations, we demonstrate the advantages of $360^{\circ}$ cameras in reducing ambiguity in visual localization for scenes featuring symmetric or repetitive structures. We also show improvements against state-of-the-art (SOTA) baselines using the virtual camera method for cross-device visual localization on image databases captured with $360^{\circ}$ cameras. These results provide novel insights into mapping with $360^{\circ}$ images, enhancing the anti-ambiguity capability of query images, reducing the cross-device domain gap in visual localization, and improving the generalization ability of APRs by applying virtual cameras.

Our contributions can be summarized as follows:

- We propose a practical implementation of $360^{\circ}$ mapping combining lidar data with $360^{\circ}$ images for establishing the ground truth 6DoF poses.
- A virtual camera approach to generate high-quality lower-FoV images with different camera parameters from $360^{\circ}$ views.
- A novel dataset for cross-device visual localization based on $360^{\circ}$ reference images with pinhole, fisheye, and $360^{\circ}$ query images.
- Demonstration of our approach's efficacy over state-of-the-art solutions for visual localization using $360^{\circ}$ image databases, resulting in decreased localization ambiguity, a reduced cross-device domain gap, and improved generalization ability of APRs.

# 2. Related work

# 2.1. Visual Localization

Structure-based methods predict camera poses by establishing 2D-3D correspondences, either indirectly with local feature extractors and matchers [16, 35, 42, 43, 52, 55] or directly with scene coordinate regression [5-7]. The HLoc [42, 43] pipeline scales up to large scenes using image retrieval [1, 3, 18, 20] as an intermediate step and achieves SOTA accuracy on many benchmarks. This type of approach usually supports pinhole cameras with different intrinsic parameters. However, the performance of $360^{\circ}$ and fisheye cameras has not been evaluated before, due to the lack of support for $360^{\circ}$ cameras in Structure-from-Motion (SfM) tools like COLMAP [45] and the lack of datasets for fisheye and $360^{\circ}$ cameras. [25-27] are point-cloud-based panorama localization methods for $360^{\circ}$ queries, but they do not consider cross-device visual localization.

Absolute Pose Regressors (APRs) are end-to-end learning-based methods that directly regress the absolute camera pose from input images, without knowledge of 3D models or explicit 2D-3D correspondences. APRs [4, 8, 12, 13, 23, 24, 36, 37, 49, 59] provide faster inference than structure-based methods at the cost of accuracy and robustness [47]. Besides, APRs have generally only been tested on the AmbiguousLoc [9], 7Scenes [50], and Cambridge Landmarks [24] datasets in previous studies. A notable characteristic of these datasets is that the training-set and test-set images were taken with the same camera. In this paper, we enhance cross-device pose regression for APRs by introducing virtual cameras as a data augmentation technique.

# 2.2. Datasets

Existing datasets have the following limitations. 1) Most datasets [9, 10, 24, 50, 54, 58] do not consider the need for cross-device localization, i.e., query images come from the same camera as the reference images. Even though some datasets [11, 14, 30, 44, 46, 48, 53, 61] take cross-device localization into account, the devices are only pinhole cameras with different intrinsic parameters and do not exhibit particularly large domain gaps. Compared to [32], our pinhole and fisheye images are extracted from $360^{\circ}$ images via virtual cameras, which places fewer demands on the device and allows a fair and more flexible comparison of the effects of different FoVs. In this paper, our 360Loc dataset provides five kinds of queries from pinhole, fisheye, and $360^{\circ}$ cameras to promote research on cross-device localization. 2) To date, there is no 6DoF visual localization dataset and benchmark considering $360^{\circ}$ reference images and $360^{\circ}$ query images. Even though [2, 25, 38] contain $360^{\circ}$ images with 6DoF pose labels, they are not standard visual localization datasets with independent mapping/reference sequences and query sequences like the datasets in Table 1. Other datasets [11, 61] use $360^{\circ}$ cameras for data collection, but ultimately crop the $360^{\circ}$ images into perspective images and tailor them to the classical visual localization pipeline for pinhole cameras. The academic community is mainly driven by benchmarks where all training, reference, and query images are pinhole images, because they rely on SfM tools [45] that do not support $360^{\circ}$ cameras to obtain ground truth (GT) and sparse 3D point cloud models for recovering camera poses. However, we note that a $360^{\circ}$ camera can cover a scene with greater efficiency than normal pinhole cameras with narrow field of view (FoV), which makes $360^{\circ}$ images particularly suitable as reference images. 3) Although current datasets have explored the challenges of visual localization from various aspects such as weather variations, day-night transitions, scene changes, and moving individuals and objects [24, 30, 44, 46, 58, 61], there is still insufficient research specifically targeting highly ambiguous environments containing symmetries, repetitive structures, and insufficient textures. Only two indoor datasets [9, 53] and LaMAR [44] consider challenges in ambiguous environments.

Figure 2. The four scenes in 360Loc. All four scenes contain symmetrical, repetitive structures and moving objects. The camera trajectories are visualized as spheres.

In this paper, we study four ambiguous scenes from both indoor and outdoor environments, at a scale much larger than [9] (see Figure 2). We conduct exhaustive assessments of image retrieval, local matching localization, and absolute pose regression to show that queries from the $360^{\circ}$ camera are less likely to yield plausible-looking but incorrect solutions than queries from cameras with narrower FoV.

# 3. The 360Loc Dataset

The 360Loc dataset contains four locations on a local university campus. Figure 2 displays the reference point cloud and example frames for each scene. Atrium is inside a building whose surrounding structure exhibits a high degree of symmetry and repetition, making it a highly ambiguous environment. Concourse is a large indoor scene with many moving people, which can be used to evaluate the robustness of a localization algorithm in scenes with many moving objects. Piatrium is a scene containing both the indoor Atrium and outdoor environments, covering an outdoor piazza with coffee shops, bookstores, and souvenir shops. Hall is a modern student dormitory building.

# 3.1. 360 Mapping Platform

We utilized the handheld multimodal data acquisition platform depicted in Figure 1 for data collection. This platform incorporates a $360^{\circ}$ camera, a Velodyne VLP-16 multi-line lidar, an NUC mini-computer, and a display screen. Figure 1 also illustrates the relative relationship among the $360^{\circ}$ camera coordinate system $\mathbf{O}_{\mathrm{c}}$-XYZ, the lidar coordinate system $\mathbf{O}_{\mathrm{l}}$-XYZ, and the world coordinate system $\mathbf{O}_{\mathrm{w}}$-XYZ. The portable $360^{\circ}$ camera on this device captures high-resolution omnidirectional images at $6144 \times 3072$ (2:1 aspect ratio). It also features a built-in six-axis gyroscope that provides stabilization, making it suitable for handheld mobile data capture.

<table><tr><td>Dataset</td><td>Scale and Environment</td><td>Challenges</td><td>Reference/Query type</td><td>Ground Truth Solution</td><td>Accuracy</td></tr><tr><td>7Scenes [50]</td><td>Small Indoor</td><td>None</td><td>pinhole / pinhole</td><td>RGB-D</td><td>≈ cm</td></tr><tr><td>RIO10 [58]</td><td>Small Indoor</td><td>Changes</td><td>pinhole / pinhole</td><td>VIO</td><td>> dm</td></tr><tr><td>Baidu Mall [53]</td><td>Medium Indoor</td><td>People, Ambiguous</td><td>pinhole / pinhole</td><td>lidar+Manual</td><td>≈ dm</td></tr><tr><td>Naver Labs [30]</td><td>Medium Indoor</td><td>People, Changes</td><td>pinhole / pinhole</td><td>lidar+SfM</td><td>≈ dm</td></tr><tr><td>InLoc [54]</td><td>Medium Indoor</td><td>None</td><td>pinhole / pinhole</td><td>lidar+Manual</td><td>> dm</td></tr><tr><td>AmbiguousLoc [9]</td><td>Small Indoor</td><td>Ambiguous</td><td>pinhole / pinhole</td><td>SLAM</td><td>≈ cm</td></tr><tr><td>Aachen [46]</td><td>Large Outdoor</td><td>People, Day-Night</td><td>pinhole / pinhole</td><td>SfM</td><td>> dm</td></tr><tr><td>Cambridge [24]</td><td>Medium Outdoor</td><td>People, Weather</td><td>pinhole / pinhole</td><td>SfM</td><td>> dm</td></tr><tr><td>San Francisco [11]</td><td>Large Outdoor</td><td>People, Construction</td><td>pinhole / pinhole</td><td>SfM+GPS</td><td>≈ m</td></tr><tr><td>NCLT [10]</td><td>Medium Outdoor+Indoor</td><td>Weather</td><td>pinhole / pinhole</td><td>GPS+SLAM+lidar</td><td>≈ dm</td></tr><tr><td>ADVIO [14]</td><td>Medium Outdoor+Indoor</td><td>People</td><td>pinhole / pinhole</td><td>VIO+Manual</td><td>≈ m</td></tr><tr><td>ETH3D [48]</td><td>Medium Outdoor+Indoor</td><td>None</td><td>pinhole / pinhole</td><td>lidar+Manual</td><td>≈ mm</td></tr><tr><td>LaMAR [44]</td><td>Medium Outdoor+Indoor</td><td>People, Weather, Day-Night, Construction, Changes, Ambiguous</td><td>pinhole / pinhole</td><td>lidar+SfM+VIO</td><td>≈ cm</td></tr><tr><td>SensLoc [61]</td><td>Large Outdoor</td><td>People, Weather, Day-Night, Construction, Changes</td><td>pinhole / pinhole</td><td>SL+VIO+RTK+Gravity</td><td>< dm</td></tr><tr><td>360Loc (ours)</td><td>Medium Outdoor+Indoor</td><td>People, Weather, Day-Night, Construction, Changes, Ambiguous</td><td>360 / (360 + pinhole + fisheye)</td><td>lidar+VIO</td><td>≈ cm</td></tr></table>

Table 1. Overview of popular visual localization datasets. No dataset besides ours considers $360^{\circ}$ images as references and query frames from pinhole, ultra-wide FoV fisheye, and $360^{\circ}$ cameras.

<table><tr><td>Symbol</td><td>Name</td><td>Field of View</td><td>Resolution</td><td>Type</td></tr><tr><td>c0</td><td>360</td><td>360°</td><td>6144×3072</td><td>reference/query</td></tr><tr><td>c1</td><td>fisheye1</td><td>120°</td><td>1280×1024</td><td>query</td></tr><tr><td>c2</td><td>fisheye2</td><td>150°</td><td>1280×1024</td><td>query</td></tr><tr><td>c3</td><td>fisheye3</td><td>195°</td><td>1280×1024</td><td>query</td></tr><tr><td>c4</td><td>pinhole</td><td>85°</td><td>1920×1200</td><td>query</td></tr></table>

Table 2. The representation and parameters of the five cameras.

<table><tr><td rowspan="2">Scene</td><td rowspan="2"># Frames Reference 360</td><td colspan="5"># Frames Query (day / night)</td><td rowspan="2">Spatial Extent (m)</td></tr><tr><td>360</td><td>Pinhole</td><td>Fisheye1</td><td>Fisheye2</td><td>Fisheye3</td></tr><tr><td>Concourse</td><td>491</td><td>593/514</td><td>1186/1028</td><td>1186/1028</td><td>1186/1028</td><td>1186/1028</td><td>93 × 15</td></tr><tr><td>Hall</td><td>540</td><td>1123/1061</td><td>2246/2122</td><td>2246/2122</td><td>2246/2122</td><td>2246/2122</td><td>105 × 52</td></tr><tr><td>Atrium</td><td>581</td><td>875/1219</td><td>1750/2438</td><td>1750/2438</td><td>1750/2438</td><td>1750/2438</td><td>65 × 36</td></tr><tr><td>Piatrium</td><td>632</td><td>1008/697</td><td>2016/1394</td><td>2016/1394</td><td>2016/1394</td><td>2016/1394</td><td>98 × 70</td></tr></table>

Table 3. 360Loc dataset description.

The Velodyne VLP-16 multi-line lidar has a FoV of $360^{\circ} \times 30^{\circ}$, an angular resolution of $0.2^{\circ} \times 2.0^{\circ}$, and a rotation rate of $10\,\mathrm{Hz}$, offering a comprehensive $360^{\circ}$ environmental view. For the calibration of the extrinsic poses between the lidar and the $360^{\circ}$ camera, we employed a calibration toolbox [29] that applies to both lidar and camera projection models. This toolbox utilizes the SuperGlue [43] image-matching pipeline to establish 2D-3D correspondences between the lidar and the camera image. We perform pseudo-registration by synchronizing the two data modalities, images and point clouds. Eventually, we use graph-based SLAM techniques for continuous pose estimation. In the four scenes, a total of 18 independent sequences of $360^{\circ}$ images were captured (12 daytime and 6 nighttime), resulting in a total of 9334 images. For each scene, we selected a specific sequence captured during the daytime as the reference images, while the remaining images were defined as query images of the $360^{\circ}$ image type. We provide more details, and show why $360^{\circ}$ mapping is superior to pinhole SfM in ambiguous scenes with repetitive and symmetric structures, in the supplementary material.

Figure 3. Illustration of obtaining virtual camera images through random poses and image cropping.

# 3.1.1 Cross-device Queries

To enable a rigorous comparison of the performance of queries with different FoVs in visual localization tasks, we created four virtual cameras with diverse FoVs from the $360^{\circ}$ camera, as illustrated in Figure 3. Given a $360^{\circ}$ image $\mathcal{I}_{c_0}$, the corresponding virtual camera image with preconfigured intrinsic parameters is extracted by

$$
\mathcal{I}_{c_n} = \Psi_{c_n}\left(\mathcal{I}_{c_0}\right) = \pi_{c_n}^{-1}\left(\pi_{c_0}\left(\boldsymbol{R}\,\mathcal{I}_{c_0}\right)\right), \tag{1}
$$

where $\pi_{c_n}$ denotes the projection function of the virtual camera and $\pi_{c_0}$ is the projection function of the $360^{\circ}$ camera. $\boldsymbol{R} \in SO(3)$ is a random relative rotation matrix to increase the diversity of views representing the scenes. Moreover, the inverse operation $\Psi_{c_n}^{-1}$ can convert the $c_{n}$ image back to a $360^{\circ}$ image. As reported in Table 2, the virtual cameras include an undistorted pinhole model with $85^{\circ}$ FoV and three fisheye cameras following the Double Sphere model [56] with $120^{\circ}$, $150^{\circ}$, and $195^{\circ}$ FoV, respectively. Table 3 presents the number of image frames in the 360Loc dataset.
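
To make Eq. 1 concrete, the following minimal sketch implements the pinhole case of $\Psi_{c_n}$: each output pixel is back-projected with $\pi_{c_n}^{-1}$, rotated by $\boldsymbol{R}$, and sampled from the equirectangular image via $\pi_{c_0}$. This is our own illustration rather than the released tooling; the function name and the axis conventions (x right, y down, z forward) are assumptions.

```python
import cv2
import numpy as np

def equirect_to_pinhole(equi, fov_deg=85.0, out_hw=(1200, 1920), R=np.eye(3)):
    """Sample a virtual pinhole view from an equirectangular 360 image (Eq. 1)."""
    h_out, w_out = out_hw
    f = 0.5 * w_out / np.tan(0.5 * np.radians(fov_deg))   # focal length from FoV
    cx, cy = 0.5 * w_out, 0.5 * h_out

    # pi_{c_n}^{-1}: a ray direction for every output pixel.
    u, v = np.meshgrid(np.arange(w_out), np.arange(h_out))
    rays = np.stack([(u - cx) / f, (v - cy) / f, np.ones_like(u, dtype=float)], -1)
    rays = rays @ R.T                                      # random view rotation R
    rays /= np.linalg.norm(rays, axis=-1, keepdims=True)

    # pi_{c_0}: longitude/latitude of each ray -> source pixel coordinates.
    lon = np.arctan2(rays[..., 0], rays[..., 2])           # [-pi, pi]
    lat = np.arcsin(np.clip(rays[..., 1], -1.0, 1.0))      # [-pi/2, pi/2]
    h_src, w_src = equi.shape[:2]
    map_x = ((lon / np.pi + 1.0) * 0.5 * w_src).astype(np.float32)
    map_y = ((lat / (0.5 * np.pi) + 1.0) * 0.5 * h_src).astype(np.float32)
    return cv2.remap(equi, map_x, map_y, cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_WRAP)

# Example: an 85-degree crop under a random yaw, mimicking R in Eq. 1.
# yaw = np.random.uniform(-np.pi, np.pi)
# Ry = cv2.Rodrigues(np.array([0.0, yaw, 0.0]))[0]
# crop = equirect_to_pinhole(cv2.imread("pano.jpg"), R=Ry)
```

A fisheye virtual camera would only change the back-projection step (e.g., to the Double Sphere model); the equirectangular lookup is identical.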

# 3.2. Ground Truth Generation

Besides the graph-based optimization in SLAM, we designed a set of offline optimization strategies to further improve the accuracy of camera pose estimation. After acquiring precise dense point cloud reconstructions and poses of the $360^{\circ}$ cameras, an Iterative Closest Point (ICP) algorithm is applied to align the models of the reference and query sequences in the same scene. Moreover, we reconstructed the mesh model of the scenes and generated corresponding depth maps for the $360^{\circ}$ cameras.

Bundle Adjustment (BA) of lidar mapping. Incremental map construction can suffer from accumulated errors due to environmental degradation. We utilized a BA framework based on feature points extracted from lidar scans to refine the map and the poses. The optimization constrains the distances between feature points and their mutually matched edge lines or plane features, expressed through the eigenvalues of a point covariance matrix.

First, we utilize an octree data structure to perform adaptive voxelization-based feature extraction. The point cloud map is segmented into voxels of predetermined size. Each voxel is checked to determine whether its points $P_{u}^{f}$, obtained from the $u$-th frame of lidar scans with $u \in \{1, 2, \dots, U\}$, lie on a plane or a line. If not, the voxel is recursively subdivided using the octree structure until each voxel contains points $P_{u}^{f}$ belonging to the same feature. Let us assume the lidar pose in each frame is $\pmb{\eta} = \{\pmb{\eta}_1, \pmb{\eta}_2, \dots, \pmb{\eta}_U\}$, where $\pmb{\eta}_{u} = (R_{u}, t_{u} \mid R_{u} \in SO(3), t_{u} \in \mathbb{R}^{3})$. The feature points in the global map can then be represented as:

$$
\boldsymbol{P}_{u} = \boldsymbol{R}_{u} \boldsymbol{P}_{u}^{f} + \boldsymbol{t}_{u}. \tag{2}
$$

After simplifying the lidar map to edge or plane features, BA reduces to jointly determining the poses $\pmb{\eta}$ and the location of each feature, represented as $(\pmb{n}_f, \pmb{q})$, where $\pmb{q}$ is the location of a specific feature and $\pmb{n}_f$ is the direction vector of an edge line or the normal vector of a plane. To minimize the distance between each feature point and the corresponding feature, the BA objective is:

$$
\left(\boldsymbol{\eta}^{*}, \boldsymbol{n}_{f}^{*}, \boldsymbol{q}^{*}\right) = \underset{\boldsymbol{\eta}, \boldsymbol{n}_{f}, \boldsymbol{q}}{\arg\min}\, \frac{1}{U} \sum_{u=1}^{U} \left(\boldsymbol{n}_{f}^{T}\left(\boldsymbol{P}_{u} - \boldsymbol{q}\right)\right)^{2}. \tag{3}
$$

It has been proved that Eq. 3 reaches its minimum when the plane's normal vector is set to the eigenvector corresponding to the minimum eigenvalue and $\mathbf{q}$ is set to the centroid of the feature, i.e., $\mathbf{q} = \hat{\mathbf{P}} = \frac{1}{U}\sum_{u=1}^{U}\mathbf{P}_{u}$. Additionally, the BA problem in lidar mapping has a closed-form solution that is independent of the features $(\mathbf{n}_f, \mathbf{q})$ [34]. It can be simplified to the following problem:

$$
\boldsymbol{\eta}^{*} = \underset{\boldsymbol{\eta}}{\arg\min}\, \lambda_{\min}(\boldsymbol{A}), \tag{4}
$$

Figure 4. Overview of GT generation.

where $\lambda_{\min}(\boldsymbol{A})$ denotes the smallest eigenvalue of $\boldsymbol{A}$, and

$$
\boldsymbol{A} = \frac{1}{U} \sum_{u=1}^{U} \left(\boldsymbol{P}_{u} - \hat{\boldsymbol{P}}\right)\left(\boldsymbol{P}_{u} - \hat{\boldsymbol{P}}\right)^{T}. \tag{5}
$$

The BA problem is thus simplified to adjusting the lidar pose $\pmb{\eta}$ to minimize the smallest eigenvalue $\lambda_{3}$ of the point covariance matrix $\boldsymbol{A}$ defined in Eq. 5. By employing this strategy, we refined the pose $\pmb{\eta}$ of each frame and the edge or plane features in the lidar map.
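
For illustration, here is a minimal NumPy sketch (ours, not the paper's implementation) of the per-feature objective in Eqs. 2-5: given candidate poses, it transforms the feature points into the map and evaluates the smallest eigenvalue of their covariance, which a BA solver would minimize summed over all features.

```python
import numpy as np

def ba_feature_cost(poses, feat_pts):
    """Eigenvalue cost of one plane feature under candidate lidar poses.

    poses    -- list of (R_u, t_u) with R_u a 3x3 rotation and t_u a 3-vector
    feat_pts -- list of 3-vectors P_u^f, the per-frame points on the feature
    """
    # Eq. 2: move each frame's feature point into the global map.
    P = np.array([R @ p + t for (R, t), p in zip(poses, feat_pts)])
    P_hat = P.mean(axis=0)                    # the optimal q is the centroid
    A = (P - P_hat).T @ (P - P_hat) / len(P)  # Eq. 5: point covariance matrix
    return np.linalg.eigvalsh(A)[0]           # Eq. 4: smallest eigenvalue
```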

Refined camera poses. The poses of the $360^{\circ}$ camera obtained from online SLAM are further optimized by registration against the dense refined point cloud model. Taking the pre-calibrated extrinsic parameters as the initial guess, we used RANSAC to refine the lidar-camera transformation [29]. This registration is based on the normalized information distance (NID) [51], a mutual-information-based cross-modal distance metric. Finally, we align the reference and query models into the same coordinate system to generate the ground truth for the query sequences. Specifically, we utilize the CloudCompare toolbox [19] to manually select feature points across multiple point cloud models as initial values, and then employ the ICP algorithm to register the point cloud models together. Afterwards, we employed Truncated Signed Distance Functions (TSDFs) [57], a practical approach to volumetric surface reconstruction, to reconstruct meshes from the point clouds using the efficient sparse data structure VDB [39]. At this stage, we can use the ray-mesh intersection method [15] to cast rays from the cameras onto the mesh model; by intersecting the rays with the mesh, we determine the depths of the corresponding points on the mesh surface. After a series of joint optimizations between multiple modalities, we generated a set of GT data (Figure 2 shows some instances): reference images $\mathcal{I}_{c_0}^r$, the depth maps $D_{c_0}^r$ of the reference images, and the reference maps containing the point cloud models $\mathcal{P}$, mesh models $M$, and camera pose odometry $\{\xi\}$. Figure 4 summarizes the GT generation.

# 4. Omnidirectional Visual Localization

We extend current feature-matching-based and absolute pose regression pipelines to omnidirectional visual localization. Given a query image $\mathcal{I}^q$ from any camera model, we seek to estimate its pose within the environment modeled by $360^{\circ}$ images $\mathbf{I}^r$. To minimize the domain gap between query images from $c_{1}, c_{2}, c_{3}, c_{4}$ and the reference images, we explore virtual cameras (VC) in two ways: VC1 remaps query images to the $360^{\circ}$ domain using $\Psi_{c_n}^{-1}$; VC2 rectifies $360^{\circ}$ images into the queries' domains using $\Psi_{c_n}$.

# 4.1. Feature-matching-based Localization

Most feature-matching-based techniques first perform IR to reduce the search space before estimating the pose.

# 4.1.1 Image Retrieval

For method VC1, if the query $\mathcal{I}^q$ is captured from $c_{0}$, we retrieve the $k$ most similar images from $\mathbf{I}^r$ by computing and sorting $\mathrm{simi}_{\cos}(\mathcal{F}(\mathcal{I}^q), \mathcal{F}(\mathcal{I}^r))$ for $\mathcal{I}^r \in \mathbf{I}^r$, where $\mathcal{F}(\cdot)$ denotes the function mapping each image to the global feature domain and $\mathrm{simi}_{\cos}(\cdot)$ is the cosine similarity between two feature embeddings. If the query $\mathcal{I}^q$ is captured from $c_{1}, c_{2}, c_{3}, c_{4}$, we instead retrieve the top-$k$ reference images based on $\mathrm{simi}_{\cos}(\mathcal{F}(\Psi_{c_n}^{-1}(\mathcal{I}^q)), \mathcal{F}(\mathcal{I}^r))$, $\mathcal{I}^r \in \mathbf{I}^r$.

In method VC2, we expand the global features of each $360^{\circ}$ reference image with a set of cameras $c$, including virtual pinhole cameras forming a cube map and virtual fisheye cameras. We define the similarity score between $\mathcal{I}^q$ and $\mathcal{I}^r$ as:

$$
\max\left(\operatorname{simi}_{\cos}\left(\mathcal{F}\left(\mathcal{I}^{q}\right), \mathcal{G}_{\mathcal{F}}\left(\mathcal{I}^{r}\right)\right)\right), \tag{6}
$$

where the global feature group of a reference image is $\mathcal{G}_{\mathcal{F}}(\mathcal{I}^r) = \{\mathcal{F}(\Psi_c(\mathcal{I}^r)), \ldots\}$. We use the highest similarity between $\mathcal{F}(\mathcal{I}^q)$ and the members of $\mathcal{G}_{\mathcal{F}}(\mathcal{I}^r)$ as the similarity score for each $\mathcal{I}^r$, which ensures that we retrieve the $k$ most similar $360^{\circ}$ reference images even though several rectified images originate from the same $\mathcal{I}^r$. Note that this eliminates the domain gap during the image retrieval step.
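
A minimal sketch of this VC2 scoring (our illustration; the names and the precomputed-descriptor layout are assumptions), with all global descriptors L2-normalized so cosine similarity reduces to a dot product:

```python
import numpy as np

def retrieve_top_k_vc2(q_feat, ref_feat_groups, k=10):
    """Rank 360 references by the best score within their feature group (Eq. 6).

    q_feat          -- (d,) L2-normalized global descriptor of the query
    ref_feat_groups -- list of (ref_id, group), group an (n_views, d) array of
                       L2-normalized descriptors of the rectified views of I^r
    """
    scored = []
    for ref_id, group in ref_feat_groups:
        # Max over the group keeps a single score per 360 reference image,
        # even though several rectified views come from the same I^r.
        scored.append((float(np.max(group @ q_feat)), ref_id))
    scored.sort(key=lambda s: s[0], reverse=True)
    return [ref_id for _, ref_id in scored[:k]]
```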

# 4.1.2 Local Feature Matching and Pose Estimation

For each pinhole query frame, we retrieve relevant reference images, match their local features, leverage the depth maps $D_{c_0}$ to establish 2D-3D correspondences, and finally estimate a pose with PnP + RANSAC. Unlike [11, 61], we directly match the query image with the retrieved $360^{\circ}$ reference images as described in Section 4.1.1. For query images from $c_{0}, c_{1}, c_{2}, c_{3}$, i.e., fisheye and $360^{\circ}$ query frames, we utilize the sphere-camera-model pose solver of the OpenGV [28] library for PnP + RANSAC.
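
For the pinhole branch, the pose step is a standard PnP + RANSAC over depth-lifted 2D-3D matches. The sketch below uses OpenCV as an illustration with hypothetical inputs; fisheye and $360^{\circ}$ queries would instead go through OpenGV's sphere camera model.

```python
import cv2
import numpy as np

def estimate_pinhole_pose(pts3d, pts2d, K):
    """Pose of a pinhole query from matched 2D-3D correspondences.

    pts3d -- (N, 3) world points, lifted from matched reference keypoints
             using the reference depth maps D_{c_0}
    pts2d -- (N, 2) matched keypoints in the query image
    K     -- (3, 3) query camera intrinsics
    """
    ok, rvec, tvec, inliers = cv2.solvePnPRansac(
        pts3d.astype(np.float64), pts2d.astype(np.float64), K, None,
        reprojectionError=3.0, iterationsCount=1000, confidence=0.999)
    if not ok:
        return None
    R_cw, _ = cv2.Rodrigues(rvec)             # world-to-camera rotation
    t_cw = tvec.reshape(3)
    C = -R_cw.T @ t_cw                        # camera center in world frame
    return R_cw, t_cw, C, inliers
```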

# 4.2. Absolute Pose Regression

APRs train deep neural networks to regress the 6DoF camera pose of a query image.

PN. PoseNet (PN) is the first APR model. Since there is no open-source code [23, 24], we follow the modifications in [8, 36] and use ResNet34 [21] as the backbone network.

MS-T. MS-Transformer [49] is an APR model that incorporates attention and uses a transformer backbone. We denote APR methods using our virtual camera method VC2 as $\mathbf{APR}^{vc2}$. The difference between APR and $\mathbf{APR}^{vc2}$ lies in the training stage: the APR baselines are trained on $\mathbf{I}^r$, whereas $\mathbf{APR}^{vc2}$ models are trained with $360^\circ$ images, cropped pinhole images, and cropped fisheye images, i.e., $\mathbf{I}^r \cup \Psi_c(\mathbf{I}^r)$ as introduced in Section 4.1.1 and Eq. 1.

All APR models are implemented in Python using PyTorch [41]. During training, all input images are resized to $256 \times 256$ and then randomly cropped to $224 \times 224$. For both PN and MS-T, we set an initial learning rate of $\lambda = 10^{-4}$ and a batch size of 32 for 300 epochs per scene. Training and evaluation in Section 5 are performed on an NVIDIA GeForce RTX 3090 GPU.
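
The VC2 augmentation itself is a thin layer on top of the virtual camera crop of Section 3.1.1. The sketch below is our assumption of how $\mathbf{I}^r \cup \Psi_c(\mathbf{I}^r)$ could be assembled, together with the resize-then-crop input transform; the pose composition is convention-dependent (shown for camera-to-world rotations) and the `virtual_cams` callables are hypothetical.

```python
import random
from torchvision import transforms

# Resize to 256x256, then a random 224x224 crop, as described above.
train_tf = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.RandomCrop(224),
    transforms.ToTensor(),
])

def build_vc2_training_set(ref_items, virtual_cams):
    """Assemble the APR^{vc2} training set I^r plus Psi_c(I^r).

    ref_items    -- list of (equi_image, (t_w, R_w)) 360 reference samples,
                    R_w the camera-to-world rotation, t_w the position
    virtual_cams -- callables returning (cropped_view, R_crop), e.g. the
                    equirectangular crop above with a random rotation
    """
    augmented = list(ref_items)
    for equi, (t_w, R_w) in ref_items:
        psi = random.choice(virtual_cams)
        crop, R_crop = psi(equi)
        # Cropping only rotates the viewing direction, so the position label
        # is unchanged; the rotation label absorbs the crop rotation.
        augmented.append((crop, (t_w, R_w @ R_crop)))
    return augmented
```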

# 5. Evaluation

We provide detailed per-scene results and additional settings in the supplementary material.

# 5.1. Image Retrieval

We evaluate global descriptors computed by NetVLAD [1], CosPlace [3], OpenIBL [18], and AP-GeM [20]. A query image is deemed correctly localized if at least one of the top-$k$ retrieved database images is within $d = 5\,m$ of the query's ground truth position for Concourse, and within $d = 10\,m$ for the other three scenes. The image retrieval results are shown in Table 4. Among all global feature descriptors, the $360^{\circ}$ query exhibits the best precision and recall in most cases, while the pinhole query performs worst. The remap method (VC1) provides limited improvement for pinhole queries but larger improvements for fisheye1, fisheye2, and fisheye3 queries. The reason is that the FoV of the pinhole camera is only $85^{\circ}$; consequently, VC1 produces significant black borders when converting to a $360^{\circ}$ image due to the limited coverage.
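
The localization criterion above is simple to compute; a small sketch follows (the array layout is our assumption).

```python
import numpy as np

def retrieval_recall_at_k(query_pos, retrieved_ref_pos, d_thresh, k):
    """A query is correct if any of its top-k references is within d_thresh.

    query_pos         -- (N, 3) ground-truth query positions
    retrieved_ref_pos -- (N, K, 3) positions of the top-K retrieved references
    """
    d = np.linalg.norm(retrieved_ref_pos[:, :k] - query_pos[:, None], axis=-1)
    return float(np.mean(np.any(d <= d_thresh, axis=1)))
```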

The rectify method (VC2) significantly improves pinhole, fisheye1, fisheye2, and fisheye3 queries by eliminating the domain gap in IR. However, the recall and precision of the pinhole, fisheye1, and fisheye2 queries remain much lower than those of the $360^{\circ}$ query; only the fisheye3 query (widest FoV) approaches the performance of the $360^{\circ}$ query. The domain gap mainly affects the precision and recall of fisheye3, and both remap (VC1) and rectify (VC2) significantly improve its IR performance. On the other hand, pinhole queries are more prone to being mistaken for erroneous locations with similar structures due to their narrower FoV, even when VC2 removes the cross-device domain gap during IR (see figures in the supplementary material).

<table><tr><td rowspan="2">Query</td><td colspan="5">NetVLAD [1]</td><td colspan="5">CosPlace [3]</td><td colspan="5">OpenIBL [18]</td><td colspan="5">AP-GeM [20]</td></tr><tr><td>R@1</td><td>R@5</td><td>P@5</td><td>R@10</td><td>P@10</td><td>R@1</td><td>R@5</td><td>P@5</td><td>R@10</td><td>P@10</td><td>R@1</td><td>R@5</td><td>P@5</td><td>R@10</td><td>P@10</td><td>R@1</td><td>R@5</td><td>P@5</td><td>R@10</td><td>P@10</td></tr><tr><td>pinhole</td><td>0.23</td><td>0.45</td><td>0.22</td><td>0.58</td><td>0.22</td><td>0.15</td><td>0.26</td><td>0.15</td><td>0.33</td><td>0.15</td><td>0.18</td><td>0.36</td><td>0.18</td><td>0.48</td><td>0.18</td><td>0.2</td><td>0.37</td><td>0.2</td><td>0.47</td><td>0.2</td></tr><tr><td>+VC1</td><td>0.24</td><td>0.45</td><td>0.24</td><td>0.57</td><td>0.23</td><td>0.21</td><td>0.33</td><td>0.21</td><td>0.41</td><td>0.21</td><td>0.21</td><td>0.39</td><td>0.21</td><td>0.5</td><td>0.2</td><td>0.25</td><td>0.42</td><td>0.25</td><td>0.53</td><td>0.24</td></tr><tr><td>+VC2</td><td>0.5</td><td>0.67</td><td>0.48</td><td>0.75</td><td>0.47</td><td>0.32</td><td>0.41</td><td>0.32</td><td>0.48</td><td>0.31</td><td>0.51</td><td>0.67</td><td>0.49</td><td>0.75</td><td>0.47</td><td>0.5</td><td>0.68</td><td>0.49</td><td>0.77</td><td>0.47</td></tr><tr><td>fisheye1</td><td>0.42</td><td>0.67</td><td>0.41</td><td>0.77</td><td>0.39</td><td>0.28</td><td>0.43</td><td>0.28</td><td>0.52</td><td>0.28</td><td>0.37</td><td>0.58</td><td>0.36</td><td>0.69</td><td>0.34</td><td>0.35</td><td>0.55</td><td>0.34</td><td>0.66</td><td>0.33</td></tr><tr><td>+VC1</td><td>0.51</td><td>0.72</td><td>0.49</td><td>0.8</td><td>0.47</td><td>0.36</td><td>0.48</td><td>0.35</td><td>0.56</td><td>0.34</td><td>0.52</td><td>0.7</td><td>0.5</td><td>0.79</td><td>0.48</td><td>0.43</td><td>0.62</td><td>0.42</td><td>0.72</td><td>0.4</td></tr><tr><td>+VC2</td><td>0.73</td><td>0.91</td><td>0.63</td><td>0.95</td><td>0.57</td><td>0.63</td><td>0.85</td><td>0.51</td><td>0.92</td><td>0.43</td><td>0.74</td><td>0.91</td><td>0.62</td><td>0.95</td><td>0.54</td><td>0.65</td><td>0.88</td><td>0.57</td><td>0.94</td><td>0.51</td></tr><tr><td>fisheye2</td><td>0.45</td><td>0.7</td><td>0.44</td><td>0.8</td><td>0.42</td><td>0.3</td><td>0.46</td><td>0.31</td><td>0.55</td><td>0.31</td><td>0.41</td><td>0.62</td><td>0.4</td><td>0.73</td><td>0.38</td><td>0.38</td><td>0.59</td><td>0.36</td><td>0.68</td><td>0.35</td></tr><tr><td>+VC1</td><td>0.54</td><td>0.74</td><td>0.52</td><td>0.83</td><td>0.49</td><td>0.37</td><td>0.49</td><td>0.36</td><td>0.57</td><td>0.35</td><td>0.56</td><td>0.73</td><td>0.54</td><td>0.81</td><td>0.51</td><td>0.46</td><td>0.65</td><td>0.45</td><td>0.74</td><td>0.43</td></tr><tr><td>+VC2</td><td>0.74</td><td>0.92</td><td>0.65</td><td>0.95</td><td>0.58</td><td>0.64</td><td>0.87</td><td>0.53</td><td>0.93</td><td>0.45</td><td>0.76</td><td>0.92</td><td>0.65</td><td>0.96</td><td>0.56</td><td>0.67</td><td>0.89</td><td>0.58</td><td>0.94</td><td>0.52</td></tr><tr><td>fisheye3</td><td>0.57</td><td>0.79</td><td>0.55</td><td>0.86</td><td>0.52</td><td>0.4</td><td>0.56</td><td>0.4</td><td>0.65</td><td>0.4</td><td>0.53</td><td>0.74</td><td>0.51</td><td>0.83</td><td>0.49</td><td>0.45</td><td>0.66</td><td>0.43</td><td>0.75</td><td>0.41</td></tr><tr><td>+VC1</td><td>0.63</td><td>0.81</td><td>0.61</td><td>0.88</td><td>0.58</td><td>0.48</td><td>0.61</td><td>0.48</td><td>0.68</td><td>0.47</td><td>0.67</td><td>0.82</td><td>0.65</td><td>0.88</td><td>0.61</td><td>0.55</td><td>0.73</td><td>0.53</td><td>0.81</td><td>0.51</td></tr><tr><td>+VC2</td><td>0.77</td><td>0.93</td><td>0.68</td><td>0.96</td><td>0.61</td><td>0.69</td><td>0.89</td><td>0.58</td><td>0.94</td><td>0.5</td><td>0.79</td><td>0.93</td><td>0.68</td><td>0.96</td><td>0.6</td><td>0.67</td><td>0.9</td><td>0.59</td><td>0.94</td><td>0.54</td></tr><tr><td>360</td><td>0.79</td><td>0.86</td><td>0.77</td><td>0.88</td><td>0.73</td><td>0.92</td><td>0.95</td><td>0.91</td><td>0.96</td><td>0.89</td><td>0.89</td><td>0.94</td><td>0.88</td><td>0.95</td><td>0.83</td><td>0.79</td><td>0.9</td><td>0.77</td><td>0.94</td><td>0.72</td></tr></table>

Table 4. Image retrieval results on the $360^{\circ}$ reference database, averaged over four scenes: recall and precision for the top $k$ retrieved images, $k = 1, 5, 10$. $\#$ indicates the highest R@k and P@k for each device with and without virtual cameras (VC1, VC2). The best results across all devices for R@k and P@k are in bold with $\#$.

<table><tr><td rowspan="3"></td><td colspan="6">NetVLAD [1]</td><td colspan="6">CosPlace [3]</td></tr><tr><td colspan="2">DISK + LG</td><td colspan="2">SP + LG</td><td colspan="2">SP + SG</td><td colspan="2">DISK + LG</td><td colspan="2">SP + LG</td><td colspan="2">SP + SG</td></tr><tr><td>Day</td><td>Night</td><td>Day</td><td>Night</td><td>Day</td><td>Night</td><td>Day</td><td>Night</td><td>Day</td><td>Night</td><td>Day</td><td>Night</td></tr><tr><td>pinhole</td><td>6.0/11.3/24.6</td><td>1.7/4.4/10.3</td><td>8.0/14.9/30.9</td><td>2.2/5.5/13.5</td><td>8.4/15.2/30.7</td><td>2.3/5.6/12.3</td><td>4.2/7.8/18.0</td><td>1.6/3.5/8.6</td><td>4.8/10.2/22.1</td><td>1.9/4.7/11.1</td><td>5.4/10.4/21.1</td><td>2.1/4.7/10.4</td></tr><tr><td>+VC1</td><td>8.5/14.0/23.5</td><td>2.2/4.1/7.9</td><td>10.4/17.0/27.5</td><td>2.9/5.3/10.1</td><td>10.9/17.8/28.5</td><td>2.8/5.6/9.9</td><td>6.1/10.8/21.1</td><td>1.7/3.6/8.2</td><td>7.5/13.2/22.5</td><td>2.0/4.5/9.6</td><td>7.6/13.5/22.8</td><td>2.1/4.7/9.6</td></tr><tr><td>+VC2</td><td>14.2/22.2/35.5</td><td>4.1/7.8/13.6</td><td>19.8/29.7/42.9</td><td>6.1/10.4/16.9</td><td>21.6/33.2/49.7</td><td>5.9/11.0/18.4</td><td>8.0/13.1/23.5</td><td>2.5/4.6/9.1</td><td>10.7/16.4/26.6</td><td>3.0/5.7/11.4</td><td>11.6/18.5/30.5</td><td>3.5/6.8/12.8</td></tr><tr><td>fisheye1</td><td>1.6/4.4/17.7</td><td>0.5/1.8/7.4</td><td>1.9/5.4/20.1</td><td>0.7/2.3/10.5</td><td>1.6/4.7/18.4</td><td>0.5/1.9/8.2</td><td>0.8/2.5/11.8</td><td>0.4/1.4/5.8</td><td>1.0/3.5/13.0</td><td>0.5/1.4/8.2</td><td>0.9/3.4/12.1</td><td>0.3/1.4/7.0</td></tr><tr><td>+VC1</td><td>3.3/9.2/27.6</td><td>0.8/2.7/9.6</td><td>4.1/10.6/32.2</td><td>1.4/4.4/14.9</td><td>3.0/9.5/29.6</td><td>0.9/3.1/11.7</td><td>2.3/5.5/19.4</td><td>0.5/1.6/7.3</td><td>2.1/6.1/19.9</td><td>0.7/2.2/9.0</td><td>1.9/5.5/19.1</td><td>0.5/1.9/7.3</td></tr><tr><td>+VC2</td><td>3.9/10.5/33.0</td><td>1.0/4.0/14.6</td><td>4.3/12.4/38.2</td><td>1.9/6.4/21.8</td><td>3.6/11.0/34.5</td><td>1.1/5.3/19.4</td><td>2.5/6.9/25.3</td><td>0.8/2.8/12.2</td><td>2.8/8.2/29.0</td><td>1.3/4.6/18.0</td><td>2.1/7.1/26.7</td><td>1.0/4.0/16.2</td></tr><tr><td>fisheye2</td><td>1.6/4.9/20.9</td><td>0.5/2.0/8.7</td><td>1.9/6.7/23.2</td><td>0.8/3.0/11.8</td><td>1.7/5.2/19.5</td><td>0.7/2.5/9.9</td><td>1.3/3.5/14.2</td><td>0.4/1.6/6.9</td><td>1.2/3.8/15.2</td><td>0.5/1.5/9.1</td><td>1.2/3.9/12.9</td><td>0.6/1.6/7.2</td></tr><tr><td>+VC1</td><td>4.3/10.8/30.9</td><td>0.8/3.0/11.2</td><td>4.7/12.4/34.1</td><td>1.8/5.4/15.8</td><td>4.1/10.6/31.5</td><td>1.1/3.6/13.7</td><td>2.5/6.5/20.6</td><td>0.5/1.7/7.4</td><td>2.5/7.0/22.1</td><td>0.8/2.4/9.4</td><td>2.2/6.8/20.2</td><td>0.5/2.1/8.0</td></tr><tr><td>+VC2</td><td>4.3/11.0/34.4</td><td>1.1/4.7/17.3</td><td>5.1/14.0/41.1</td><td>2.0/7.2/24.8</td><td>3.7/11.5/36.8</td><td>1.5/5.9/21.2</td><td>2.8/7.3/27.1</td><td>0.8/2.9/13.4</td><td>2.9/8.9/32.0</td><td>1.6/5.3/20.1</td><td>2.5/8.0/27.9</td><td>1.1/4.2/17.7</td></tr><tr><td>fisheye3</td><td>3.8/9.5/29.8</td><td>1.0/3.6/13.8</td><td>4.0/10.5/31.6</td><td>1.3/4.6/16.4</td><td>3.4/9.1/28.4</td><td>0.8/3.8/13.8</td><td>2.5/6.3/21.9</td><td>0.6/2.4/10.1</td><td>2.8/7.2/22.3</td><td>0.9/2.9/12.4</td><td>2.0/5.9/20.0</td><td>1.3/4.2/15.0</td></tr><tr><td>+VC1</td><td>5.9/14.7/39.5</td><td>1.5/5.2/17.7</td><td>6.0/16.2/43.5</td><td>2.0/6.8/21.9</td><td>5.8/14.7/39.1</td><td>1.8/5.5/18.3</td><td>4.4/10.2/30.1</td><td>1.1/3.3/12.8</td><td>4.6/11.6/32.0</td><td>1.4/4.1/14.4</td><td>4.3/10.5/29.7</td><td>1.2/3.8/12.3</td></tr><tr><td>+VC2</td><td>5.2/13.9/41.8</td><td>2.1/6.5/22.5</td><td>5.9/16.5/46.3</td><td>2.5/8.6/29.1</td><td>5.4/14.2/40.5</td><td>2.1/7.3/25.9</td><td>4.3/9.8/34.6</td><td>1.7/5.2/19.5</td><td>4.7/12.6/36.8</td><td>2.2/7.1/23.8</td><td>3.8/10.5/32.5</td><td>1.6/5.1/20.7</td></tr><tr><td>360</td><td>17.1/30.8/66.1</td><td>8.5/20.1/47.5</td><td>18.2/34.6/64.2</td><td>7.0/18.7/45.3</td><td>15.8/31.2/60.4</td><td>7.0/17.8/42.8</td><td>17.6/31.8/68.1</td><td>8.7/22.0/56.0</td><td>18.7/34.9/68.1</td><td>7.3/20.0/53.4</td><td>16.6/32.6/65.7</td><td>7.1/18.7/50.4</td></tr></table>

Table 5. Local matching localization results: the average percentage of predictions with high (0.25m, $2^{\circ}$), medium (0.5m, $5^{\circ}$), and low (5m, $10^{\circ}$) accuracy [46] (higher is better) over the four scenes. $\#$ indicates the highest value for each device with and without virtual cameras (VC1, VC2) at each accuracy level. The best results across all devices at each accuracy level are in bold with $\#$.

# 5.2. Visual Localization

We compare our approach with the following baselines in two categories: 1) local feature matching pipelines tailored from HLoc [42], using different keypoint descriptors (SuperPoint (SP) [16] and DISK [55]) and matchers (SuperGlue (SG) [43] and the follow-up SOTA LightGlue (LG) [31]); 2) the end-to-end APRs PN [23, 24] and MS-T [49].

Local feature matching: During local feature matching, all $360^{\circ}$ images are cropped to $1228 \times 614$ as a tradeoff between time and computation. We report the average results over the four scenes in Table 5. The $360^{\circ}$ query achieves the best performance at all three accuracy levels in most cases across all IR, keypoint descriptor, and matcher settings, and it is especially more robust in challenging nighttime conditions. The VC1 and VC2 techniques improve the recall and precision of IR, increasing the accuracy of 2D-2D matching for all cameras. In most cases, the performance at the low accuracy level $(5m, 10^{\circ})$ is correlated with the FoV, where a larger FoV results in higher performance. However, the pinhole query with VC2 during IR performs comparably to the $360^{\circ}$ query at the high $(0.25m, 2^{\circ})$ and medium $(0.5m, 5^{\circ})$ accuracy levels. In contrast, query frames from $c_{1}$, $c_{2}$, and $c_{3}$ demonstrate relatively lower performance at the high and medium accuracy levels.

As observed in Table 4, different IR methods perform differently depending on the type of camera. We thus consider both NetVLAD and CosPlace in visual localization. In most cases, $360^{\circ}$ query frames achieve higher accuracy with CosPlace, while pinhole and fisheye query frames have lower accuracy with CosPlace than with NetVLAD, as shown in Table 5. These results match the precision and recall differences noted in Table 4. We believe that the FoV not only affects the robustness of IR but also impacts local 2D-2D matching performance. Pinhole queries suffer from erroneous matches due to interference from symmetrical and repetitive structures, while the larger FoV of fisheye and $360^{\circ}$ query frames captures more unique visual features. We provide examples in the supplementary material.

APR: APRs cannot extrapolate well beyond the training set [40, 47]. Cross-device queries further complicate this challenge by introducing an additional dimension, the FoV. Due to the high efficiency of $360^{\circ}$ mapping, the training set $\mathbf{I}^r$ in 360Loc contains only around one third of the images of datasets such as [24]. Figure 5 shows that when PN and MS-T are trained solely on $\mathbf{I}^r$ with only $360^{\circ}$ images, a smaller domain gap between the query and the $360^{\circ}$ images yields a lower error. However, when we introduce images from virtual cameras for data augmentation, $\mathrm{PN}^{vc2}$ and MS-$\mathrm{T}^{vc2}$ exhibit significantly reduced translation and rotation errors across all queries, particularly during daytime. MS-$\mathrm{T}^{vc2}$ reduces translation error by up to $79\%$ and rotation error by up to $72\%$ compared to MS-T, and $\mathrm{PN}^{vc2}$ displays similar improvements over PN. In most cases, except for $\mathrm{PN}^{vc2}$'s rotation error on $360^{\circ}$ queries during daytime, both the $360^{\circ}$ and fisheye queries exhibit higher accuracy than the pinhole query with $\mathrm{PN}^{vc2}$ and MS-$\mathrm{T}^{vc2}$. This suggests that a larger FoV still helps improve visual localization accuracy in challenging scenes. Another interesting finding is that even though the augmented training set $\mathbf{I}^r \cup \Psi_c(\mathbf{I}^r)$ does not increase the number of $360^{\circ}$ images, the error for the $360^{\circ}$ query still decreases, particularly the translation error during daytime. This result fully demonstrates the utility of virtual cameras for data augmentation.

# 5.3. Analysis

Cross-device visual positioning presents significant challenges for IR, local matching, and APRs. Our VC1 and VC2 methods demonstrate practical enhancements in the performance of IR and APRs in cross-device scenarios. However, it is essential to note that during local matching, the accuracy of matches and the recall and precision of IR for query frames from different cameras may not align perfectly. The chosen IR method and its training noticeably affect accuracy for similar cameras. Fisheye cameras exhibit better performance in IR compared to pinhole cameras, yet pinhole cameras outperform fisheye cameras at the high and medium accuracy levels in local matching. This is likely because existing feature extraction and matching models lack training data on $360^{\circ}$ and fisheye cameras, resulting in less accurate matching.

Figure 5. The average of median translation/rotation errors in $(m/^{\circ})$ over the four scenes: (a) translation (day), (b) rotation (day), (c) translation (night), (d) rotation (night).

We attribute the inferior performance of pinhole query frames at the low accuracy level to the insufficient recall and precision of IR. Additionally, pinhole queries are more susceptible to interference when there are many repetitive and symmetrical features in the scene, even when the retrieved reference image is correct (example figures are in the supplementary material). By utilizing VC2 to augment IR and the APRs' training data, we eliminate the cross-device domain gap. We demonstrate that a panoramic perspective and a larger FoV can significantly improve the performance of IR and APRs, and we find that query frames from $360^{\circ}$ and ultra-wide FoV cameras are less prone to being misidentified as erroneous locations with similar structures. This suggests the promising potential of fisheye and $360^{\circ}$ cameras as viable sensors for localization tasks in indoor environments with low GPS accuracy.

# 6. Conclusion

360Loc is the first dataset and benchmark to explore the challenge of cross-device visual positioning, involving $360^{\circ}$ reference frames and query frames from pinhole, ultra-wide FoV fisheye, and $360^{\circ}$ cameras. We first identified the absence of datasets with ground truth 6DoF poses for $360^{\circ}$ images, as well as the limited research on cross-device localization and on the robustness of different cameras in ambiguous scenes. To address these limitations, we built a dataset with $360^{\circ}$ images as references and query frames from pinhole, ultra-wide FoV fisheye, and $360^{\circ}$ cameras via a virtual camera solution. This method enables fair comparisons in cross-device visual localization tasks and helps reduce the domain gap between different cameras. By evaluating feature-matching-based and pose-regression-based methods, we demonstrate the effectiveness of our virtual camera approach and the increased robustness of $360^{\circ}$ cameras in visual localization for challenging and ambiguous scenes.
360locadatasetandbenchmarkforomnidirectionalvisuallocalizationwithcrossdevicequeries/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:80ab4724618d260e9dde29119cf59483149a9e7fd54a15f47ea8255f528f095f
size 728977
360locadatasetandbenchmarkforomnidirectionalvisuallocalizationwithcrossdevicequeries/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2723ea8614f00dc81665ffff6d86fed95808b8569140a1bf2a28f7dde83af303
size 500104
360xapanopticmultimodalsceneunderstandingdataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:642474a5435baea0ea08225f646d980702a443533ec68cfba117664c28bfd53e
size 74091
360xapanopticmultimodalsceneunderstandingdataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c6ec79eb59ec5ddc0102039aee7ebf5bb26e29165440ed67f74dd3957254d1a8
size 88340
360xapanopticmultimodalsceneunderstandingdataset/1b673a67-1eed-49d6-b7e1-f0b4a9d871e2_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:43351f3e0b4a971218148f732f3953cef594fe528c6ee4da25b9b730a699e7a0
size 1039950
360xapanopticmultimodalsceneunderstandingdataset/full.md
ADDED
@@ -0,0 +1,251 @@
# $360 + x$ : A Panoptic Multi-modal Scene Understanding Dataset

Hao Chen Yuqi Hou Chenyuan Qu Irene Testini Xiaohan Hong Jianbo Jiao

The Machine Intelligence $+x$ Group, University of Birmingham, UK

Project page: https://x360dataset.github.io/

# Abstract

Human perception of the world is shaped by a multitude of viewpoints and modalities. While many existing datasets focus on scene understanding from a certain perspective (e.g. egocentric or third-person views), our dataset offers a panoptic perspective (i.e. multiple viewpoints with multiple data modalities). Specifically, we encapsulate third-person panoramic and front views, as well as egocentric monocular/binocular views, with rich modalities including video, multi-channel audio, directional binaural delay, location data and textual scene descriptions within each scene captured, presenting a comprehensive observation of the world. To the best of our knowledge, this is the first database that covers multiple viewpoints with multiple data modalities to mimic how daily information is accessed in the real world. Through our benchmark analysis, we present five different scene understanding tasks on the proposed $360 + x$ dataset to evaluate the impact and benefit of each data modality and perspective in panoptic scene understanding. We hope this unique dataset can broaden the scope of comprehensive scene understanding and encourage the community to approach these problems from more diverse perspectives.
# 1. Introduction
Scene understanding is crucial for robotics and artificial intelligence systems to perceive the environment around them. As humans, we intuitively understand the world through primarily visual inputs, as well as auditory and other sensory inputs (e.g. touch and smell). The community has made remarkable progress in mimicking human perception with contributions from various datasets and benchmarks [4, 5, 7, 9, 13, 15, 23]. These efforts have approached scene understanding from a diverse range of perspectives, such as normal frontal-view vision [5, 13, 23], panoramic view [22, 28], binocular/stereo view [20, 30], egocentric monocular view [4, 9], and audio [2, 7].

While there has been exciting progress in understanding scenes from a limited number of perspectives, it is notable that humans understand the world by incorporating a combination of viewpoints, in a holistic manner. This includes an egocentric view for activities we are involved in and a third-person view for activities we are observing. In addition to visual cues, we also rely on a range of modalities, including hearing and binaural delay, to fully comprehend our surroundings and track movements. Our prior knowledge of the scene, such as localisation information and scene descriptions, has also supported our understanding of the environment (e.g. the cafe in the city centre may be different from a similar cafe on a university campus).

Taking the above observations into consideration, a new dataset covering all these aforementioned aspects is presented in this work, to provide a panoptic scene understanding, termed the $360 + x$ dataset. This new dataset offers a diverse selection of perspectives, including a $360^{\circ}$ panoramic view providing a complete panoptic view of the environment, and a third-person front view that highlights the region of interest with the most movement in front of the camera. Additionally, we have included egocentric monocular and binocular videos to capture the first-person perspective of individuals in the environment. These viewpoints are complemented by aligned multi-channel audio with directional binaural delay information, as well as location information and scene descriptions as metadata. An illustration of the presented dataset collection system is shown in Figure 1.

Based on this newly collected dataset, we perform five visual-audio scene understanding tasks to analyse the contribution and effectiveness of each data viewpoint and modality. Particularly, we look at video classification, temporal action localisation, self-supervised representation learning, cross-modality retrieval and pre-training model migration for dataset adaptation, with interesting findings and insights from extensive experimental analysis. The main contributions of this work are summarised as follows:

- We propose, to our knowledge, the first and probably the most authentic panoptic scene understanding dataset covering multiple viewpoints and data modalities in the wild.
- We perform extensive experimental analysis to validate the effectiveness of the proposed dataset on different tasks from various perspectives and modalities.
- Interesting findings are derived from the analysis, suggesting the effectiveness of each viewpoint and data modality. Learning from this new dataset without supervision even shows better performance than a model trained in a supervised manner.

Figure 1. Illustration of the proposed $360 + x$ dataset. The $360^{\circ}$ camera records fish-eye raw videos with front and back lenses. These videos are merged to create a spherical $360^{\circ}$ panorama (middle-up figure, zoom in for details), which is then transformed to (a) $360^{\circ}$ panoramic data using equirectangular projection. The (b) third-person front view is obtained by de-warping the rich-movement region highlighted red in the spherical field of the $360^{\circ}$ panorama (the middle-left figure). By wearing stereo cameras, the capturers record (c) egocentric clips while staying visible to the fixed $360^{\circ}$ camera (central ellipse). (e) Directional audio time delay data is generated from the left and right audio inputs (d) of the $360^{\circ}$ camera by an interaural time delay process [3]. This helps locate sound sources in the $360^{\circ}$ panorama.
# 2. Related Works
Video understanding and analysis. Video analysis has been extensively studied in the literature. Existing datasets such as UCF101 [23], ActivityNet [5] and Kinetics [13] have provided large-scale video data for activity understanding tasks. However, these datasets often exhibit lower complexity compared to real-world scenes. Some datasets, like MultiThumos [31], aim to increase complexity but are limited to specific scenarios with domain-specific actions, deviating from real-life daily activities. In contrast, our dataset builds upon the activity labels from ActivityNet [5] and strives to capture data that closely simulates real-life scenarios. Apart from that, we also include multiple data viewpoints and modalities as compared to existing datasets.

Panoramic scene understanding. In recent years, panoramic scene understanding has gained significant attention due to its holistic reflection of the environment. Several datasets have been introduced to facilitate research in this area. For instance, KITTI-360 [16] provides a collection of panoramic images for urban scene analysis. EGOK360 [1] has been introduced to address the need for video data with a panoramic view. Im2Pano3D [22] presents a panoramic dataset for indoor scenarios with semantic segmentation and focuses on the prediction from a partial observation. However, these datasets primarily focus on panoramic visual data while lacking the incorporation of other viewpoints (e.g. egocentric) and data modalities (e.g. audio), limiting their potential for comprehensive scene understanding and analysis.

Egocentric video analysis. Focusing on understanding scenes from a first-person perspective, existing datasets such as EPIC-Kitchens [4] and Ego4D [9] provide egocentric video data collected during daily activities. They have contributed to research on activity recognition and object detection in egocentric scenes. Unlike these datasets focusing on egocentric views, our dataset also covers other viewpoints and modalities, aiming to support scene understanding research in a more panoptic manner.

Visual-audio analysis. Integrating visual and audio information often enhances the performance of models in scene understanding tasks, as it provides richer contextual information. There are some existing datasets available to support research in audio-visual analysis, e.g. AVA [10], AudioSet [6] and VGGSound [2], to name a few. However, these datasets are lacking in multiple viewpoints and the directional property of audio signals, which are provided in the proposed new dataset.
# 3. $360 + x$ Dataset
# 3.1. Data Acquisition and Alignment
Two main devices were used for our data collection: the Insta 360 One X2 and Snapchat Spectacles 3 cameras. The 360 One X2 has two fish-eye cameras that collect $360^{\circ}$ panoramic visual information in the scene with $5760 \times 2880$ resolution and a frame rate of 25 FPS. Additionally, directional audio was recorded using four microphones in directional audio mode. The Spectacles 3, in turn, has a stereo camera attached to a pair of glasses, used to capture the egocentric binocular vision within the scene at a resolution of $2432 \times 1216$ and a frame rate of 60 FPS.
Once we obtained the raw data, we aligned the different viewpoints and modalities through a specific process. The initial raw footage captured by the two fish-eye cameras on the $360^{\circ}$ camera was in the form of two circular videos, which were then stitched and de-warped into a spherical panorama. This panorama can be projected into an equirectangular format to produce a panoramic video. However, this direct compression of the spherical view into a rectangular format can introduce unnatural distortions. In order to provide a more natural and informative view, we inversely project a rectangular region into equirectangular space and use it to crop the spherical panorama. We use optical flow to determine the crop region with the most motion activity in the spherical panorama field. This crop region is then projected back to rectangular, resulting in an informative video view with minimal distortions.
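This crop can be pictured as a gnomonic (perspective) re-projection of the sphere. The sketch below is our own minimal NumPy illustration of such a crop, not the authors' pipeline; the function name, camera conventions, and nearest-neighbour sampling are all assumptions:

```python
import numpy as np

def equirect_to_perspective(pano, fov_deg, yaw_deg, pitch_deg, out_w, out_h):
    """Crop a pinhole-style view out of an equirectangular panorama.

    pano: H x W x 3 equirectangular image; yaw/pitch select the view direction.
    """
    H, W = pano.shape[:2]
    f = 0.5 * out_w / np.tan(np.radians(fov_deg) / 2)  # virtual pinhole focal length
    # Ray directions in camera coordinates (x right, y down, z forward).
    xs = np.arange(out_w) - (out_w - 1) / 2
    ys = np.arange(out_h) - (out_h - 1) / 2
    xv, yv = np.meshgrid(xs, ys)
    rays = np.stack([xv, yv, np.full_like(xv, f)], axis=-1)
    rays /= np.linalg.norm(rays, axis=-1, keepdims=True)
    # Rotate rays to the requested viewing direction (pitch about x, then yaw about y).
    p, y = np.radians(pitch_deg), np.radians(yaw_deg)
    Rx = np.array([[1, 0, 0], [0, np.cos(p), -np.sin(p)], [0, np.sin(p), np.cos(p)]])
    Ry = np.array([[np.cos(y), 0, np.sin(y)], [0, 1, 0], [-np.sin(y), 0, np.cos(y)]])
    rays = rays @ (Ry @ Rx).T
    # Ray direction -> longitude/latitude -> equirectangular pixel (nearest neighbour).
    lon = np.arctan2(rays[..., 0], rays[..., 2])        # [-pi, pi]
    lat = np.arcsin(np.clip(rays[..., 1], -1.0, 1.0))   # [-pi/2, pi/2]
    u = ((lon / (2 * np.pi) + 0.5) * (W - 1)).round().astype(int)
    v = ((lat / np.pi + 0.5) * (H - 1)).round().astype(int)
    return pano[v, u]
```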
Egocentric binocular videos, as shown in Figure 1(c), were captured in clips ranging from approximately 30 seconds to 1 minute each. A total of 1 to 5 stereo clips were recorded for each $360^{\circ}$ video, scattered throughout its duration of roughly 6 minutes on average. In addition to stereo videos, we also provide the corresponding monocular videos for the egocentric view.
The audio recordings were temporally aligned with their corresponding videos with left/right channel modality. The four-channel audio accompanying the $360^{\circ}$ panoramic video is provided as well for further exploration. Moreover, we also provide the directional information of the audio, presented as the estimated interaural time delay of the sound obtained from the method introduced in [3]. GPS and weather information were also provided.
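At its simplest, such an interaural time delay can be estimated by cross-correlating the two channels; the following is an illustrative sketch assuming mono NumPy waveforms (the actual method of [3] is more involved):

```python
import numpy as np

def interaural_time_delay(left, right, sr, max_delay_s=1e-3):
    """Estimate the inter-channel delay (seconds) by cross-correlation.

    A positive result means the left channel lags the right (source towards
    the right) under this correlation convention.
    """
    n = len(left)
    corr = np.correlate(left, right, mode="full")        # lags -(n-1) .. (n-1)
    lags = np.arange(-(n - 1), n)
    plausible = np.abs(lags) <= int(max_delay_s * sr)    # physically possible lags only
    best_lag = lags[plausible][np.argmax(corr[plausible])]
    return best_lag / sr
```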
Given the possibility of occlusions in regions visible to the egocentric camera but not to the $360^{\circ}$ camera, we ensured during data collection that the cameras were positioned in close proximity. This setup, with clear mutual visibility, allowed both cameras to capture a similar overall scene.
# 3.2. Scene Selection
To broaden scene coverage and promote multi-modal collaborative learning, we integrated a strategic selection process for captured scenes, governed by three key criteria:

i) Scene categories must be carefully crafted to be comprehensive, yet concise, while also being authoritative and reflective of everyday life. The location where a scene unfolds plays a crucial role in providing essential environmental context to the activities within it [17]. Distinct scenes can impart unique meanings or emotional nuances to identical events. For instance, the act of chatting could convey divergent implications in a school setting as compared to a home environment. Such nuances are critical as they offer deeper insights into the contextual interpretation of behaviours and interactions in varied settings.

ii) The data should ideally span a wide array of weather and lighting conditions. This criterion aims to ensure the inclusion of both indoor and outdoor activities under various environmental scenarios. Such diversity is important in accurately representing the multifaceted nature of daily life and the various conditions in which these activities occur.

iii) Our third criterion is the inclusion of scenarios rich in distinctive sound sources, particularly those where multiple activities co-occur. It is essential for the dataset to not only visually represent these activities but also to capture the corresponding auditory elements. The goal is to present the complexity and realism of real-world environments as much as possible, marked by simultaneous and various actions and behaviours.

It is worth noting that our dataset was collected across several countries, including the United Kingdom (e.g. London, Birmingham, Cardiff and Jersey), France (Paris), Spain (e.g. Oviedo and Picos de Europa), China (e.g. Guangzhou and Shenzhen), and Japan (e.g. Kyoto and Osaka). During the data collection, the $360^{\circ}$ camera was placed statically to record the scene, while a capturer wearing the Spectacles glasses recorded first-person interactions with the scene.
Sensitive data handling. Our dataset was collected in a real-world setting and may contain sensitive personal information (e.g. human faces). To ensure ethical and responsible research, the video capture was conducted with proper consent. Additionally, we have taken measures to protect privacy by anonymising the data. This includes applying a face detection mechanism to outline predicted face locations in each frame and applying blurring filters to maintain meaningful details while ensuring information security. More detailed information on our privacy protection measures can be found in the supplementary material.

Figure 2. Dataset statistics analysis, on the distributions of (a) the scene categories, (b) the geographical distribution of actions per city, (c) overall action instance duration, (d) the number of action instances per video, (e) capture time of the day, and (f) binaural delay per clip.
# 3.3. Data Annotation
Scene label rationale. The $360 + x$ dataset comprises a total of 28 scene categories (15 indoor scenes and 13 outdoor scenes), as illustrated in Figure 2(a). To establish comprehensive and authoritative scene categories that reflect daily life, we referred to the Places Database [34], which is derived from WordNet [18], as our primary basis. We then leveraged the sophisticated semantic analysis capabilities of large language models to conduct a thorough filtering and classification of a multitude of everyday scenes. This curation resulted in a refined set of 28 scene categories, each symbolising aspects of daily life. Simultaneously, the recordings concentrate on capturing common occurrences within conventional settings, providing a realistic depiction of everyday life. Detailed descriptions defining each category, along with discussions regarding these constraints and potential sampling biases, are presented in the supplementary material.

Temporal segmentation label. We also provide temporal segment labelling for the understanding of activities in the captured scenes. We follow the activity hierarchy standard defined by ActivityNet [5], which provides a comprehensive categorisation of human activities, consisting of seven top-level categories (Personal Care, Eating and Drinking, Household, Caring and Helping, Working, Socialising and Leisure, and Sports and Exercises). To capture the diversity and granularity of activities within each category, we defined a total of 38 action instances, covering specific actions and behaviours. To ensure high-quality annotations, the temporal segmentation labelling was annotated by three experienced annotators. Each annotator independently annotated the temporal segments corresponding to the activities in the videos. To obtain a consensus, we merged the individual annotations and resolved any discrepancies through discussion among the annotators.
# 3.4. Dataset Statistics and Analysis
Overview. Existing publicly available datasets primarily focus on visual unimodality [4, 5, 13, 15, 23]. In contrast, our dataset introduces a novel approach by collecting different views and modalities, as presented in Table 1, including $360^{\circ}$ panoramic video, third-person front view video, egocentric monocular video, egocentric binocular video, normal audio, directional binaural delay, location and textual scene description. This diverse range of modalities provides multiple dimensions and clues for understanding and analysing complex scenes. Our dataset consists of 2,152 videos representing 232 data examples, with 464 videos captured using the 360 camera and the remaining 1,688 recorded with the Spectacles camera.

Table 1. Dataset comparison. Ego: Egocentric, V: Video, A: Audio, A+V: Audio-visual events.

<table><tr><td rowspan="2">Dataset</td><td colspan="4">Video Viewpoints</td><td colspan="3">Other Modalities</td><td colspan="3">Statistics</td><td colspan="2">Attributions</td></tr><tr><td>Third-person Front View</td><td>360° Panoramic</td><td>Ego Monocular</td><td>Ego Binocular</td><td>Normal Audio</td><td>Directional Binaural Delay</td><td>GPS Info</td><td>Avg Duration</td><td>Total Duration(s)</td><td>Frames Count(K)</td><td>Annotations Source</td><td>Multiple Events</td></tr><tr><td>UCF101 [23]</td><td>✓</td><td>X</td><td>X</td><td>X</td><td>✓</td><td>X</td><td>X</td><td>7.21 s</td><td>96,000</td><td>2,400</td><td>V</td><td>X</td></tr><tr><td>Kinetics [13]</td><td>✓</td><td>X</td><td>X</td><td>X</td><td>X</td><td>X</td><td>X</td><td>10 s</td><td>2,998,800</td><td>74,970</td><td>V</td><td>X</td></tr><tr><td>HMDB51 [14]</td><td>✓</td><td>X</td><td>X</td><td>X</td><td>X</td><td>X</td><td>X</td><td>3 s</td><td>21,426</td><td>643</td><td>V</td><td>X</td></tr><tr><td>ActivityNet [5]</td><td>✓</td><td>X</td><td>X</td><td>X</td><td>X</td><td>X</td><td>X</td><td>2 min</td><td>2,332,800</td><td>11,664</td><td>V</td><td>✓</td></tr><tr><td>EPIC-Kitchens [4]</td><td>X</td><td>X</td><td>✓</td><td>X</td><td>✓</td><td>X</td><td>X</td><td>7.6 min</td><td>198,000</td><td>11,500</td><td>V</td><td>X</td></tr><tr><td>Ego4D [9]</td><td>X</td><td>X</td><td>✓</td><td>X</td><td>✓</td><td>X</td><td>✓</td><td>8 min</td><td>13,212,000</td><td>-</td><td>A+V</td><td>✓</td></tr><tr><td>360+x (Ours)</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>6.2 min</td><td>244,000</td><td>8,579</td><td>A+V</td><td>✓</td></tr></table>

Figure 2(a) presents the distribution of video counts across each of the 28 scene categories. Our dataset is characterised by a balanced distribution of data across these scenes. Notably, it diverges from conventional databases like UCF101 [23], Kinetics [13], HMDB [15], and ActivityNet [5], particularly in terms of average video duration, which is approximately 6.2 minutes. This longer duration is crucial for maintaining the integrity and coherence of actions within each scene, allowing for a comprehensive temporal analysis of the activities.

Temporal segment label. The annotations of temporal segment labels in our dataset contribute to the fine-grained analysis of activities. We defined 38 action instances representing specific actions and behaviours. The length of each segment labelled with a specific activity varies across the dataset, as depicted in Figure 2(c). Note that we acknowledge the significance of audio in accurately identifying certain actions, such as 'coughing' or 'clapping'. Therefore, our dataset combines audio information to enhance accuracy in action recognition [4, 5, 13, 15, 23], as shown in Table 1.

Comparative complexity. Due to its realistic scene simulation, our dataset offers more complexity compared to previous datasets. This complexity arises from the diverse range of activities and interactions captured, resulting in a more challenging and realistic setting for scene understanding and activity recognition. As shown in Figure 2(d), most existing datasets, such as UCF101 [23], Kinetics [13], and HMDB51 [14], typically consist of one action instance per video. While datasets like Ego4D [9] and ActivityNet [5] have large volumes and broad coverage, they often contain a limited number of action instances per individual video.

The HACS dataset [33] contains more action instances per video but still pales in comparison to the richness of the proposed dataset. Our dataset surpasses these existing datasets in terms of the number of action instances per video, showcasing the extensive variety of activities captured. The improved complexity and richness of our dataset enable follow-up research to explore and develop more robust algorithms, pushing the boundaries of scene understanding in real-world contexts.

Data distribution. We have ensured a balanced distribution across various dimensions, including scene categories, action instances, binaural delay, etc. Figure 2(a) depicts the scene number distribution across 28 scene categories, demonstrating a comprehensive coverage of scene categories. Notably, the dataset achieves an almost equal proportion of indoor and outdoor scenes, accounting for $54.7\%$ and $45.3\%$ respectively. Our dataset allows each scene to include multiple diverse action instances naturally, and also enables different scenes to share common action instances. Notably, Figure 2(b) displays the types of action per location, illustrating the geographic distribution and diversity of the data: the inner circle shows the location and the outer circle shows the action types captured in each location. As illustrated in Figure 2(c), the distribution of action duration shows that our dataset has captured extensive and realistic human behaviours across natural scenes. One interesting observation from our dataset is the high-frequency occurrence of the action 'operating phone', which contributes $17.54\%$ of the total duration, providing a reflection of mobile usage in modern daily life. Additionally, the dataset offers valuable directional audio to supplement visual understanding. The distribution of data capture times in the dataset corresponds with natural human activities, as shown in Figure 2(e): human activities throughout the day are mainly concentrated during the daytime (more in the afternoon and evening). Figure 2(f) illustrates the diversity of binaural delay for each clip; positive values mean the audio is directed towards the left, and negative values towards the right. In summary, the presented $360 + x$ dataset covers broad modalities and diversity with an authentic distribution from different perspectives, mimicking real daily life.
# 4. Benchmark and Experiments
To establish a comprehensive benchmark for the presented $360 + x$ dataset, we choose five scene understanding tasks that explore the use of multiple viewpoints and modalities: video scene classification, temporal action localisation, cross-modality retrieval, self-supervised representation learning, and dataset adaptation.

Remark: Unless specifically stated otherwise, the experiments on $360 + x$ utilise three views: the $360^{\circ}$ view, the egocentric binocular view, and the third-person front view.
# 4.1. Experimental Setting
Models. We employed a consistent set of model backbones across different tasks to minimise model interference, except for the temporal action localisation task (detailed in section 4.3). We followed the commonly used setup and selected the I3D backbone [13] as our video model. To handle audio-related aspects, we chose VGGish [12] as our audio model. Additionally, for directional binaural feature extraction, we utilised the ResNet-18 model [11]. A linear layer is positioned after the backbones to carry out each specific task based on the backbone output features.
It is important to note that a simple concatenation of all modality features can diminish the potential information derived from multi-modality [26]. Therefore, instead of solely concatenating modality features, we leverage a hierarchical attention mechanism for multi-modality integration. In this approach, the directional binaural feature serves as an attention query to direct focused attention towards the audio feature, enabling it to encapsulate the directional information into the audio feature. At the same time, the audio feature is also leveraged by acting as a query itself, enabling it to attentively interact with the video feature. This mechanism creates a synergistic representation of the underlying data that integrates the features of all modalities. For more details and in-depth analysis, please refer to the supplementary material.
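Read literally, this hierarchy chains two cross-attention stages. The PyTorch sketch below is one plausible reading of the description; the dimensionality, head count, and module names are our assumptions, not the authors' implementation:

```python
import torch
import torch.nn as nn

class HierarchicalFusion(nn.Module):
    """Direction queries audio, then the enriched audio queries video."""

    def __init__(self, dim=512, heads=8):
        super().__init__()
        self.dir_to_audio = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.audio_to_video = nn.MultiheadAttention(dim, heads, batch_first=True)

    def forward(self, video, audio, direction):
        # All inputs are (batch, tokens, dim) sequences, one per modality.
        # Stage 1: the directional binaural feature attends over audio tokens,
        # folding direction information into the audio representation.
        audio_dir, _ = self.dir_to_audio(direction, audio, audio)
        # Stage 2: the direction-aware audio attends over video tokens,
        # yielding the fused multi-modal representation.
        fused, _ = self.audio_to_video(audio_dir, video, video)
        return fused
```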
Training and verification setup. For each temporal action localisation model, we follow its original training settings. For the I3D, VGGish, and ResNet-18 networks, training runs for 200 epochs with the parameters described in [19]. The training process utilises the AdamW optimiser with a learning rate of $1 \times 10^{-5}$ and a decay rate of 0.1 at the 80th and 120th epochs. We also apply data augmentation techniques such as rotation, scaling, and colour jittering. The dataset was divided into training, validation, and test sets, following an 80/10/10 split. To ensure a balanced representation of scene categories, the examples were stratified probabilistically across the sets.
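For reference, the stated recipe maps onto a standard PyTorch setup roughly as below; the tiny stand-in model and the omitted training pass are placeholders, not the actual pipeline:

```python
import torch
import torch.nn as nn

model = nn.Linear(8, 2)  # tiny stand-in for the I3D / VGGish / ResNet-18 backbones
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)
# Decay the learning rate by a factor of 0.1 at the 80th and 120th of 200 epochs.
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[80, 120], gamma=0.1)

for epoch in range(200):
    # ... one full training pass over the data would go here ...
    optimizer.step()   # placeholder parameter update
    scheduler.step()   # advance the learning-rate schedule once per epoch
```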
Table 2. Video classification performance across different views (Ego: egocentric binocular view, Front: third-person front view, $360^{\circ}$: $360^{\circ}$ view) and data modalities (V: Video, A: Audio, D: Directional binaural delay). Reported in Avg. Prec. $(\%)$.

<table><tr><td rowspan="2">Selected Views</td><td colspan="5">Modalities</td></tr><tr><td colspan="2">V</td><td colspan="2">V + A</td><td>V + A + D</td></tr><tr><td>Egocentric Only</td><td>51.95</td><td>(±0.0)</td><td>55.24</td><td>(±0.0)</td><td>58.92</td></tr><tr><td>Front Only</td><td>54.05</td><td>(+2.1)</td><td>65.33</td><td>(+10.1)</td><td>67.19</td></tr><tr><td>360° Only</td><td>56.33</td><td>(+4.4)</td><td>67.14</td><td>(+11.9)</td><td>70.95</td></tr><tr><td>360° + Egocentric</td><td>58.99</td><td>(+7.0)</td><td>70.48</td><td>(+15.2)</td><td>72.11</td></tr><tr><td>360° + Front</td><td>59.70</td><td>(+7.8)</td><td>75.06</td><td>(+19.8)</td><td>77.69</td></tr><tr><td>360° + Front + Ego</td><td>63.73</td><td>(+11.8)</td><td>77.32</td><td>(+22.1)</td><td>80.62</td></tr></table>
# 4.2. Video Scene Classification
Video scene classification assigns scene labels to videos based on their frames, enabling analysis of visual content and determining the subject matter.

Single view vs. multi-view. First, we are interested in the influence of different combinations of video views on the classification performance. The results, representing each combination, are summarised in Table 2. The results for single views are presented in the first three rows, indicating that using a single $360^{\circ}$ panoramic view outperforms using either an egocentric binocular view or a third-person front view only. When employing multiple views, it is noted that better performance can be achieved compared to using a single view. Specifically, utilising all three views leads to the best performance. Such a performance can be attributed to the fact that although these three views describe the same scene, each different view offers a unique perspective that contributes to a more comprehensive understanding of the scene, resulting in improved performance.

Single-modality vs. multi-modality and more. We further investigate the impact of modalities on the model's performance. Various combinations of modalities are analysed, and the results are summarised in Table 2 on a column-wise basis. In particular, the first column represents the visual modality alone, the second column combines video with audio, and the last column incorporates visual, audio, and directional binaural information modalities.

The inclusion of additional modalities leads to average precision improvements. For example, when all three views are utilised, incorporating more modalities results in improvements of $13.59\%$ and $16.89\%$, respectively. This underscores the benefits of leveraging multiple modalities for a more comprehensive understanding of the scene and enhancing overall performance.
# 4.3. Temporal Action Localisation
Temporal Action Localisation (TAL) is a video understanding task that involves the dense identification and temporal segmentation of activities within a video stream over a specific time period. Current TAL approaches typically employ a two-stage paradigm [27, 32]. The first stage extracts features from the entire video, and the second stage predicts temporal segmentation based on these features.

Table 3. Temporal action localisation results. Baseline extractors are used in [2, 21, 24, 32]. The mAP@σ represents the mean average precision (%) at a threshold of σ. The best performance is achieved by employing $V + A + D$ modalities with extractors pre-trained on $360 + x$.

<table><tr><td rowspan="2">Extractors</td><td rowspan="2">Modalities</td><td colspan="4">Actionformer [32]</td><td colspan="4">TemporalMaxer [24]</td><td colspan="4">TriDet [21]</td></tr><tr><td>mAP @0.5</td><td>mAP @0.75</td><td>mAP @0.95</td><td>Avg.</td><td>mAP @0.5</td><td>mAP @0.75</td><td>mAP @0.95</td><td>Avg.</td><td>mAP @0.5</td><td>mAP @0.75</td><td>mAP @0.95</td><td>Avg.</td></tr><tr><td rowspan="2">Baseline Extractors</td><td>V</td><td>11.9 (±0.0)</td><td>7.8 (±0.0)</td><td>3.3 (±0.0)</td><td>7.7 (±0.0)</td><td>13.1 (±0.0)</td><td>8.8 (±0.0)</td><td>3.7 (±0.0)</td><td>8.6 (±0.0)</td><td>16.7 (±0.0)</td><td>10.1 (±0.0)</td><td>4.8 (±0.0)</td><td>10.5 (±0.0)</td></tr><tr><td>V + A</td><td>19.1 (+7.2)</td><td>11.3 (+3.5)</td><td>4.2 (+0.9)</td><td>11.5 (+3.8)</td><td>21.0 (+7.9)</td><td>14.8 (+6.0)</td><td>5.6 (+1.9)</td><td>13.8 (+5.2)</td><td>23.6 (+6.9)</td><td>17.2 (+7.1)</td><td>6.4 (+1.6)</td><td>15.7 (+5.2)</td></tr><tr><td rowspan="3">Pre-trained on 360+x</td><td>V</td><td>16.4 (+4.5)</td><td>9.8 (+2.0)</td><td>3.9 (+0.6)</td><td>10.0 (+2.3)</td><td>20.4 (+7.3)</td><td>14.3 (+5.5)</td><td>5.2 (+1.5)</td><td>13.3 (+4.7)</td><td>21.1 (+4.4)</td><td>15.3 (+5.2)</td><td>5.5 (+0.7)</td><td>14.0 (+3.5)</td></tr><tr><td>V + A</td><td>23.6 (+11.7)</td><td>16.9 (+9.1)</td><td>5.7 (+2.4)</td><td>15.4 (+7.7)</td><td>25.8 (+12.7)</td><td>18.0 (+9.2)</td><td>6.4 (+2.7)</td><td>16.7 (+8.1)</td><td>26.4 (+8.7)</td><td>18.5 (+8.4)</td><td>6.9 (+2.1)</td><td>17.3 (+6.8)</td></tr><tr><td>V + A + D</td><td>24.9 (+13.0)</td><td>17.4 (+9.6)</td><td>6.1 (+2.8)</td><td>16.1 (+8.4)</td><td>26.6 (+13.5)</td><td>18.3 (+9.5)</td><td>6.5 (+2.8)</td><td>17.1 (+8.5)</td><td>27.1 (+10.4)</td><td>18.7 (+8.6)</td><td>7.0 (+2.2)</td><td>17.6 (+7.1)</td></tr></table>
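Concretely, mAP@σ counts a predicted segment as correct only when its temporal IoU with a matching ground-truth segment reaches the threshold σ; a minimal sketch of that overlap measure (our illustration, not code from the benchmarked methods):

```python
def temporal_iou(pred, gt):
    """IoU of two temporal segments, each given as (start, end) in seconds."""
    inter = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    union = (pred[1] - pred[0]) + (gt[1] - gt[0]) - inter
    return inter / union if union > 0 else 0.0

# e.g. temporal_iou((2.0, 6.0), (4.0, 8.0)) == 2.0 / 6.0
```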
Feature extractors. Baseline extractors are widely utilised for various datasets, e.g. ActivityNet [5] and Ego4D [9], on the TAL task. The baseline video features are obtained from an I3D model pre-trained on the Kinetics400 dataset [13]. The baseline audio features are derived from the activations of the pre-classification layer of the VGGish model, pre-trained on AudioSet [7]. There is no baseline extractor for the directional binaural delay feature, so the $\mathrm{V + A + D}$ modality is not included for the baseline. For a fair comparison, we reused our video classification models from section 4.2 as the Pre-trained on $360 + x$ extractors, following the same extraction setup for both video and audio features. Additionally, the ResNet-18 feature extractor was used for directional binaural delay feature extraction.

Experimental results. We provide a concise overview of the performance comparison for various temporal action localisation methods, including ActionFormer [32], TriDet [21] and TemporalMaxer [24], between the baseline extractors and our Pre-trained on $360 + x$ extractors. The summarised results are presented in Table 3, from which we can see that the introduction of additional modalities (i.e. audio and directional binaural delay) has a prominent positive impact on the TAL task, leading to performance improvements for both sets of extractors. This result highlights the importance of leveraging multiple modalities in enhancing the accuracy and effectiveness of temporal activity localisation techniques. Using our custom extractors provides additional improvements, as the baseline extractors may not be optimised for our specific binocular or $360^{\circ}$ views.
# 4.4. Cross-modality Retrieval
In this context, we focus on a series of retrieval tasks across modalities, including audio, video and directional binaural delay. In a modality-specific retrieval scenario, the query modality (Q) serves as the input for retrieving the key modality (K) in the Q-to-K retrieval task. The performance evaluation metric $\mathrm{R}\theta$ represents the recall at rank $\theta$.
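Concretely, given paired query/gallery embeddings, recall at rank θ can be computed as in the sketch below, which assumes (for illustration only) that the i-th query matches the i-th gallery item:

```python
import numpy as np

def recall_at_rank(sim, rank):
    """sim[i, j]: similarity of query i to gallery item j; ground truth is the diagonal."""
    order = np.argsort(-sim, axis=1)               # gallery indices, best first
    gt = np.arange(sim.shape[0])[:, None]
    hits = (order[:, :rank] == gt).any(axis=1)     # ground truth within the top `rank`?
    return 100.0 * hits.mean()                     # percentage, as reported in Table 4
```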
Table 4. Q-to-Video retrieval results. The superscript * indicates modalities are co-trained. Recall reported with rank in $\{1,5,10\}$.

<table><tr><td>Query Modality</td><td colspan="2">R1 (%)</td><td colspan="2">R5 (%)</td><td colspan="2">R10 (%)</td></tr><tr><td>A</td><td>39.14</td><td>(±0.0)</td><td>62.76</td><td>(±0.0)</td><td>79.21</td><td>(±0.0)</td></tr><tr><td>A + D</td><td>44.30</td><td>(+5.16)</td><td>66.92</td><td>(+4.16)</td><td>84.78</td><td>(+5.57)</td></tr><tr><td>(A + D)*</td><td>55.88</td><td>(+16.74)</td><td>72.53</td><td>(+9.77)</td><td>86.6</td><td>(+7.39)</td></tr></table>

Q-to-Video retrieval results. Table 4 illustrates the results when the query modality is used to retrieve videos. In this table, $A + D$ denotes a set of independently trained audio and directional binaural features employed as query features, while $(A + D)^*$ signifies the collaborative training of these features instead of treating them independently. The inter-modality retrieval results in Table 4 clearly demonstrate the quality of the modality correspondence in the $360 + x$ dataset. Besides Q-to-Video retrieval, we also performed Q-to-Audio and Q-to-Directional binaural delay experiments; details can be found in the supplementary material.
# 4.5. Self-supervised Representation Learning
Experiment setup. In this section, we investigated the impact of different self-supervised learning (SSL) methods using two video pretext tasks: video pace (VP) prediction [25] and clip order (CO) shuffle prediction [29]. The VP task challenges the model to determine the playback pace of a video, while the CO task asks the model to rearrange shuffled video clips into their correct chronological order. The original VP and CO tasks concentrate primarily on video data, but to capitalise on the advantages of multi-modality, we extended these approaches to include the audio and directional binaural delay modalities, aligning each modality with the temporal coherence and dynamics observed in the video. For more comprehensive explanations, please refer to the supplementary material.
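As an illustration, a common formulation of the clip order task samples a few equal-length clips, shuffles them, and asks the model to classify the permutation; the sketch below reflects that generic formulation, not necessarily the exact recipe of [29]:

```python
import random

def make_clip_order_sample(clips):
    """Shuffle a list of equal-length clips; the permutation is the SSL label."""
    order = list(range(len(clips)))
    random.shuffle(order)
    shuffled = [clips[i] for i in order]
    return shuffled, order  # the model must recover `order` from `shuffled`
```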
|
| 183 |
+
|
| 184 |
+
Experimental results. We first examined the impact of self-supervised learning models for video classification. Table 5 demonstrates the consistent precision gains achieved by utilising SSL pre-trained models. Notably, leveraging both video pace and clip order SSL techniques resulted in an average performance improvement of $\sim 7\%$ .
|
| 185 |
+
|
| 186 |
+
Table 5. Models with different pre-train methods were fine-tuned and tested on video classification. The experiments use all three video views. Reported in Avg. Prec. $(\%)$ .
|
| 187 |
+
|
| 188 |
+
<table><tr><td rowspan="2">Pre-train Method</td><td colspan="5">Modalities</td></tr><tr><td colspan="2">V</td><td colspan="2">V + A</td><td>V + A + D</td></tr><tr><td>From Scratch</td><td>63.73</td><td>(±0.0)</td><td>77.32</td><td>(±0.0)</td><td>80.62</td></tr><tr><td>Video Pace [25]</td><td>69.27</td><td>(+5.5)</td><td>79.56</td><td>(+2.2)</td><td>81.97</td></tr><tr><td>Clip Order [29]</td><td>69.91</td><td>(+6.2)</td><td>80.14</td><td>(+2.8)</td><td>82.18</td></tr><tr><td>VP [25] + CO [29]</td><td>76.84</td><td>(+13.1)</td><td>82.66</td><td>(+5.3)</td><td>83.32</td></tr></table>
|
| 189 |
+
|
| 190 |
+
Table 6. Comparison between supervised pre-trained extractors with SSL pretrained counterparts on TAL task. The experiments use all three video views with modalities (V+A+D).
|
| 191 |
+
|
| 192 |
+
<table><tr><td>Pre-train Method</td><td>mAP @0.5</td><td>mAP @0.75</td><td>mAP @0.95</td><td>Avg.</td></tr><tr><td>Supervised</td><td>27.1 (±0.0)</td><td>18.7 (±0.0)</td><td>7.0 (±0.0)</td><td>17.6 (±0.0)</td></tr><tr><td>Video Pace [25]</td><td>29.4 (+2.3)</td><td>19.6 (+0.9)</td><td>7.4 (+0.4)</td><td>18.8 (+1.2)</td></tr><tr><td>Clip Order [29]</td><td>28.9 (+1.8)</td><td>19.3 (+0.6)</td><td>7.3 (+0.3)</td><td>18.5 (+0.9)</td></tr><tr><td>VP [25] + CO [29]</td><td>30.3 (+3.2)</td><td>20.2 (+1.5)</td><td>7.9 (+0.9)</td><td>19.5 (+1.9)</td></tr></table>
|
| 193 |
+
|
| 194 |
+
Table 7. Following original setup of THUMOS14 dataset [8], our dataset adaptation task uses video modality only.
|
| 195 |
+
|
| 196 |
+
<table><tr><td>Feature Extractor</td><td>mAP@0.3</td><td>mAP@0.4</td><td>mAP@0.5</td><td>mAP@0.6</td><td>mAP0.7</td><td>Avg.</td></tr><tr><td>Kinetics400 [13] (Pre-train)</td><td>83.7 (±0.0)</td><td>80.2 (±0.0)</td><td>72.8 (±0.0)</td><td>62.4 (±0.0)</td><td>47.4 (±0.0)</td><td>69.5 (±0.0)</td></tr><tr><td>360+x (Pre-train)</td><td>84.5 (+0.8)</td><td>81.0 (+0.8)</td><td>73.4 (+0.6)</td><td>65.9 (+3.5)</td><td>54.6 (+7.2)</td><td>71.9 (+2.4)</td></tr><tr><td>Kinetics400 [13] (Pre-train) and 360+x (Fine-tune)</td><td>85.3 (+1.6)</td><td>81.8 (+1.6)</td><td>74.9 (+2.1)</td><td>68.1 (+5.7)</td><td>58.2 (+10.8)</td><td>73.7 (+4.2)</td></tr></table>
|
| 197 |
+
|
| 198 |
+
We proceeded to perform experiments using SSL pre-trained models as feature extractors for the temporal action localisation task, incorporating all three modalities $(\mathrm{V} + \mathrm{A} + \mathrm{D})$ with the TriDet framework [21]. Since a training-from-scratch model cannot serve as the first-stage extractor, we employed the supervised extractors from section 4.2 as a comparison. The summarised results in Table 6 indicate that pre-training with video pace (VP) or clip order (CO) individually leads to average performance improvements of $\sim 1.2\%$ and $\sim 0.9\%$ respectively, compared to the supervised baseline. The combination of both SSL methods yields the highest performance gain of $\sim 1.9\%$ .
|
| 199 |
+
|
| 200 |
+
# 4.6. Pre-training Model for Dataset Adaptation
|
| 201 |
+
|
| 202 |
+
This section explores the efficacy of leveraging models pre-trained on the $360 + x$ dataset for adaptation to other datasets such as THUMOS14 [8]. Adhering to the THUMOS14 setup, the experiments use the TriDet framework [21] for temporal action localisation (TAL).
|
| 203 |
+
|
| 204 |
+
The performance of this experiment, specifically the mean average precision (mAP) scores covering IoU thresholds from 0.3 to 0.7, is presented in Table 7. As the results show, relying exclusively on $360 + x$ video data for training already yields better performance than training solely on the Kinetics400 dataset [13]. Remarkably, this improvement becomes more prominent at higher IoU thresholds. The best performance, however, emerges from a two-step approach: pre-training on the Kinetics400 dataset followed by fine-tuning on the $360 + x$ dataset, with an average $\sim 4.2\%$ improvement over the extractor pre-trained solely on Kinetics400. This finding shows that employing the $360 + x$ dataset for feature extractor training can benefit dataset adaptation in downstream tasks. More experimental results on dataset integration are available in the supplementary materials.
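For reference, the mAP@tIoU columns in Table 7 are built on the temporal IoU between predicted and ground-truth segments: a prediction counts as a true positive at threshold $t$ only if its temporal IoU with an unmatched ground-truth segment of the same class is at least $t$. The snippet below is our own minimal illustration of that overlap computation, not part of the benchmark code.

```python
def temporal_iou(pred: tuple[float, float], gt: tuple[float, float]) -> float:
    """Intersection-over-union of two temporal segments given as (start, end) in seconds."""
    inter = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    union = (pred[1] - pred[0]) + (gt[1] - gt[0]) - inter
    return inter / union if union > 0 else 0.0

# A prediction [2.0 s, 8.0 s] against a ground truth [4.0 s, 10.0 s] overlaps by 4 s
# over a union of 8 s, i.e. tIoU = 0.5: a hit at mAP@0.5 but a miss at the stricter
# 0.6 and 0.7 thresholds, where Table 7 shows the largest gains.
print(temporal_iou((2.0, 8.0), (4.0, 10.0)))  # 0.5
```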
|
| 205 |
+
|
| 206 |
+
# 5. Conclusions
|
| 207 |
+
|
| 208 |
+
In this work, we studied the problem of panoptic scene understanding and presented, to our knowledge, a first-of-its-kind dataset, $360 + x$ , to support the study. The proposed $360 + x$ is a large-scale multi-modal dataset that consists of several different viewpoints (e.g. egocentric, third-person, and panoramic views) and covers various activities in real daily life. By offering as many available perspectives on a real-world scene as possible, $360 + x$ aims to support research in understanding the world around us in the way humans do (and even beyond). Additionally, we presented a benchmark study of several scene understanding tasks based on this newly collected dataset, with comparisons to other existing datasets. Extensive experimental analysis validated the effectiveness of each perspective within our dataset and suggested interesting insights, confirming that with more viewpoints or data modalities, the understanding of a scene can be more comprehensive. Surprisingly, models trained on our dataset without manual annotation (i.e. via self-supervised learning) even outperform those trained with human annotations in a fully supervised manner. We hope this new dataset will open up new directions in scene understanding, and we look forward to the research built upon it.
|
| 209 |
+
|
| 210 |
+
# Acknowledgement
|
| 211 |
+
|
| 212 |
+
This project was partially supported by the Ramsay Research Fund and the Royal Society Short Industry Fellowship (SIF\R1\231009). Y. Hou and C. Qu were partially supported by the CSC grant (No.202308060328) and Allsee Technologies Ltd., respectively. The computations described in this research were performed using the Baskerville Tier 2 HPC service<sup>1</sup> (funded by EP/T022221/1 and EP/W032244/1), which is operated by Advanced Research Computing at the University of Birmingham.
|
| 213 |
+
|
| 214 |
+
# References
|
| 215 |
+
|
| 216 |
+
[1] Keshav Bhandari, Mario A DeLaGarza, Ziliang Zong, Hugo Latapie, and Yan Yan. Egok360: A 360 egocentric kinetic human activity video dataset. In 2020 IEEE International Conference on Image Processing (ICIP), pages 266-270. IEEE, 2020. 2
|
| 217 |
+
[2] Honglie Chen, Weidi Xie, Andrea Vedaldi, and Andrew Zisserman. Vggsound: A large-scale audio-visual dataset. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 721-725. IEEE, 2020. 1, 3, 7
|
| 218 |
+
[3] Ziyang Chen, David F Fouhey, and Andrew Owens. Sound localization by self-supervised time delay estimation. In European Conference on Computer Vision, pages 489-508. Springer, 2022. 2, 3
|
| 219 |
+
[4] Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonino Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, et al. Scaling egocentric vision: The epic-kitchens dataset. In Proceedings of the European Conference on Computer Vision (ECCV), pages 720-736, 2018. 1, 2, 4, 5
|
| 220 |
+
[5] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 961-970, 2015. 1, 2, 4, 5, 7
|
| 221 |
+
[6] Jort F. Gemmeke, Daniel P. W. Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R. Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 776-780, 2017. 3
|
| 222 |
+
[7] Jort F. Gemmeke, Daniel P. W. Ellis, Dylan Freedman, Aren Jansen, Wade Lawrence, R. Channing Moore, Manoj Plakal, and Marvin Ritter. Audio set: An ontology and human-labeled dataset for audio events. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), page 776-780. IEEE Press, 2017. 1, 7
|
| 223 |
+
[8] A. Gorban, H. Idrees, Y.-G. Jiang, A. Roshan Zamir, I. Laptev, M. Shah, and R. Sukthankar. Thumos challenge: Action recognition with a large number of classes. http://www.thumos.info, 2015. 8
|
| 224 |
+
[9] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18995-19012, 2022. 1, 2, 5, 7
|
| 225 |
+
[10] Chunhui Gu, Chen Sun, David A Ross, Carl Vondrick, Caroline Pantofaru, Yeqing Li, Sudheendra Vijayanarasimhan, George Toderici, Susanna Ricco, Rahul Sukthankar, et al. Ava: A video dataset of spatio-temporally localized atomic visual actions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6047-6056, 2018. 3
|
| 226 |
+
|
| 227 |
+
[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 6
|
| 228 |
+
[12] Shawn Hershey, Sourish Chaudhuri, Daniel PW Ellis, Jort F Gemmeke, Aren Jansen, R Channing Moore, Manoj Plakal, Devin Platt, Rif A Saurous, Bryan Seybold, et al. Cnn architectures for large-scale audio classification. In 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 131-135. IEEE, 2017. 6
|
| 229 |
+
[13] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. 1, 2, 4, 5, 6, 7, 8
|
| 230 |
+
[14] H. Kuehne, H. Jhuang, E. Garrote, T. Poggio, and T. Serre. HMDB: a large video database for human motion recognition. In Proceedings of the International Conference on Computer Vision (ICCV), 2011. 5
|
| 231 |
+
[15] Hildegard Kuehne, Hueihan Jhuang, Estíbaliz Garrote, Tomaso Poggio, and Thomas Serre. Hmdb: a large video database for human motion recognition. In 2011 International conference on computer vision, pages 2556-2563. IEEE, 2011. 1, 4, 5
|
| 232 |
+
[16] Yiyi Liao, Jun Xie, and Andreas Geiger. Kitti-360: A novel dataset and benchmarks for urban scene understanding in 2d and 3d. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 2
|
| 233 |
+
[17] Benjamin R Meagher. Ecologizing social psychology: The physical environment as a necessary constituent of social processes. Personality and Social Psychology Review, 24(1): 3-23, 2020. 3
|
| 234 |
+
[18] George A Miller. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41, 1995. 4
|
| 235 |
+
[19] Xiaokang Peng, Yake Wei, Andong Deng, Dong Wang, and Di Hu. Balanced multimodal learning via on-the-fly gradient modulation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022. 6
|
| 236 |
+
[20] Daniel Scharstein, Heiko Hirschmüller, York Kitajima, Greg Krathwohl, Nera Nesic, Xi Wang, and Porter Westling. High-resolution stereo datasets with subpixel-accurate ground truth. In Pattern Recognition: 36th German Conference, GCPR 2014, Münster, Germany, September 2-5, 2014, Proceedings 36, pages 31-42. Springer, 2014. 1
|
| 237 |
+
[21] Dingfeng Shi, Yujie Zhong, Qiong Cao, Lin Ma, Jia Li, and Dacheng Tao. Tridet: Temporal action detection with relative boundary modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18857-18866, 2023. 7, 8
|
| 238 |
+
[22] Shuran Song, Andy Zeng, Angel X Chang, Manolis Savva, Silvio Savarese, and Thomas Funkhouser. Im2pano3d: Extrapolating 360 structure and semantics beyond the field of view. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3847-3856, 2018. 1, 2
|
| 239 |
+
[23] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402, 2012. 1, 2, 4, 5
|
| 240 |
+
|
| 241 |
+
[24] Tuan N Tang, Kwonyoung Kim, and Kwanghoon Sohn. Temporalmaxer: Maximize temporal context with only max pooling for temporal action localization. arXiv preprint arXiv:2303.09055, 2023. 7
|
| 242 |
+
[25] Jiangliu Wang, Jianbo Jiao, and Yunhui Liu. Self-supervised video representation learning by pace prediction. In European Conference on Computer Vision, 2020. 7, 8
|
| 243 |
+
[26] Weiyao Wang, Du Tran, and Matt Feiszli. What makes training multi-modal classification networks hard? In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12695-12705, 2020. 6
|
| 244 |
+
[27] Xiang Wang, Zhiwu Qing, Ziyuan Huang, Yutong Feng, Shiwei Zhang, Jianwen Jiang, Mingqian Tang, Changxin Gao, and Nong Sang. Proposal relation network for temporal action detection. arXiv preprint arXiv:2106.11812, 2021. 7
|
| 245 |
+
[28] Jianxiong Xiao, Krista A Ehinger, Aude Oliva, and Antonio Torralba. Recognizing scene viewpoint using panoramic place representation. In 2012 IEEE Conference on Computer Vision and Pattern Recognition, pages 2695-2702. IEEE, 2012. 1
|
| 246 |
+
[29] Dejing Xu, Jun Xiao, Zhou Zhao, Jian Shao, Di Xie, and Yueting Zhuang. Self-supervised spatiotemporal learning via video clip order prediction. In Computer Vision and Pattern Recognition (CVPR), 2019. 7, 8
|
| 247 |
+
[30] Guorun Yang, Xiao Song, Chaoqin Huang, Zhidong Deng, Jianping Shi, and Bolei Zhou. Drivingstereo: A large-scale dataset for stereo matching in autonomous driving scenarios. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 899–908, 2019. 1
|
| 248 |
+
[31] Serena Yeung, Olga Russakovsky, Ning Jin, Mykhaylo Andriluka, Greg Mori, and Li Fei-Fei. Every moment counts: Dense detailed labeling of actions in complex videos. International Journal of Computer Vision, 126:375–389, 2018. 2
|
| 249 |
+
[32] Chen-Lin Zhang, Jianxin Wu, and Yin Li. Actionformer: Localizing moments of actions with transformers. In European Conference on Computer Vision, pages 492-510. Springer, 2022. 7
|
| 250 |
+
[33] Hang Zhao, Antonio Torralba, Lorenzo Torresani, and Zhicheng Yan. Hacs: Human action clips and segments dataset for recognition and temporal localization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8668-8678, 2019. 5
|
| 251 |
+
[34] Bolei Zhou, Agata Lapedriza, Aditya Khosla, Aude Oliva, and Antonio Torralba. Places: A 10 million image database for scene recognition. IEEE transactions on pattern analysis and machine intelligence, 40(6):1452-1464, 2017. 4
|
360xapanopticmultimodalsceneunderstandingdataset/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:98b89547ba9c949df4745cf265dbb646475b8604b266a1712d7113dddd2f42e7
|
| 3 |
+
size 474050
|
360xapanopticmultimodalsceneunderstandingdataset/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:605d7b82c571c5770f5c43cb2b10cad2dfa5356bfa3876f190ba73d6ce511641
|
| 3 |
+
size 314543
|
3dawarefaceeditingviawarpingguidedlatentdirectionlearning/d47f630a-17d8-4298-a368-699d1959d603_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:70bd2905da48afd7d6446522f06ec8a12d01bb75b5c16f75c9d725c247c6b2ca
|
| 3 |
+
size 76836
|
3dawarefaceeditingviawarpingguidedlatentdirectionlearning/d47f630a-17d8-4298-a368-699d1959d603_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6b4045820ad38eb93ebebea449152c4d1e541d8e135a2f85d759d573e687a809
|
| 3 |
+
size 99151
|
3dawarefaceeditingviawarpingguidedlatentdirectionlearning/d47f630a-17d8-4298-a368-699d1959d603_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3a0fa72679d22a4272126af847ad1758963b741cd1ce833a938628d1721da3ec
|
| 3 |
+
size 7611252
|
3dawarefaceeditingviawarpingguidedlatentdirectionlearning/full.md
ADDED
|
@@ -0,0 +1,313 @@
| 1 |
+
# 3D-Aware Face Editing via Warping-Guided Latent Direction Learning
|
| 2 |
+
|
| 3 |
+
Yuhao Cheng $^{1}$ Zhuo Chen $^{1}$ Xingyu Ren $^{1}$ Wenhan Zhu $^{1}$ Zhengqin Xu $^{1}$ Di Xu $^{2}$ Changpeng Yang $^{2}$ Yichao Yan $^{1*}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ MoE Key Lab of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University $^{2}$ Huawei Cloud Computing Technologies Co., Ltd
|
| 6 |
+
|
| 7 |
+
{chengyuhao,ningci5252,rxy_sjtu,zhuwenhan823,fate311,yanyichao}@sjtu.edu.cn, {xudi21,yangchangpeng}@huawei.com
|
| 8 |
+
|
| 9 |
+

|
| 10 |
+
Figure 1. An example of our warping-guided 3D-aware face editing method. Our method allows users to edit 3D faces intuitively by dragging points from multiple perspectives. Moreover, our method achieves disentangled editing of shape, expression, and view while maintaining 3D consistency. Please zoom in for detailed observation.
|
| 11 |
+
|
| 12 |
+
# Abstract
|
| 13 |
+
|
| 14 |
+
3D facial editing, a longstanding task in computer vision with broad applications, is expected to manipulate any face from arbitrary viewpoints quickly and intuitively, following the user's will. Existing works have limitations in terms of intuitiveness, generalization, and efficiency. To overcome these challenges, we propose FaceEdit3D, which allows users to directly manipulate 3D points to edit a 3D face, achieving natural and rapid face editing. After one or several points are manipulated by the user, we propose tri-plane warping to directly deform the view-independent 3D representation. To address the distortion caused by tri-plane warping, we train a warp-aware encoder to project the warped face onto a standardized latent space. In this space, we further propose directional latent editing to mitigate the identity bias caused by the encoder and realize the disentangled editing of various attributes. Extensive experiments show that our method achieves superior results with rich facial details and good identity preservation. Our approach also supports general applications like
|
| 15 |
+
|
| 16 |
+
multi-attribute continuous editing and cat/car editing. The project website is https://cyh-sj.github.io/FaceEdit3D/.
|
| 17 |
+
|
| 18 |
+
# 1. Introduction
|
| 19 |
+
|
| 20 |
+
High-quality face editing has long been an important research topic in computer vision, with a wide range of applications including social media and film production. Previous methods [16, 36, 43] based on 2D GANs [22, 23] have demonstrated the capability of editing facial images with high fidelity. Recently, benefiting from the impressive achievements of 3D-aware generative models, especially for generative digital humans [2-4, 11, 15, 32, 33, 41, 45, 51, 53, 55, 56, 64], the field of 3D facial editing has attracted further interest due to its promising capacity for manipulating a 3D representation.
|
| 21 |
+
|
| 22 |
+
Typically, 3D face editing methods can be classified into three categories: prior-guided conditioning, parameter-space fine-tuning, and latent-space optimization, as summarized in Tab. 1. Specifically, prior-guided conditioning methods [18, 46-48] employ an additional well-
|
| 23 |
+
|
| 24 |
+
<table><tr><td>Scheme</td><td>Methods</td><td>Intuitiveness</td><td>Generalization</td><td>Efficiency</td></tr><tr><td>Conditional control</td><td>[18, 46, 48]</td><td>✓</td><td>✗</td><td>✓</td></tr><tr><td>Fine-tuned models</td><td>[6, 13, 59]</td><td>✓</td><td>✓</td><td>✗</td></tr><tr><td>Supervised directions</td><td>[1, 36, 43]</td><td>✓</td><td>✗</td><td>✗</td></tr><tr><td rowspan="3">Unsupervised directions</td><td>[16, 42, 67]</td><td>✗</td><td>✓</td><td>✗</td></tr><tr><td>[34] (2D)</td><td>△</td><td>✓</td><td>✗</td></tr><tr><td>Ours</td><td>✓</td><td>✓</td><td>✓</td></tr></table>
|
| 25 |
+
|
| 26 |
+
Table 1. Summary of 3D-aware face editing methods. $\triangle$ indicates that its instructions are semantically somewhat ambiguous.
|
| 27 |
+
|
| 28 |
+
designed conditioning module to introduce control information, e.g., semantic maps [18, 46] and 3DMM [48, 49], into the 3D-aware models. Although flexible, these models typically require a large number of face images with their control labels for training. Parameter-space fine-tuning methods [6, 13, 59] optimize the pre-trained generators given the target input, achieving zero-shot editing with the help of large language-image models, e.g., CLIP [38] or Stable Diffusion [39]. However, these methods must maintain a separate generator for each specific editing target, severely constraining their generalization.
|
| 29 |
+
|
| 30 |
+
Due to the rich distributions learned in the pre-trained generator, discovering the meaningful directions in the latent space allows for a wide range of editing without the need to modify the generator and dependence on a large amount of training data. According to the exploration of editing direction, latent-space optimization can be achieved in supervised and unsupervised ways. Supervised methods [1, 36, 43, 44] search the meaningful directions in the latent space by learning labeled data for each specific editing. However, these methods cannot be generalized beyond the training domain. In contrast, unsupervised methods [16, 42, 50, 65-67] discover out-of-domain directions by analyzing the distribution of the latent space. However, the editing directions in the latent space are typically not semantically intuitive for the users. Accordingly, introducing interactive guidance to bridge the gap between the latent space and the user's intuition becomes the main purpose of the unsupervised methods.
|
| 31 |
+
|
| 32 |
+
To achieve this, several works [12, 34] utilize manipulating points on 2D images to optimize latent code in an unsupervised way, achieving image editing intuitively. The most prominent method DragGAN [34] proposes motion supervision and point tracking to optimize the latent code in a self-supervised manner, showcasing its flexible and intuitive editing capabilities. Considering their success on 2D images, it would be highly desirable if we could also manipulate 3D points to edit a 3D facial representation. However, it is non-trivial to directly extend point dragging to 3D-aware facial editing, due to the following challenges. 1) These methods ignore the global 3D facial structure and only focus on the movements of specific points, potentially
|
| 33 |
+
|
| 34 |
+
leading to exaggerated distortions. 2) These methods employ an inefficient approach to optimize the latent codes for image editing. Therefore, extending this procedure to 3D-aware generators fails to meet the demands of 3D interactive applications. 3) The controllability of point dragging is less precise and may cause ambiguous targets, e.g., enlarging the shape of the mouth may lead the mouth to open.
|
| 35 |
+
|
| 36 |
+
To overcome these challenges, we propose FaceEdit3D to learn editing directions guided by 3D-consistent face warping, realizing intuitive and rapid 3D-aware facial editing. (1) First, we propose tri-plane warping on the 3D representation to achieve accurate 3D-consistent facial editing, which allows us to sidestep inaccurate motion supervision. Further, we introduce 3D landmarks rather than arbitrary points as a facial prior, constraining the edits to the distribution of natural faces. Although tri-plane warping allows for precise editing, it introduces slight facial distortions. (2) Hence, we train a warp-aware encoder instead of performing latent optimization, straightforwardly projecting the warped renderings into the standardized space and enabling fast and photorealistic editing. Due to the complex semantic information in the latent space of 3D-aware generators, the obtained encoder suffers from inherent bias, resulting in a loss of details and identity shifting. (3) Therefore, we propose to learn hierarchical directional editing in latent space, enabling disentangled face editing with identity and detail preservation.
|
| 37 |
+
|
| 38 |
+
With all the designs above, we successfully introduce dragging-based edits into 3D face representations. Our work achieves an efficient and straightforward editing process that also enables the decoupling of facial expressions and shapes. Compared to other face editing approaches, our method offers a more intuitive bridge while avoiding dependence on 3D annotations. Extensive experiments have demonstrated the superiority of our method in intuitiveness, generalization, and efficiency for the task of facial editing.
|
| 39 |
+
|
| 40 |
+
The main contributions are summarized as follows:
|
| 41 |
+
|
| 42 |
+
- We design an efficient and straightforward 3D-aware face editing pipeline that is in line with the user's intuition.
|
| 43 |
+
- We propose to warp the face in the tri-plane feature level, enabling 3D-consistent face manipulation.
|
| 44 |
+
- We propose a warp-aware encoder that better identifies subtle changes and efficiently solves the facial distortion caused by the tri-plane warp.
|
| 45 |
+
- We propose directional editing in latent space, achieving disentangled facial editing with the preservation of identity and details.
|
| 46 |
+
|
| 47 |
+
# 2. Related Works
|
| 48 |
+
|
| 49 |
+
# 2.1. 3D-aware GANs
|
| 50 |
+
|
| 51 |
+
Inspired by the superiority of implicit representations [31], several attempts [2-4, 11, 15, 32, 33, 41, 45, 53, 55, 64] deploy radiance fields in generative models and thus enable
|
| 52 |
+
|
| 53 |
+
3D-consistent image synthesis. The capability of learning 3D representations from only unposed single-view 2D images has earned these 3D-aware GAN models wide interest and broad application. However, some 3D-aware GANs [3, 15, 32, 33, 41, 64] adopt fully implicit representations that lack pre-computed 3D features before point sampling. As a consequence, they need to regenerate the 3D features for every novel viewpoint, limiting their efficiency in interactive applications. To address this challenge, several works [2, 4, 45, 53] adopt hybrid representations that first generate view-independent features and then sample points from these pre-computed features for novel view synthesis. Consequently, these methods realize rapid generation while maintaining an inherently 3D-consistent representation. Specifically, EG3D [4] introduces the lightweight tri-plane representation into the generator to raise efficiency and further enhance image quality. Considering its efficient representation and mature downstream techniques, we adopt EG3D [4] as the base 3D-aware model to demonstrate the effectiveness of our method.
|
| 54 |
+
|
| 55 |
+
# 2.2. Implicit Representations Deformation
|
| 56 |
+
|
| 57 |
+
The deformation of 3D implicit representations has long attracted wide attention, as it serves as the foundation of broad animation applications. Prior research predominantly introduces an additional deformation field on top of the original representation to modify the 3D points. Specifically, deformation fields can be implemented through proxy-based editing [14, 21, 35, 57], cage-based editing [17, 37, 54], and parametric prior-based editing [40, 52, 63], etc. Proxy-based editing learns a lightweight neural network to compute the translation and rotation of 3D points, enabling the deformation of the original 3D coordinates. Cage-based methods establish a surrounding cage that fully covers the original surface of an implicit representation and then modify the cage to deform the underlying surface. Parametric prior-based methods leverage parametric models such as SMPL [29] and FLAME [27] as a prior condition of the deformation network to drive the implicit representations. However, all of these approaches need to optimize a controllable module for each specific object, lacking efficiency and generality. In contrast, our work provides a landmark-based way to directly edit the 3D representation without optimization and further compresses the 3D deformation into 2D feature planes to improve efficiency.
|
| 58 |
+
|
| 59 |
+
# 2.3. Face Editing in GANs
|
| 60 |
+
|
| 61 |
+
As the latent space learned by the conditioned GANs contains most of the distribution knowledge, many works [1, 42, 43, 50, 69] explore the latent space of a pre-trained generator for subsequent facial attribute editing. Specifically, InterFaceGAN [43] studies the semantics encoded in the latent space and disentangles the facial semantics with linear
|
| 62 |
+
|
| 63 |
+
projection. To explicitly edit facial attributes, further works explore utilizing intuitive representations, e.g., semantic maps [5, 46, 47, 68] and text prompts [19, 36], for the optimization or the extension of the latent space. Moreover, the idea of directly dragging the face for editing has caught wide attention. DragGAN [34] optimizes the latent space via dragging selected points on the image to target positions. However, it is hard to preserve the facial identity when the source and target points are far apart, preventing DragGAN from large-scale editing. Despite the prominent performance of latent space manipulation, it still faces a challenge in balancing identity preservation and editing amplitude. To further enhance the editing capability, several works [6, 13, 24] focus on the parameter space of a pre-trained generator. While these methods can achieve out-of-domain editing, they need to maintain a specific generator for each attribute manipulation, lacking efficiency. Compared to the methods mentioned above, our method offers an intuitive way of dragging points to deform the 3D representations while improving efficiency and preserving identity.
|
| 64 |
+
|
| 65 |
+
# 3. Methods
|
| 66 |
+
|
| 67 |
+
Our proposed framework, FaceEdit3D, aims at multi-view consistent facial editing in shape, expression, and pose via warping-guided directional editing, as illustrated in Fig. 2. To this end, we first review the 3D-aware GAN that achieves high-resolution face rendering from multiple views (Sec. 3.1). Based on the 3D-aware generator, we propose a point-guided feature-space warping method that manipulates the inherent tri-plane representations while ensuring the 3D consistency (Sec. 3.2). However, directly editing the tri-plane may lead to distortions in the final rendered images. Therefore, we train a specifically designed encoder to project the warped renderings to the standardized latent space for photo-realistic editing results (Sec. 3.3). Finally, we delve into the mechanism of latent space and propose directional editing in latent space that enables the disentangled editing of facial shape, expression, and pose (Sec. 3.4).
|
| 68 |
+
|
| 69 |
+
# 3.1. Preliminaries on 3D-aware Face Generator
|
| 70 |
+
|
| 71 |
+
Our framework is built upon EG3D [4], one of the most powerful 3D-aware generative models for photorealistic 3D face generation. The generator of EG3D introduces a tri-plane representation, which compactly encodes the geometry and appearance of a 3D face. Specifically, the tri-plane features can be denoted as $\mathbf{F} = \mathcal{G}(\mathbf{w})\in \mathbb{R}^{3\times 32\times 256\times 256}$ , where $\mathbf{w}$ is a latent code. To render face images from a specific viewpoint, the features of 3D coordinates are sampled from the tri-plane features, and a shallow decoder projects the tri-plane feature $\mathbf{F}(x,y,z)\in \mathbb{R}^{32\times 3}$ into volume density $\sigma \in \mathbb{R}^1$ and a color feature $c\in \mathbb{R}^{32}$ . Subsequently, a low-resolution feature
|
| 72 |
+
|
| 73 |
+

|
| 74 |
+
(a) 3D-consistent Tri-plane Warp
|
| 75 |
+
|
| 76 |
+

|
| 77 |
+
(b) The Pipeline of Our FaceEdit3D
|
| 78 |
+
Figure 2. Overview of our proposed FaceEdit3D. (a) A detailed illustration of our tri-plane warp. We project 2D key points onto the 3D face surface and then map them to each corresponding plane within a tri-plane representation. Afterward, we apply warping operations to each plane to achieve 3D-consistent editing. (b) The full pipeline of our FaceEdit3D. Given a source image $\mathbf{I}_s$ with its latent code $\mathbf{w}_s$ , we first perform the tri-plane warping on it and obtain the warped rendering $\hat{\mathbf{I}}_t$ . Subsequently, we utilize a warp-aware encoder to extract the latent codes $\mathbf{w}_s'$ and $\mathbf{w}_t'$ from the source image $\mathbf{I}_s$ and the warped renderings $\hat{\mathbf{I}}_t$ , respectively. Then, we employ the hierarchical latent direction to update the target latent code $\mathbf{w}_t$ . Finally, the edited facial image $\mathbf{I}_t$ can be synthesized via the updated latent code $\mathbf{w}_t$ .
|
| 79 |
+
|
| 80 |
+
map is generated via volume rendering and then upsampled to high-resolution images. The representation ability of tri-plane features has been verified by several recent works [7, 20, 24]. Therefore, to achieve 3D-consistent editing, we choose to operate directly on the tri-plane features.
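As a rough sketch of how such a tri-plane is queried (our simplified reading of the representation, not EG3D's released code; sum-aggregation is one common choice), each 3D point is projected onto the three axis-aligned planes, and the bilinearly sampled 32-channel features are aggregated before being decoded into density and color:

```python
import torch
import torch.nn.functional as F

def sample_triplane(planes: torch.Tensor, pts: torch.Tensor) -> torch.Tensor:
    """planes: (3, 32, 256, 256) xy/xz/yz feature planes; pts: (N, 3) in [-1, 1].
    Returns (N, 32) aggregated point features."""
    coords = [pts[:, [0, 1]], pts[:, [0, 2]], pts[:, [1, 2]]]  # per-plane projections
    feats = []
    for plane, uv in zip(planes, coords):
        grid = uv.view(1, -1, 1, 2)                            # (1, N, 1, 2)
        f = F.grid_sample(plane.unsqueeze(0), grid,
                          mode="bilinear", align_corners=False)  # (1, 32, N, 1)
        feats.append(f.squeeze(0).squeeze(-1).T)               # (N, 32)
    return torch.stack(feats).sum(dim=0)                       # aggregate over planes

planes = torch.randn(3, 32, 256, 256)
pts = torch.rand(1024, 3) * 2 - 1
print(sample_triplane(planes, pts).shape)  # torch.Size([1024, 32])
```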
|
| 81 |
+
|
| 82 |
+
# 3.2. Multi-view Consistent Face Warping
|
| 83 |
+
|
| 84 |
+
For 3D face editing, directly dragging points on the rendered images is a flexible interface for users. Different from 2D-level editing, which is limited to one specific viewpoint, 3D-level manipulation should support editing from an arbitrary viewpoint and achieve 3D-consistent editing effects. To achieve this, we propose a framework based on point-guided tri-plane warping, where users manipulate one or several points from a desirable viewpoint, and the tri-plane features are warped according to the point displacements.
|
| 85 |
+
|
| 86 |
+
Point Manipulation by Users. Ideally, users can directly modify arbitrary points in a rendered face to achieve editing. Nevertheless, the potential conflicts among excessive control points may lead to undesirable distortions of the facial structure during the joint point manipulation, consequently yielding results that deviate from realistic human appearances. To address this issue, we constrain the users to manipulate a set of meaningful 3D facial landmarks to guarantee a natural face structure.
|
| 87 |
+
|
| 88 |
+
Specifically, given a latent code $\mathbf{w}_s$ and a pre-trained EG3D generator $\mathcal{G}$ , the portrait is first rendered in the front view with camera intrinsics $\mathbf{K}$ . Then, 2D facial landmarks are detected by a pre-trained detector and projected onto the facial surface to obtain 3D landmarks $\mathbf{P} = \{\mathbf{p}_0,\mathbf{p}_1,\dots ,\mathbf{p}_n\} \in \mathbb{R}^{n\times 3}$ , with $\mathbf{p}_i = \{\mathbf{p}_i^x,\mathbf{p}_i^y,\mathbf{p}_i^z\} \in \mathbb{R}^3$ . Consequently, users can render images from an arbitrary viewpoint with extrinsic $\mathbf{R} \in \mathbb{S}\mathbb{O}(3)$ and select any specific
|
| 89 |
+
|
| 90 |
+
points for editing. Taking the selected point $\mathbf{p}_i$ as an example, we constrain its movement $\Delta \mathbf{p}_i$ to be perpendicular to the rendering direction. The updated 3D point $\mathbf{p}_i^{\prime}$ is represented as:
|
| 91 |
+
|
| 92 |
+
$$
|
| 93 |
+
\mathbf{p}_i^{\prime} = \mathbf{p}_i + \mathbf{R}^{-1}\mathbf{K}^{-1}\mathbf{Z}\,\Delta \mathbf{p}_i, \tag{1}
|
| 94 |
+
$$
|
| 95 |
+
|
| 96 |
+
where $\mathbf{Z}$ is the depth of the selected point in the pose $\mathbf{R}$ . After manipulating specific points within the facial structure, we obtain a set of new 3D landmarks $\mathbf{P}' = \{\mathbf{p}_0', \mathbf{p}_1', \dots, \mathbf{p}_n'\}$ .
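A minimal NumPy sketch of Eq. (1) follows. Lifting the pixel displacement with a zero homogeneous component (so the 3D movement stays perpendicular to the viewing direction, as stated above) is our reading of the equation, not the authors' released code:

```python
import numpy as np

def move_landmark(p: np.ndarray, dp_pix: np.ndarray, K: np.ndarray,
                  R: np.ndarray, Z: float) -> np.ndarray:
    """p: (3,) landmark; dp_pix: (2,) pixel drag (du, dv); K: (3,3); R: (3,3)."""
    dp_h = np.array([dp_pix[0], dp_pix[1], 0.0])      # displacement only, no depth change
    # R^{-1} = R^T for a rotation; K^{-1} (Z * dp_h) unprojects the pixel drag at depth Z.
    return p + R.T @ (np.linalg.inv(K) @ (Z * dp_h))

K = np.array([[500.0, 0.0, 128.0], [0.0, 500.0, 128.0], [0.0, 0.0, 1.0]])
R = np.eye(3)                                         # front view for simplicity
p_new = move_landmark(np.array([0.0, 0.0, 2.5]), np.array([10.0, -5.0]), K, R, Z=2.5)
```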
|
| 97 |
+
|
| 98 |
+
Tri-plane Warping. After the user has manipulated the key points, we apply 3D warping on the tri-planes to edit the 3D representation. By considering each of the tri-plane features individually [7], we can decompose the editing in 3D space into three 2D planes to enhance efficiency. Therefore, we begin by projecting the 3D landmarks onto the three feature planes, and then individually apply a similar warping transformation to each of these feature planes, as illustrated in Fig. 2 (a). Taking the $xy$ -plane $\mathbf{F}_{xy}$ as an example, given $n$ source projected points $\mathbf{P}^{xy} = \{\mathbf{p}_0^{xy}, \mathbf{p}_1^{xy}, \dots, \mathbf{p}_n^{xy}\} \in \mathbb{R}^{n \times 2}$ , $\mathbf{p}_i^{xy} = \{\mathbf{p}_i^x, \mathbf{p}_i^y\}$ , and their target points $\hat{\mathbf{P}}^{xy} = \{\hat{\mathbf{p}}_0^{xy}, \hat{\mathbf{p}}_1^{xy}, \dots, \hat{\mathbf{p}}_n^{xy}\}$ , we employ thin-plate spline interpolation [9] to compute the grid sampler with:
|
| 99 |
+
|
| 100 |
+
$$
|
| 101 |
+
g(\mathbf{q}) = \sum_{i=1}^{n} w_i \, \phi\left(\left\| \mathbf{q} - \hat{\mathbf{p}}_i \right\|\right) + \mathbf{v}^{T}\mathbf{q} + \mathbf{b}, \tag{2}
|
| 102 |
+
$$
|
| 103 |
+
|
| 104 |
+
where $\phi (r) = r^2\log (r)$ is the kernel function and $g(\mathbf{q})$ provides the inverse mapping from a warped location $\mathbf{q}$ back to the original plane coordinates. The parameters $\mathbf{v},\mathbf{b}$ are chosen to minimize a measure of curvature. Similarly, by applying such inverse mappings to all three planes, we complete the tri-plane warping and achieve an inherently
|
| 105 |
+
|
| 106 |
+
3D-consistent modification. Compared to manipulating the sampled 3D coordinate space [60, 62], our method directly manipulates the 3D representation, enabling simultaneous editing from multiple viewpoints without additional steps.
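The sketch below illustrates Eq. (2) on a single feature plane: we fit the inverse TPS mapping from the dragged (target) control points back to the source points with the kernel $\phi(r) = r^2 \log r$, evaluate it on a dense grid, and backward-warp the plane with `grid_sample`. This is our own illustration under simplified assumptions, not the paper's implementation:

```python
import torch
import torch.nn.functional as F

def tps_fit(tgt: torch.Tensor, src: torch.Tensor):
    """tgt, src: (n, 2) control points in [-1, 1]. Solve for TPS coefficients of g."""
    n = tgt.shape[0]
    d = torch.cdist(tgt, tgt)                       # pairwise control-point distances
    phi = d.pow(2) * torch.log(d + 1e-9)            # phi(r) = r^2 log r
    P = torch.cat([torch.ones(n, 1), tgt], dim=1)   # affine part [1, x, y]
    A = torch.zeros(n + 3, n + 3)
    A[:n, :n], A[:n, n:], A[n:, :n] = phi, P, P.T
    b = torch.cat([src, torch.zeros(3, 2)], dim=0)
    return torch.linalg.solve(A, b), tgt            # coefficients (n+3, 2), centers

def tps_apply(coeffs: torch.Tensor, ctrl: torch.Tensor, q: torch.Tensor) -> torch.Tensor:
    """Evaluate g(q) for query points q: (m, 2)."""
    d = torch.cdist(q, ctrl)
    phi = d.pow(2) * torch.log(d + 1e-9)
    P = torch.cat([torch.ones(q.shape[0], 1), q], dim=1)
    return torch.cat([phi, P], dim=1) @ coeffs

def warp_plane(plane: torch.Tensor, src_pts: torch.Tensor, tgt_pts: torch.Tensor,
               res: int = 256) -> torch.Tensor:
    """plane: (1, C, res, res). Backward-warp so that src_pts land on tgt_pts."""
    coeffs, ctrl = tps_fit(tgt_pts, src_pts)        # inverse mapping: target -> source
    ys, xs = torch.meshgrid(torch.linspace(-1, 1, res),
                            torch.linspace(-1, 1, res), indexing="ij")
    grid = torch.stack([xs, ys], dim=-1).view(-1, 2)
    sample = tps_apply(coeffs, ctrl, grid).view(1, res, res, 2).clamp(-1, 1)
    return F.grid_sample(plane, sample, align_corners=False)

plane = torch.randn(1, 32, 256, 256)                # one feature plane
src = torch.rand(29, 2) * 2 - 1                     # 29 projected landmarks
tgt = src + 0.05 * torch.randn_like(src)            # small user drags
warped = warp_plane(plane, src, tgt)                # (1, 32, 256, 256)
```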
|
| 107 |
+
|
| 108 |
+
# 3.3. Warp-Aware Encoding
|
| 109 |
+
|
| 110 |
+
After tri-plane warping, the editing results exhibit 3D-consistent modification. However, directly applying the warping operation to tri-plane features may not conform to the facial distribution in the latent space, leading to a severely distorted appearance. To solve this problem, our solution is to encode the distorted facial image $\hat{\mathbf{I}}_t$ into a standardized latent space that learns the natural counterpart $\mathbf{w}_t^\prime$ of the distorted face with an encoder $\mathcal{E}$ :
|
| 111 |
+
|
| 112 |
+
$$
|
| 113 |
+
\mathbf{w}_t^{\prime} = \mathcal{E}(\hat{\mathbf{I}}_t). \tag{3}
|
| 114 |
+
$$
|
| 115 |
+
|
| 116 |
+
To train the encoder, we sample images from the pre-trained generator to obtain image and latent code pairs. Specifically, the portrait $\mathbf{I}_s$ is generated from a randomly sampled latent code and camera pose $\mathbf{c}$ . Subsequently, the portrait $\mathbf{I}_s$ is projected to the latent code $\mathbf{w}_s^\prime$ by the encoder $\mathcal{E}$ , and the corresponding image $\mathbf{I}_s^\prime$ is then generated by the same frozen generator $\mathcal{G}$ and pose $\mathbf{c}$ . The optimization objective of the encoder is the combination of an L1 loss, LPIPS loss [61], and identity loss [10]:
|
| 117 |
+
|
| 118 |
+
$$
|
| 119 |
+
\mathcal{L}_o = \mathcal{L}_1\left(\mathbf{I}_s, \mathbf{I}_s^{\prime}\right) + \mathcal{L}_{\mathrm{LPIPS}}\left(\mathbf{I}_s, \mathbf{I}_s^{\prime}\right) + \mathcal{L}_{\mathrm{ID}}\left(\mathbf{I}_s, \mathbf{I}_s^{\prime}\right). \tag{4}
|
| 120 |
+
$$
|
| 121 |
+
|
| 122 |
+
Unfortunately, we find that the encoder trained with the aforementioned objective struggles to identify subtle modifications due to the inherent complexity of 3D-aware generators. Hence, we further introduce tri-plane warping as a data augmentation to enhance the perception of subtle edits. Similar to the training pipeline above, we apply the encoder to the warped rendering $\hat{\mathbf{I}}_t$ to obtain the latent code $\mathbf{w}_t'$ , thus generating its inverted image $\mathbf{I}_t'$ . The loss is calculated between $\mathbf{I}_t'$ and $\hat{\mathbf{I}}_t$ :
|
| 123 |
+
|
| 124 |
+
$$
|
| 125 |
+
\mathcal{L}_w = \mathcal{L}_1\left(\hat{\mathbf{I}}_t, \mathbf{I}_t^{\prime}\right) + \mathcal{L}_{\mathrm{LPIPS}}\left(\hat{\mathbf{I}}_t, \mathbf{I}_t^{\prime}\right) + \mathcal{L}_{\mathrm{ID}}\left(\hat{\mathbf{I}}_t, \mathbf{I}_t^{\prime}\right). \tag{5}
|
| 126 |
+
$$
|
| 127 |
+
|
| 128 |
+
Besides, following GOAE [58], we utilize a discriminator $\mathcal{D}$ to ensure that the latent codes $\mathbf{w}_t^\prime$ and $\mathbf{w}_s^\prime$ lie in the standardized latent space:
|
| 129 |
+
|
| 130 |
+
$$
|
| 131 |
+
\begin{aligned} \mathcal{L}_d = \; & \mathbb{E}\left[ f\left(\mathcal{D}\left(\mathbf{w}_t^{\prime}\right)\right) + f\left(\mathcal{D}\left(\mathbf{w}_s^{\prime}\right)\right) \right] \\ & + \mathbb{E}\left[ f\left(-\mathcal{D}\left(\mathbf{w}_c\right)\right) \right] + \gamma \left\| \nabla \mathcal{D}\left(\mathbf{w}_c\right) \right\|^{2}, \end{aligned} \tag{6}
|
| 132 |
+
$$
|
| 133 |
+
|
| 134 |
+
where $f(x) = -\log (1 + \exp (-x))$ , and $\gamma$ is the hyperparameter of the R1 regularization. $\mathbf{w}_{c}$ are standardized latent codes pre-sampled from the frozen generator. The final objective linearly combines the aforementioned losses:
|
| 135 |
+
|
| 136 |
+
$$
|
| 137 |
+
\mathcal{L} = \mathcal{L}_o + \mathcal{L}_w + \mathcal{L}_d. \tag{7}
|
| 138 |
+
$$
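As an illustration of the adversarial term in Eq. (6), the sketch below writes $f$ via a numerically stable softplus and includes the R1 gradient penalty on pre-sampled standardized codes. The tiny MLP discriminator is a stand-in, not the architecture used in the paper, and the reconstruction terms of Eqs. (4)-(5) would be added on top with standard L1/LPIPS/ArcFace losses:

```python
import torch
import torch.nn.functional as F

def f(x: torch.Tensor) -> torch.Tensor:
    return -F.softplus(-x)                 # f(x) = -log(1 + exp(-x))

# Stand-in latent discriminator over 512-d codes (illustrative architecture).
D = torch.nn.Sequential(torch.nn.Linear(512, 256), torch.nn.LeakyReLU(0.2),
                        torch.nn.Linear(256, 1))

def latent_adv_loss(w_t: torch.Tensor, w_s: torch.Tensor, w_c: torch.Tensor,
                    gamma: float = 10.0) -> torch.Tensor:
    """w_t, w_s: encoder outputs; w_c: pre-sampled standardized ('real') codes."""
    w_c = w_c.requires_grad_(True)
    d_real = D(w_c)
    # R1 regularization: penalize the gradient norm of D at the real codes.
    grad, = torch.autograd.grad(d_real.sum(), w_c, create_graph=True)
    r1 = grad.pow(2).sum(dim=1).mean()
    return (f(D(w_t)) + f(D(w_s))).mean() + f(-d_real).mean() + gamma * r1

loss_d = latent_adv_loss(torch.randn(4, 512), torch.randn(4, 512), torch.randn(4, 512))
```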
|
| 139 |
+
|
| 140 |
+
After the training process, the edited rendering is projected into latent space and then passed to the generator to yield a more reasonable editing result in the target view $\mathbf{c}_t$ :
|
| 141 |
+
|
| 142 |
+
$$
|
| 143 |
+
\mathbf{I}_t = \mathcal{G}\left(\mathbf{w}_t^{\prime}, \mathbf{c}_t\right). \tag{8}
|
| 144 |
+
$$
|
| 145 |
+
|
| 146 |
+
# 3.4. Directional Editing in Latent Space
|
| 147 |
+
|
| 148 |
+
The warp-aware encoder solves the severely distorted appearance caused by the tri-plane warp; however, it additionally introduces identity bias into the latent codes, as the encoder cannot faithfully invert faces. Besides, it is still hard to handle the ambiguity during point manipulation. Therefore, we propose to learn directional editing in the latent space to further overcome these two challenges.
|
| 149 |
+
|
| 150 |
+
To begin with, we adopt the difference between the latent codes extracted by the encoder from the images before and after warping as the direction guidance. In this way, we mitigate the identity bias and bypass the problem caused by the encoder. Furthermore, we follow StyleCLIP [36] to explore the semantics of layers in the $W+$ latent space of EG3D [4], empowering our method with disentangled editing of expression and shape. Thanks to this hierarchical mechanism, we can obtain the desired editing results by applying editing directions in different layer groups to the same warped facial image, successfully avoiding the ambiguity caused by the tri-plane warp.
|
| 151 |
+
|
| 152 |
+
The full pipeline is shown in Fig. 2 (b). Given a latent code $\mathbf{w}_s$ and the frozen EG3D generator $\mathcal{G}$ , the facial tri-plane is generated and warped, yielding the source image $\mathbf{I}_s$ and the warped rendering $\hat{\mathbf{I}}_t$ . The warp-aware encoder then projects these two images to standardized latent codes $\mathbf{w}_s'$ and $\mathbf{w}_t'$ with Eq. (3), respectively. The target edited latent code $\mathbf{w}_t$ can be calculated with:
|
| 153 |
+
|
| 154 |
+
$$
|
| 155 |
+
\mathbf{w}_t = \mathbf{w}_s + H\left(\mathbf{w}_t^{\prime} - \mathbf{w}_s^{\prime}\right), \tag{9}
|
| 156 |
+
$$
|
| 157 |
+
|
| 158 |
+
where $H(\cdot)$ is a feature-selection module for disentangling the latent direction. Finally, the modified portrait $\mathbf{I}_t$ can be rendered from any perspective $\mathbf{c}_t$ with $\mathbf{I}_t = \mathcal{G}(\mathbf{w}_t,\mathbf{c}_t)$ .
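A minimal sketch of Eq. (9) models $H(\cdot)$ as a per-layer binary mask over the $W+$ code, so the encoder-derived direction is applied only in the layer group tied to the target attribute; the layer counts and indices below are purely illustrative, whereas the paper derives the groups by probing the hierarchy of EG3D's latent space:

```python
import torch

def directional_edit(w_s: torch.Tensor, w_s_enc: torch.Tensor,
                     w_t_enc: torch.Tensor, layers: list) -> torch.Tensor:
    """w_*: (L, 512) W+ codes. Apply the warped-minus-source direction only on
    the selected layers (the role of H in Eq. (9)); other layers keep w_s."""
    direction = w_t_enc - w_s_enc            # encoder-derived edit direction
    mask = torch.zeros(w_s.shape[0], 1)
    mask[layers] = 1.0                       # H(.) as a layer-selection mask
    return w_s + mask * direction

L = 14                                       # number of W+ layers (illustrative)
w_s, w_s_enc, w_t_enc = (torch.randn(L, 512) for _ in range(3))
w_shape = directional_edit(w_s, w_s_enc, w_t_enc, layers=[0, 1, 2, 3])  # coarse layers
w_expr = directional_edit(w_s, w_s_enc, w_t_enc, layers=[4, 5, 6, 7])   # mid layers
```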
|
| 159 |
+
|
| 160 |
+
# 4. Experiments
|
| 161 |
+
|
| 162 |
+
In this section, we evaluate the efficiency and the quality of our 3D-aware face editing model. We first introduce the implementation details of our work (Sec. 4.1). Subsequently, we compare our method with the SOTA 3D face editing methods qualitatively (Sec. 4.2) and quantitatively (Sec. 4.3). Then, we conduct ablation studies to analyze the effect of each component (Sec. 4.4). Finally, we introduce the potential applications of our method (Sec. 4.5).
|
| 163 |
+
|
| 164 |
+
# 4.1. Implementation Details
|
| 165 |
+
|
| 166 |
+
We build our approach on EG3D [4] pre-trained on the FFHQ dataset [22]. We employ MediaPipe [30] to detect 2D landmarks and select 29 points for user manipulation. To obtain 3D landmarks, we first detect 2D landmarks
|
| 167 |
+
|
| 168 |
+

|
| 169 |
+
Figure 3. Qualitative comparisons with current SOTA methods for 3D face shape and expression editing. (a), (b), and (c) are the results of synthetic samples, and (d) showcases the results of a real-world portrait.
|
| 170 |
+
|
| 171 |
+
in the frontal view, and then compute the 3D coordinates as the locations of maximum density along the corresponding emitted rays. We adopt a Swin Transformer [28] as the encoder structure to enhance detail perception. For encoder training, standardized latent codes are sampled to generate face images under random views, comprising 100,000 identities in total. We adopt the Adam optimizer [25] and set the learning rate to $1e - 4$ for both the encoder and the discriminator. All implementations are based on PyTorch and run on Nvidia A6000 GPUs.
|
| 172 |
+
|
| 173 |
+
# 4.2. Qualitative Evaluation
|
| 174 |
+
|
| 175 |
+
We conduct a qualitative comparison between our work and several SOTA 3D face editing methods with intuitive manipulation, i.e., StyleGAN-NADA [13] guided by text prompts and IDE-3D [46] controlled by semantic maps. Besides, we also include the point-based warping approach in the qualitative comparison. We adopt similar editing objectives and use their official code to ensure fairness. Fig. 3 shows the multi-view results of shape and expression editing, demonstrating the superiority of our method in fine-grained modification. Direct warping accomplishes noticeable edits, but it suffers from facial distortion. IDE-3D [46] achieves satisfactory results in most cases. However, the coupling of different facial attributes in the semantic maps leads to changes beyond the target attributes. For
|
| 176 |
+
|
| 177 |
+
instance, the baby in Fig. 3 (c) shows a shift of age and identity when we try to elongate his chin. Besides, IDE-3D only supports single-view editing, limiting its applicability. StyleGAN-NADA [13] fails to edit the facial shape based on EG3D, despite its great success in style transfer and texture editing. In contrast, our method supports the user in simultaneously manipulating the face from multiple views and enables intuitive editing of facial shapes, expressions, and poses without sacrificing identity or detail. Beyond editing quality, our method has another advantage: it does not require additional training of the generative models, demonstrating its generalization.
|
| 178 |
+
|
| 179 |
+
Furthermore, we also compare our method with a recent 2D method, DragGAN [34], which employs a point-guided operation similar to ours. Since DragGAN is limited to 2D editing, we compare the results in two aspects, i.e., fixed-view editing and novel view synthesis, as shown in Fig. 4. For fixed-view editing, the results of DragGAN [34] in Fig. 4 (a) show a tendency to open the mouth and change the identity when shortening the nose, although a mask limiting the editable region is applied. For novel view synthesis, DragGAN severely changes the identity due to ambiguous point dragging in Fig. 4 (b). Compared to DragGAN, our method succeeds in achieving the expected editing target while keeping the identity and irrelevant regions unchanged.
|
| 180 |
+
|
| 181 |
+
<table><tr><td>Methods</td><td>Scheme</td><td>Inference Time (s) ↓</td><td>MSE<sub>i</sub> ↑</td><td>MSE<sub>o</sub> ↓</td><td>MSE<sub>i</sub>/MSE<sub>o</sub> ↑</td><td>ID Consistency ↑</td></tr><tr><td>DragGAN [34]</td><td>2D</td><td>5.231</td><td>1.992</td><td>0.224</td><td>8.893</td><td>0.579</td></tr><tr><td>Ours</td><td>2D</td><td>0.356</td><td>2.049</td><td>0.186</td><td>11.016</td><td>0.716</td></tr><tr><td>Our warp</td><td>3D</td><td>0.269</td><td>2.455</td><td>0.328</td><td>7.485</td><td>0.707</td></tr><tr><td>IDE-3D [46]</td><td>3D</td><td>0.383</td><td>1.841</td><td>0.987</td><td>1.865</td><td>0.649</td></tr><tr><td>Ours</td><td>3D</td><td>0.624</td><td>1.679</td><td>0.342</td><td>4.909</td><td>0.712</td></tr></table>
|
| 182 |
+
|
| 183 |
+
Table 2. Quantitative comparison with several face editing methods on efficiency and effectiveness. The best results are labeled in bold, except for our direct warp due to its distorted results. The unit of $\mathrm{MSE}_i$ and $\mathrm{MSE}_o$ is $10^{-2}$ .
|
| 184 |
+
|
| 185 |
+

|
| 186 |
+
Figure 4. Qualitative comparisons with DragGAN [34] on portrait editing. Red and blue points represent the source and target points of the manipulations, respectively. The semi-transparent region indicates the mask used for DragGAN, which is not needed in our method.
|
| 187 |
+
|
| 188 |
+
# 4.3. Quantitative Evaluation
|
| 189 |
+
|
| 190 |
+
We also conduct quantitative experiments to verify the efficiency and effectiveness of our method, as shown in Tab. 2. We adopt editing time as the efficiency metric because it strongly influences the user experience. As shown, DragGAN [34] spends a large amount of time on latent optimization, resulting in lower efficiency. IDE-3D [46] and our method exhibit similar efficiency, supporting real-time editing. Although direct warping is the fastest, it causes facial distortion, and thus we exclude it from the comparison.
|
| 191 |
+
|
| 192 |
+
Furthermore, to assess the capability of disentangled editing, we measure the pixel-wise mean square error (MSE) inside and outside the target editing regions. The objective is to successfully edit the target regions while preventing the outside regions from being modified. As shown, our approach achieves better editing disentanglement than IDE-3D [46], with a higher ratio of $\mathrm{MSE}_i$ to $\mathrm{MSE}_o$ . It is worth noting that the editability of 3D GANs is inferior to that of 2D GANs, and thus our method falls behind DragGAN [34] here. Considering the efficiency and multi-view editing ability of our method, the gap between ours and DragGAN is acceptable. To fairly compare the two methods without the interference of the base generators, we further extend our method to the same 2D generator, where it performs better than DragGAN [34]. Additionally, we also compare identity similarity. The results indicate that our method can better maintain
|
| 193 |
+
|
| 194 |
+

|
| 195 |
+
Figure 5. The ablation study of our loss functions for training the encoder. The first row aims to widen the double eyelids while keeping the eyes open, and the second is to lengthen the bangs. The numbers in the corners represent the identity similarity measured by ArcFace [10]. Please zoom-in for detailed observation.
|
| 196 |
+
|
| 197 |
+
the facial identity than other methods.
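For clarity, the disentanglement metric of Table 2 can be written as a masked MSE; the snippet below is our own formulation of the described metric, with a hypothetical rectangular edit region:

```python
import torch

def edit_mse(before: torch.Tensor, after: torch.Tensor, region: torch.Tensor):
    """before/after: (C, H, W) images; region: (H, W) bool mask of the edit area.
    Returns MSE inside (should be high), outside (should be low), and their ratio."""
    sq = (after - before).pow(2).mean(dim=0)   # per-pixel squared error
    mse_i = sq[region].mean()
    mse_o = sq[~region].mean()
    return mse_i, mse_o, mse_i / mse_o

before, after = torch.rand(3, 256, 256), torch.rand(3, 256, 256)
region = torch.zeros(256, 256, dtype=torch.bool)
region[100:160, 80:180] = True                 # hypothetical target editing region
mse_i, mse_o, ratio = edit_mse(before, after, region)
```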
|
| 198 |
+
|
| 199 |
+
# 4.4. Ablation Study
|
| 200 |
+
|
| 201 |
+
Effectiveness of Loss Functions. We investigate the effectiveness of each loss function in the encoder training process, as depicted in Fig. 5. The $\mathcal{L}_w$ introduced by the warp-assisted data augmentation facilitates accurate identification of the user's manipulations, and the $\mathcal{L}_d$ helps to maintain identity information. Their combination achieves the best editing results.
|
| 202 |
+
|
| 203 |
+
Effectiveness of Directional Latent Editing. We conduct an ablation study to verify the effectiveness of our directional latent editing. We begin by applying tri-plane warping to source identities to obtain the warped results. Subsequently, we extract the directions of different layer groups, i.e., the shape direction, the expression direction, and their combination, respectively. Fig. 6 shows that each individual directional latent code has the capacity to disentangle the attributes, while their combination realizes integrated editing. However, directly mapping the warped rendering to latent space without our directional latent module results in identity shifting and detail deficiency. These results verify the effectiveness of our directional latent editing.
|
| 204 |
+
|
| 205 |
+
# 4.5. Applications
|
| 206 |
+
|
| 207 |
+
Generalization of Learned Latent Directions. The editing direction learned for one face can be generalized to other instances, and we can further control the degree along the
|
| 208 |
+
|
| 209 |
+

|
| 210 |
+
Figure 6. The ablation study of our directional editing. "w/o Dir." represents results generated by directly projecting the warped results to latent space.
|
| 211 |
+
|
| 212 |
+

|
| 213 |
+
Figure 7. The interpolated editing results along the directions learned in the case of Fig. 3 (a) and (d), i.e., "wider face" and "close mouth" respectively. It shows that the learned editing direction in one face can be generalized to other instances.
|
| 214 |
+
|
| 215 |
+
direction to linearly interpolate the editing results. Fig. 7 shows the interpolation results guided by the directions learned in the cases of Fig. 3, i.e., wider face and closed mouth. With the degree rising from -2.0 to 2.0, both identities change gradually along their respective directions, even though the directions were initially learned for other cases, demonstrating the generalization of these learned latent directions.
|
| 216 |
+
|
| 217 |
+
Continuous Editing. Continuous editing is important for real-world applications. Therefore, we conduct an experiment to show our capability of overlaying modifications. Fig. 8 shows the results with multiple editing targets, i.e., smaller eyes, closed mouth, smaller nose, and wider face. The natural and ID-consistent results demonstrate the effectiveness of our method for continuous editing.
|
| 218 |
+
|
| 219 |
+
Generalization to Other Generators. To show the generalized application of our method, we extend it to 3D cat editing and 2D car editing. We introduce our method to the pre-trained EG3D [4] on AFHQ Cats [8] dataset and StyleGAN [23] trained on Stanford Cars [26] dataset, respectively. As shown in Fig. 9, our approach can also successfully manipulate the 3D cats and 2D cars according to the user's point-based instructions.
|
| 220 |
+
|
| 221 |
+

|
| 222 |
+
|
| 223 |
+

|
| 224 |
+
Figure 8. We showcase the mixing results with multiple attributes, demonstrating the continuous editing ability of our method.
|
| 225 |
+
Figure 9. The extension of our method to cat and car editing.
|
| 226 |
+
|
| 227 |
+
# 5. Conclusion
|
| 228 |
+
|
| 229 |
+
In this paper, we propose FaceEdit3D, an intuitive method to edit the 3D facial shape and expression from any perspective. Our approach involves tri-plane warping to ensure inherently 3D-consistent editing. To mitigate the facial distortions caused by the warping, we train a warp-aware encoder to project the warped face into a standardized distribution, and further explore the hierarchical mechanism of the latent space to achieve disentangled editing. Extensive experiments demonstrate the effectiveness and efficiency of our method. The additional applications also show its generalization and potential across different scenarios. To sum up, our method provides a brand-new way to manipulate 3D representations, opening up new avenues for rapid and convenient real-image editing.
|
| 230 |
+
|
| 231 |
+
Limitations. Since our method is based on warping the 3D representation, it is hard for our work to achieve texture editing and some semantic edits, such as adding glasses.

Broader Impacts. Although it is not our intention, our 3D-aware facial editing capability could potentially be abused. We are committed to privacy protection and to preventing the misuse of facial editing for criminal purposes.
|
| 232 |
+
|
| 233 |
+
# Acknowledgements
|
| 234 |
+
|
| 235 |
+
This work was supported in part by NSFC (62201342, 62101325), and Shanghai Municipal Science and Technology Major Project (2021SHZDZX0102).
|
| 236 |
+
|
| 237 |
+
# References
|
| 238 |
+
|
| 239 |
+
[1] Rameen Abdal, Peihao Zhu, Niloy J Mitra, and Peter Wonka. Styleflow: Attribute-conditioned exploration of stylegan-generated images using conditional continuous normalizing flows. TOG, pages 1-21, 2021. 2, 3
|
| 240 |
+
[2] Sizhe An, Hongyi Xu, Yichun Shi, Guoxian Song, Umit Y Ogras, and Linjie Luo. Panohead: Geometry-aware 3d full-head synthesis in 360°. In CVPR, pages 20950-20959, 2023. 1, 2, 3
|
| 241 |
+
[3] Eric R Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In CVPR, pages 5799-5809, 2021. 3
|
| 242 |
+
[4] Eric R Chan, Connor Z Lin, Matthew A Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas J Guibas, Jonathan Tremblay, Sameh Khamis, et al. Efficient geometry-aware 3d generative adversarial networks. In CVPR, pages 16123-16133, 2022. 1, 2, 3, 5, 8
|
| 243 |
+
[5] Anpei Chen, Ruiyang Liu, Ling Xie, Zhang Chen, Hao Su, and Jingyi Yu. Sofgan: A portrait image generator with dynamic styling. TOG, pages 1-26, 2022. 3
|
| 244 |
+
[6] Zhuo Chen, Xudong Xu, Yichao Yan, Ye Pan, Wenhan Zhu, Wayne Wu, Bo Dai, and Xiaokang Yang. Hyperstyle3d: Text-guided 3d portrait stylization via hypernetworks. arXiv preprint arXiv:2304.09463, 2023. 2, 3
|
| 245 |
+
[7] Yuhao Cheng, Yichao Yan, Wenhan Zhu, Ye Pan, Bowen Pan, and Xiaokang Yang. Head3d: Complete 3d head generation via tri-plane feature distillation. arXiv preprint arXiv:2303.15892, 2023. 4
|
| 246 |
+
[8] Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In CVPR, pages 8188-8197, 2020. 8
|
| 247 |
+
[9] Forrester Cole, David Belanger, Dilip Krishnan, Aaron Sarna, Inbar Mosseri, and William T Freeman. Synthesizing normalized faces from facial identity features. In CVPR, pages 3703-3712, 2017. 4
|
| 248 |
+
[10] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, pages 4690-4699, 2019. 5, 7
|
| 249 |
+
[11] Yu Deng, Jiaolong Yang, Jianfeng Xiang, and Xin Tong. Gram: Generative radiance manifolds for 3d-aware image generation. In CVPR, pages 10673-10683, 2022. 1, 2
|
| 250 |
+
[12] Yuki Endo. User-controllable latent transformer for stylegan image layout editing. In Computer Graphics Forum, pages 395-406, 2022. 2
|
| 251 |
+
[13] Rinon Gal, Or Patashnik, Haggai Maron, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Stylegan-nada: Clip-guided domain adaptation of image generators. TOG, pages 1-13, 2022. 2, 3, 6
|
| 252 |
+
[14] Stephan J Garbin, Marek Kowalski, Virginia Estellers, Stanislaw Szymanowicz, Shideh Rezaeifar, Jingjing Shen, Matthew Johnson, and Julien Valentin. Voltemorph: Realtime, controllable and generalisable animation of volumetric representations. arXiv preprint arXiv:2208.00949, 2022. 3
|
| 253 |
+
[15] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In ICLR, 2021. 1, 2, 3
|
| 254 |
+
|
| 255 |
+
[16] Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. Ganspace: Discovering interpretable gan controls. NeurIPS, pages 9841–9850, 2020. 1, 2
|
| 256 |
+
[17] Clément Jambon, Bernhard Kerbl, Georgios Kopanas, Stavros Diolatzis, George Drettakis, and Thomas Leimkuhler. Nerfshop: Interactive editing of neural radiance fields. CGIT, 6(1), 2023. 3
|
| 257 |
+
[18] Kaiwen Jiang, Shu-Yu Chen, Feng-Lin Liu, Hongbo Fu, and Lin Gao. Nerfaceediting: Disentangled face editing in neural radiance fields. In SIGGRAPH Asia, pages 1-9, 2022. 1, 2
|
| 258 |
+
[19] Yuming Jiang, Ziqi Huang, Xingang Pan, Chen Change Loy, and Ziwei Liu. Talk-to-edit: Fine-grained facial editing via dialog. In ICCV, pages 13799-13808, 2021. 3
|
| 259 |
+
[20] Wonjoon Jin, Nuri Ryu, Geonung Kim, Seung-Hwan Baek, and Sunghyun Cho. Dr. 3d: Adapting 3d gans to artistic drawings. In SIGGRAPH Asia, pages 1-8, 2022. 4
|
| 260 |
+
[21] Kacper Kania, Stephan J Garbin, Andrea Tagliasacchi, Virginia Estellers, Kwang Moo Yi, Julien Valentin, Tomasz Trzciński, and Marek Kowalski. Blendfields: Few-shot example-driven facial modeling. In CVPR, pages 404-415, 2023. 3
|
| 261 |
+
[22] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, pages 4401-4410, 2019. 1, 5
|
| 262 |
+
[23] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, pages 8110-8119, 2020. 1, 8
|
| 263 |
+
[24] Gwanghyun Kim and Se Young Chun. Datid-3d: Diversitypreserved domain adaptation using text-to-image diffusion for 3d generative model. In CVPR, pages 14203–14213, 2023. 3, 4
|
| 264 |
+
[25] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015. 6
|
| 265 |
+
[26] Jonathan Krause, Michael Stark, Jia Deng, and Li Fei-Fei. 3d object representations for fine-grained categorization. In ICCV, pages 554–561, 2013. 8
|
| 266 |
+
[27] Tianye Li, Timo Bolkart, Michael J Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4d scans. TOG, pages 194:1-194:17, 2017. 3
|
| 267 |
+
[28] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, pages 10012-10022, 2021. 6
|
| 268 |
+
[29] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J Black. Smpl: A skinned multiperson linear model. In Seminal Graphics Papers: Pushing the Boundaries, Volume 2, pages 851-866. 2023. 3
|
| 269 |
+
[30] Camillo Lugaresi, Jiuqiang Tang, Hadon Nash, Chris McClanahan, Esha Uboweja, Michael Hays, Fan Zhang, Chuoling Chang, Ming Guang Yong, Juhyun Lee, et al. Mediapipe: A framework for building perception pipelines. arXiv preprint arXiv:1906.08172, 2019. 5
|
| 270 |
+
[31] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, pages 99-106, 2020. 2
|
| 271 |
+
|
| 272 |
+
[32] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In CVPR, pages 11453–11464, 2021. 1, 2, 3
|
| 273 |
+
[33] Roy Or-El, Xuan Luo, Mengyi Shan, Eli Shechtman, Jeong Joon Park, and Ira Kemelmacher-Shlizerman. Stylesdf: High-resolution 3d-consistent image and geometry generation. In CVPR, pages 13503-13513, 2022. 1, 2, 3
|
| 274 |
+
[34] Xingang Pan, Ayush Tewari, Thomas Leimkuhler, Lingjie Liu, Abhimitra Meka, and Christian Theobalt. Drag your gan: Interactive point-based manipulation on the generative image manifold. In SIGGRAPH, pages 1-11, 2023. 2, 3, 6, 7
|
| 275 |
+
[35] Keunhong Park, Utkarsh Sinha, Jonathan T Barron, Sofien Bouaziz, Dan B Goldman, Steven M Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In ICCV, pages 5865-5874, 2021. 3
|
| 276 |
+
[36] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. Styleclip: Text-driven manipulation of stylegan imagery. In CVPR, pages 2085–2094, 2021. 1, 2, 3, 5
|
| 277 |
+
[37] Yicong Peng, Yichao Yan, Shengqi Liu, Yuhao Cheng, Shanyan Guan, Bowen Pan, Guangtao Zhai, and Xiaokang Yang. Cagenerf: Cage-based neural radiance field for generalized 3d deformation and animation. NeurIPS, pages 31402-31415, 2022. 3
|
| 278 |
+
[38] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763, 2021. 2
|
| 279 |
+
[39] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, pages 10684-10695, 2022. 2
|
| 280 |
+
[40] Shunsuke Saito, Jinlong Yang, Qianli Ma, and Michael J Black. Scanimate: Weakly supervised learning of skinned clothed avatar networks. In CVPR, pages 2886-2897, 2021. 3
|
| 281 |
+
[41] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In NeurIPS, 2020. 1, 2, 3
|
| 282 |
+
[42] Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in gans. In CVPR, pages 1532-1540, 2021. 2, 3
|
| 283 |
+
[43] Yujun Shen, Ceyuan Yang, Xiaoou Tang, and Bolei Zhou. Interfacegan: Interpreting the disentangled face representation learned by gans. TPAMI, pages 2004-2018, 2020. 1, 2, 3
|
| 284 |
+
[44] Enis Simsar, Alessio Tonioni, Evin Pinar Ornek, and Federico Tombari. Latentswap3d: Semantic edits on 3d image gans. In ICCV, pages 2899-2909, 2023. 2
|
| 285 |
+
[45] Ivan Skorokhodov, Sergey Tulyakov, Yiqun Wang, and Peter Wonka. Epigraf: Rethinking training of 3d gans. NeurIPS, pages 24487-24501, 2022. 1, 2, 3
|
| 286 |
+
[46] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. TOG, pages 1-10, 2022. 1, 2, 3, 6, 7
|
| 289 |
+
[47] Jingxiang Sun, Xuan Wang, Yong Zhang, Xiaoyu Li, Qi Zhang, Yebin Liu, and Jue Wang. Fenerf: Face editing in neural radiance fields. In CVPR, pages 7672-7682, 2022. 3
|
| 290 |
+
[48] Jingxiang Sun, Xuan Wang, Lizhen Wang, Xiaoyu Li, Yong Zhang, Hongwen Zhang, and Yebin Liu. Next3d: Generative neural texture rasterization for 3d-aware head avatars. In CVPR, pages 20991-21002, 2023. 1, 2
|
| 291 |
+
[49] Ayush Tewari, Mohamed Elgharib, Gaurav Bharaj, Florian Bernard, Hans-Peter Seidel, Patrick Pérez, Michael Zollhofer, and Christian Theobalt. Stylerig: Rigging stylegan for 3d control over portrait images. In CVPR, pages 6142-6151, 2020. 2
|
| 292 |
+
[50] Andrey Voynov and Artem Babenko. Unsupervised discovery of interpretable directions in the gan latent space. In ICML, pages 9786-9796, 2020. 2, 3
|
| 293 |
+
[51] Tengfei Wang, Bo Zhang, Ting Zhang, Shuyang Gu, Jianmin Bao, Tadas Baltrusaitis, Jingjing Shen, Dong Chen, Fang Wen, Qifeng Chen, et al. Rodin: A generative model for sculpting 3d digital avatars using diffusion. In CVPR, pages 4563-4573, 2023. 1
|
| 294 |
+
[52] Sijing Wu, Yichao Yan, Yunhao Li, Yuhao Cheng, Wenhan Zhu, Ke Gao, Xiaobo Li, and Guangtao Zhai. Ganhead: Towards generative animatable neural head avatars. In CVPR, pages 437-447, 2023. 3
|
| 295 |
+
[53] Jianfeng Xiang, Jiaolong Yang, Yu Deng, and Xin Tong. Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. In ICCV, pages 2195-2205, 2023. 1, 2, 3
|
| 296 |
+
[54] Tianhan Xu and Tatsuya Harada. Deforming radiance fields with cages. In ECCV, pages 159-175, 2022. 3
|
| 297 |
+
[55] Yinghao Xu, Sida Peng, Ceyuan Yang, Yujun Shen, and Bolei Zhou. 3d-aware image synthesis via learning structural and textural representations. In CVPR, 2022. 1, 2
|
| 298 |
+
[56] Yichao Yan, Yuhao Cheng, Zhuo Chen, Yicong Peng, Sijing Wu, Weitian Zhang, Junjie Li, Yixuan Li, Jingnan Gao, Weixia Zhang, Guangtao Zhai, and Xiaokang Yang. A survey on generative 3d digital humans based on neural networks: representation, rendering, and learning. SCIENTIA SINICA Informationis, pages 1858–, 2023. 1
|
| 299 |
+
[57] Yu-Jie Yuan, Yang-Tian Sun, Yu-Kun Lai, Yuewen Ma, Rongfei Jia, and Lin Gao. Nerf-editing: geometry editing of neural radiance fields. In CVPR, pages 18353-18364, 2022. 3
|
| 300 |
+
[58] Ziyang Yuan, Yiming Zhu, Yu Li, Hongyu Liu, and Chun Yuan. Make encoder great again in 3d gan inversion through geometry and occlusion-aware encoding. In ICCV, pages 2437-2447, 2023. 5
|
| 301 |
+
[59] Chi Zhang, Yiwen Chen, Yijun Fu, Zhenglin Zhou, Gang Yu, Billzb Wang, Bin Fu, Tao Chen, Guosheng Lin, and Chunhua Shen. StyleAvatar3d: Leveraging image-text diffusion models for high-fidelity 3d avatar generation. arXiv preprint arXiv:2305.19012, 2023. 2
|
| 302 |
+
[60] Jianfeng Zhang, Zihang Jiang, Dingdong Yang, Hongyi Xu, Yichun Shi, Guoxian Song, Zhongcong Xu, Xinchao Wang, and Jiashi Feng. Avatargen: a 3d generative model for animatable human avatars. In ECCV, pages 668-685. Springer, 2022. 5
|
| 305 |
+
[61] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, pages 586-595, 2018. 5
|
| 306 |
+
[62] Xuanmeng Zhang, Jianfeng Zhang, Rohan Chacko, Hongyi Xu, Guoxian Song, Yi Yang, and Jiashi Feng. Getavatar: Generative textured meshes for animatable human avatars. In ICCV, pages 2273-2282, 2023. 5
|
| 307 |
+
[63] Yufeng Zheng, Victoria Fernández Abrevaya, Marcel C Bühler, Xu Chen, Michael J Black, and Otmar Hilliges. Im avatar: Implicit morphable head avatars from videos. In CVPR, pages 13545-13555, 2022. 3
|
| 308 |
+
[64] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips-3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis. arXiv preprint arXiv:2110.09788, 2021. 1, 2, 3
|
| 309 |
+
[65] Jiapeng Zhu, Ruili Feng, Yujun Shen, Deli Zhao, Zheng-Jun Zha, Jingren Zhou, and Qifeng Chen. Low-rank subspaces in gans. NeurIPS, pages 16648-16658, 2021. 2
|
| 310 |
+
[66] Jiapeng Zhu, Yujun Shen, Yinghao Xu, Deli Zhao, and Qifeng Chen. Region-based semantic factorization in gans. In ICML, pages 27612-27632, 2022.
|
| 311 |
+
[67] Jiapeng Zhu, Ceyuan Yang, Yujun Shen, Zifan Shi, Bo Dai, Deli Zhao, and Qifeng Chen. Linkgan: Linking gan latents to pixels for controllable image synthesis. In ICCV, pages 7656-7666, 2023. 2
|
| 312 |
+
[68] Peihao Zhu, Rameen Abdal, Yipeng Qin, and Peter Wonka. Sean: Image synthesis with semantic region-adaptive normalization. In CVPR, pages 5104-5113, 2020. 3
|
| 313 |
+
[69] Peiye Zhuang, Oluwasanmi Koyejo, and Alexander G Schwing. Enjoy your editing: Controllable gans for image editing via latent space navigation. arXiv preprint arXiv:2102.01187, 2021. 3
|
3dawarefaceeditingviawarpingguidedlatentdirectionlearning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:af4e7d7c089610670401083170751adfc9e69d28bec83e0ea306a4f5f546dfe1
|
| 3 |
+
size 677753
|
3dawarefaceeditingviawarpingguidedlatentdirectionlearning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f0efda2089fa9262e2930e786877b275b2d6c462dd0c229a298aa0eecff2e5ac
|
| 3 |
+
size 396449
|
3dbuildingreconstructionfrommonocularremotesensingimageswithmultilevelsupervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9d217fa2e92c971bb45e2438f018f110fc06312bc2c6549ba1044566b2db3e81
|
| 3 |
+
size 77381
|
3dbuildingreconstructionfrommonocularremotesensingimageswithmultilevelsupervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eba6a3e3c38f42a51f217832e3919bd944fc4893bd209bf89b6666055b52ce76
|
| 3 |
+
size 93604
|
3dbuildingreconstructionfrommonocularremotesensingimageswithmultilevelsupervisions/3c7a0eb0-f8af-4281-92e0-1ce5ec55d350_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8ad91f98e89e47eb43ca423d633acc5855d7ba57c7efd5b55394622b1a0c8b05
|
| 3 |
+
size 5391341
|
3dbuildingreconstructionfrommonocularremotesensingimageswithmultilevelsupervisions/full.md
ADDED
|
@@ -0,0 +1,310 @@
| 1 |
+
# 3D Building Reconstruction from Monocular Remote Sensing Images with Multi-level Supervisions
|
| 2 |
+
|
| 3 |
+
Weijia Li $^{1*}$ , Haote Yang $^{2*}$ , Zhenghao Hu $^{1}$ , Juepeng Zheng $^{1}$ , Gui-Song Xia $^{3}$ , Conghui He $^{2,4\dagger}$; $^{1}$ Sun Yat-Sen University, $^{2}$ Shanghai AI Laboratory, $^{3}$ Wuhan University, $^{4}$ SenseTime Research
|
| 4 |
+
|
| 5 |
+
{liweij29, zhengjp8}@mail.sysu.edu.cn, {yanghaote, heconghui}@pjlab.org.cn, huzhh9@mail2.sysu.edu.cn, guisong.xia@whu.edu.cn
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
3D building reconstruction from monocular remote sensing images is an important and challenging research problem that has received increasing attention in recent years, owing to its low cost of data acquisition and availability for large-scale applications. However, existing methods rely on expensive 3D-annotated samples for fully-supervised training, restricting their application to large-scale cross-city scenarios. In this work, we propose MLS-BRN, a multi-level supervised building reconstruction network that can flexibly utilize training samples with different annotation levels to achieve better reconstruction results in an end-to-end manner. To alleviate the demand on full 3D supervision, we design two new modules, Pseudo Building Bbox Calculator and Roof-Offset guided Footprint Extractor, as well as new tasks and training strategies for different types of samples. Experimental results on several public and new datasets demonstrate that our proposed MLS-BRN achieves competitive performance using much fewer 3D-annotated samples, and significantly improves the footprint extraction and 3D reconstruction performance compared with current state-of-the-art. The code and datasets of this work will be released at https://github.com/opendatalab/MLS-BRN.git.
|
| 10 |
+
|
| 11 |
+
# 1. Introduction
|
| 12 |
+
|
| 13 |
+
3D building reconstruction is a fundamental task for large-scale city modeling and has received increasing attention in recent studies. Among these studies, monocular 3D building reconstruction has become a promising and economic solution for large-scale real-world applications, owing to its lower data acquisition cost and larger data coverage compared to multi-view stereo imagery and LiDAR data [6, 31]. Meanwhile, the limited information of monocular images as well as the diversity of building structures also result in
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
Training samples of different annotation levels
|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
|
| 20 |
+

|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
Monocular 3D building reconstruction
|
| 24 |
+
Figure 1. Our proposed method achieves 3D building reconstruction using training samples of different annotation levels. A large quantity of samples includes only building footprint annotations, whereas a small quantity contains extra roof-to-footprint offset and building height annotations.
|
| 25 |
+
|
| 26 |
+
great challenges for large-scale 3D building reconstruction.
|
| 27 |
+
|
| 28 |
+
Inspired by the progress of supervised monocular depth estimation methods, deep neural networks have been broadly applied to monocular 3D building reconstruction studies. Most studies utilize building footprints or other types of semantic labels as prior information to facilitate building height estimation from near-nadir images [15, 24, 25, 29, 37]. Off-nadir images, by contrast, constitute a larger proportion of remote sensing images and provide additional useful information for building height estimation, which has demonstrated significant potential in several recent studies [4, 5, 19, 32, 33]. Some studies designed a geocentric pose estimation task considering the parallax effect of building roof and footprint [4, 5], aiming at estimating height values instead of reconstructing a 3D model. Other studies leveraged the relation between different components of a building instance (e.g. roof, footprint,
|
| 29 |
+
|
| 30 |
+
and facade) as well as the offset between roof and footprint, which has proven to be an effective solution for 3D building reconstruction and accurate extraction of building footprints [19, 32].
|
| 31 |
+
|
| 32 |
+
In general, existing monocular building reconstruction methods are designed for fully-supervised learning, requiring a large number of fully-annotated 3D labels for network training. However, due to the expensive annotation cost, the available datasets for 3D building reconstruction remain scarce, restricting existing 3D reconstruction methods to single-city or single-dataset scenarios. By contrast, owing to their low annotation cost and the growth of open map data, public building footprints have an extremely large coverage and quantity. Additionally, existing building datasets provide different levels of annotations, such as footprint only, footprint and pixel-wise height [4], footprint and offset vector [19, 32], etc. If effectively utilized, the large-scale 2D footprints and datasets with different annotation levels can provide new opportunities for broadening the application scenarios of 3D building reconstruction and reducing the annotation cost.
|
| 33 |
+
|
| 34 |
+
In this work, we propose MLS-BRN, a Multi-Level Supervised Building Reconstruction Network based on monocular remote sensing images, which is a unified and flexible framework that is capable of utilizing the training samples with different annotation levels. To alleviate the demand on 3D annotations and enhance the building reconstruction performance, we design new tasks regarding the meta information of off-nadir images and two new modules, i.e., Pseudo Building Bbox Calculator and Roof-Offset guided Footprint Extractor, as well as a new training strategy based on different types of samples. Experimental results on several public and new datasets demonstrate that our method achieves competitive performance when only using a small proportion of 3D-annotated samples, and significantly improves the building segmentation and height estimation performance compared with current state-of-the-art. Our main contributions are summarized as follows:
|
| 35 |
+
|
| 36 |
+
- We design MLS-BRN, a multi-level supervised building reconstruction network, which consists of new tasks and modules to enhance the relation between different components of a building instance and alleviate the demand on 3D annotations.
|
| 37 |
+
- We propose a multi-level training strategy that enables the training of MLS-BRN with different supervision levels to further improve the 3D reconstruction performance.
|
| 38 |
+
- We extend the monocular building reconstruction datasets to more cities. Comprehensive experiments under different settings demonstrate the potential of MLS-BRN in large-scale cross-city scenarios.
|
| 39 |
+
|
| 40 |
+
# 2. Related work
|
| 41 |
+
|
| 42 |
+
# 2.1. Building footprint extraction
|
| 43 |
+
|
| 44 |
+
Building footprint extraction is an important prerequisite for monocular 3D building reconstruction. Various instance and semantic segmentation networks have been broadly applied to building extraction tasks. Many studies utilize multi-task segmentation networks to improve the building segmentation performance. For instance, Yuan [35] proposed the signed distance representation for building footprint extraction, achieving better performance compared with the single-task fully convolutional network. Similarly, in [24], a modified signed distance function was introduced and jointly learned with other tasks for predicting building footprint outlines and heights. To improve the geometric shapes of building extraction results, several methods directly predicted the vertices of a building polygon based on Recurrent Neural Network or Graph Neural Network [22, 36, 39], or combined the pixel-based multi-task segmentation network with a graph-based polygon refinement network using a rule-based module [20]. In addition, some recent studies converted building footprint extraction into roof segmentation and roof-to-footprint offset estimation tasks, which achieved promising performance for building footprint extraction, especially for high-rise buildings in off-nadir images [19, 32].
|
| 45 |
+
|
| 46 |
+
In summary, most existing methods directly extract the building footprints and perform worse for high-rise buildings in off-nadir images. Offset-based methods can effectively alleviate this problem, but the expensive offset annotation efforts and the post-processing process are still inevitable. On the contrary, our work proposes a multi-level supervised solution that is capable of leveraging different types of samples to reduce the demand for offset annotation, achieving promising footprint extraction results in an end-to-end manner.
|
| 47 |
+
|
| 48 |
+
# 2.2. Monocular 3D building reconstruction
|
| 49 |
+
|
| 50 |
+
Inspired by the progress of monocular depth estimation, deep neural networks have been widely used for monocular building height estimation in recent studies [8, 18, 33]. Most of these studies are designed for height estimation from near-nadir images, in which the building roof and footprint are almost overlapped. Some methods used an encoder-decoder network to regress the height values [25], or used a generative adversarial network to simulate a height map [9]. Moreover, the semantic labels have been utilized as effective priors in many existing methods considering the limited information provided from the near-nadir images for height estimation. Some studies designed a multitask network for joint footprint extraction and height estimation [8, 29, 37], while others exploit the semantic labels as prior information for height estimation [15]. In actual
|
| 51 |
+
|
| 52 |
+

|
| 53 |
+
Figure 2. An overview of our proposed method. Taking a monocular remote sensing image as input, our MLS-BRN generates a set of building bboxes, roof-to-footprint offsets, building heights, and pixel-wise roof masks. The predicted roof masks and their corresponding offsets are further integrated to predict pixel-wise footprint masks. The predicted footprint mask and building height are used to produce the final vectorized 3D model. Two novel modules are introduced: (1) the ROFE predicts footprint masks guided by the predicted roof masks and offsets; (2) the PBC predicts off-nadir and offset angles to calculate pseudo building bboxes for building bbox-unknown samples.
|
| 54 |
+
|
| 55 |
+
scenarios, off-nadir images constitute a large proportion of the remote sensing images, in which the parallax effect of roof and footprint results in more challenges for extracting footprints but provides additional information for height estimation as well. Some recent studies [4, 5] design methods to learn the geocentric pose of buildings in off-nadir images for monocular height estimation [28], while others leverage the offset between building roof and footprint and the relation between different components to reconstruct a 3D building model [19, 32].
|
| 56 |
+
|
| 57 |
+
In summary, the monocular building reconstruction methods in existing studies require expensive and fully-annotated 3D labels for supervised learning. Our proposed method, by contrast, is a unified and flexible framework for 3D building reconstruction with different supervision levels, which effectively reduces the demand for the large-scale 3D annotations.
|
| 58 |
+
|
| 59 |
+
# 2.3. Monocular 3D reconstruction with fewer labels
|
| 60 |
+
|
| 61 |
+
In monocular 3D reconstruction in the general computer vision domain, several methods have been proposed for reducing the 3D annotation demand via weakly-supervised or semi-supervised learning [3, 11, 14, 16, 26]. In Yang et al. [34], a unified framework combining two types of supervisions was proposed, i.e., a small number of camera pose annotations and a large number of unlabeled images. In Neverova et al. [27], an intermediate representation containing important topological and structural information of the hand was introduced to enable weakly-supervised training for hand pose estimation. Concurrently, Gwak et al. [10] effectively leveraged a weak supervision type, i.e., foreground mask, as a substitute for costly 3D CAD annota
|
| 62 |
+
|
| 63 |
+
tions, which incorporates a raytrace pooling layer to enable perspective projection and backpropagation.
|
| 64 |
+
|
| 65 |
+
In contrast to the aforementioned studies, our proposed method leverages prior knowledge about the 3D structure of a building instance and the monocular remote sensing image, including the relation between roof, footprint, height, offset angle, and off-nadir angle, enabling multi-level supervised 3D reconstruction with fewer annotation efforts.
|
| 66 |
+
|
| 67 |
+
# 3. Methods
|
| 68 |
+
|
| 69 |
+
# 3.1. Problem statement
|
| 70 |
+
|
| 71 |
+
Given an off-nadir remote sensing image $I$ that includes buildings $B = \{b_{1}, b_{2}, \ldots, b_{N}\}$ , the objective of monocular 3D building reconstruction is to identify all the footprints $F = \{f_{1}, f_{2}, \ldots, f_{N}\}$ and roofs $R = \{r_{1}, r_{2}, \ldots, r_{N}\}$ corresponding to $B$ . The difficulty is that the footprints of buildings may be partially visible from an off-nadir viewing angle. Thus, previous studies, including [19] and [32], typically solve this issue by training a deep neural network with samples annotated with both $F$ and roof-to-footprint offsets $\vec{V} = \{v_{1}, v_{2}, \ldots, v_{N}\}$ .
|
| 72 |
+
|
| 73 |
+
However, the cost of annotating remote sensing images is still high, particularly for offset annotations. Therefore, we suggest addressing this issue by training a deep model that effectively uses samples containing both $F$ and $\vec{V}$ annotations, alongside samples only annotated with $F$ .
|
| 74 |
+
|
| 75 |
+
To facilitate training with offset-unknown samples, two tasks are included; one for predicting the off-nadir angle $\theta_{I}$ and the other for the offset angle $\varphi_{I}$ . Additionally, an instance-wise footprint segmentation task is included to predict the footprint conditioned on the predicted roof and off
|
| 76 |
+
|
| 77 |
+
set. Finally, a task for predicting real-world height is introduced to enhance the comprehension of the correlation between footprint and roof placement. In summary, four additional tasks are added to the original three tasks in LOFT-FOA [32]: (1) off-nadir angle prediction task; (2) offset angle prediction task; (3) footprint segmentation task; (4) real-world height prediction task.
|
| 78 |
+
|
| 79 |
+
# 3.2. Network structure
|
| 80 |
+
|
| 81 |
+
Fig. 2 illustrates the architecture of our proposed MLS-BRN. To facilitate multi-level supervised learning, two novel modules are introduced, namely the Pseudo Building Bbox Calculator (PBC) and the Roof-Offset guided Footprint Extractor (ROFE). The PBC module provides pseudo building bboxes to determine the positivity/negativity of the region proposals from the RPN module when offset-unknown (i.e. building bbox-unknown) samples are processed in the MLS-BRN. The ROFE module has two significant functions. Firstly, it provides a more straightforward way to supervise the building footprint segmentation task. Secondly, it offers an indirect way of supervising offset prediction and roof segmentation for offset-unknown samples as they pass through the MLS-BRN. Additionally, a building height head is included to predict the real-world building height.
|
| 82 |
+
|
| 83 |
+
# 3.2.1 Pseudo Building Bbox Calculator (PBC)
|
| 84 |
+
|
| 85 |
+
Samples without the ground truth for building bounding box $b$ -bbox $_{gt}$ cannot be utilized by previous models, like LOFT-FOA [32]. To address this issue, we propose a module that predicts pseudo building bounding boxes to substitute $b$ -bbox $_{gt}$ . For a provided off-nadir remote sensing image $I$ and one building $b$ contained by $I$ , we can describe the connection between the image-wise off-nadir angle $\theta_{I}$ , the offset angle $\varphi_{I}$ , the factor for scaling real-world height to pixel scale $s_{I}$ , and the building's height $h_{b}$ and offset $\vec{v}_{b}$ using the following equation:
|
| 86 |
+
|
| 87 |
+
$$
|
| 88 |
+
\begin{aligned} \vec{v}_b &= \|\vec{v}_b\|_2 \times \vec{e} \\ &= \|\vec{v}_b\|_2 \times [e_x, e_y] \\ &= h_b \times s_I \times \tan\theta_I \times [\cos\varphi_I, \sin\varphi_I] \end{aligned} \tag{1}
|
| 89 |
+
$$
|
| 90 |
+
|
| 91 |
+
where $||\vec{v}_b||_2$ is the $L2$ norm of the offset and $\vec{e}$ is the unit direction vector of $\vec{v}_b$ . The PBC module uses an off-nadir angle head to predict an image-wise off-nadir angle $\theta_{pred}$ and an offset angle head to predict an image-wise offset angle $\varphi_{pred}$ . Then, following Eq. (1), these are combined with the instance-wise building height ground truth $h_{gt}$ and scale factor $s_{gt}$ to compute the pseudo offset $\vec{v}_{pred}$ . Finally, $f_{gt}$ is translated guided by $\vec{v}_{pred}$ to obtain the pseudo building bbox $b$ -bbox $_{pred}$ , which plays the role of $b$ -bbox $_{gt}$ during the training of building bbox-unknown samples.
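A minimal sketch of this computation follows, assuming angles in radians and axis-aligned pixel-space boxes; taking the union of the footprint box with its translated copy (so the pseudo bbox also covers the facade and roof) is our reading of the translation step, not a detail stated in the text.

```python
import math

def pseudo_offset(h_gt, s_gt, theta_pred, phi_pred):
    """Eq. (1): |v| = h * s * tan(theta); direction = (cos(phi), sin(phi))."""
    mag = h_gt * s_gt * math.tan(theta_pred)
    return (mag * math.cos(phi_pred), mag * math.sin(phi_pred))

def pseudo_building_bbox(footprint_bbox, offset):
    """Translate the footprint box by the pseudo offset and take the union
    with the original box, so the result spans footprint, facade, and roof."""
    x0, y0, x1, y1 = footprint_bbox
    dx, dy = offset
    return (min(x0, x0 + dx), min(y0, y0 + dy),
            max(x1, x1 + dx), max(y1, y1 + dy))
```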
|
| 92 |
+
|
| 93 |
+
From the perspective of weak supervision, the PBC module extracts the image-wise angle information, i.e. the offset angle and the off-nadir angle, and uses it to supervise the instance-wise task. Note that for building height-unknown samples, the pseudo bounding boxes are calculated by directly enlarging the footprint boxes.
|
| 94 |
+
|
| 95 |
+
# 3.2.2 Roof-Offset guided Footprint Extractor (ROFE)
|
| 96 |
+
|
| 97 |
+
Previous works calculate the footprint mask in the inference stage by translating the inferred roof guided by the inferred offset. The ROFE module, however, predicts the footprint mask directly: it trains a convolutional network to learn the translation process, using the inferred roof mask and offset as inputs. For offset-aware (i.e. roof-aware) samples, this end-to-end training process adds more supervision on the offset head and the roof head. For offset-unknown samples, which cannot contribute to the training of the offset head and the roof head due to the lack of ground truth, ROFE provides an indirect way to supervise these two heads.
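A hedged sketch of the ROFE idea is given below: a small convolutional head that consumes the roof mask together with the broadcast instance-wise offset and emits footprint logits. The layer count and channel widths are our assumptions, not the paper's configuration.

```python
import torch
import torch.nn as nn

class ROFESketch(nn.Module):
    """Schematic stand-in for ROFE: learn the roof-to-footprint translation
    with a conv net instead of a hand-coded geometric shift."""

    def __init__(self, in_ch=3, hidden=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(in_ch, hidden, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(hidden, hidden, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(hidden, 1, 1),
        )

    def forward(self, roof_mask, offset):
        # Broadcast the instance-wise 2-D offset over the mask's spatial grid
        # so the conv net can condition on it.
        b, _, h, w = roof_mask.shape
        off = offset.view(b, 2, 1, 1).expand(b, 2, h, w)
        x = torch.cat([roof_mask, off], dim=1)
        return self.net(x)  # footprint logits, supervised with cross entropy
```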
|
| 98 |
+
|
| 99 |
+
# 3.3. Network training
|
| 100 |
+
|
| 101 |
+
In this section, we first introduce the loss functions in our MLS-BRN. Then we introduce our three levels of training samples graded by their level of supervision and their training strategies. The total hybrid loss is presented at the end of this section.
|
| 102 |
+
|
| 103 |
+
# 3.3.1 Loss definition
|
| 104 |
+
|
| 105 |
+
The LOFT-FOA [32] is trained by minimising Eq. (2), where $\mathcal{L}_{rp}$ , $\mathcal{L}_{rc}$ , $\mathcal{L}_{mh}$ are the same as those in Mask R-CNN [13], i.e., the losses for the RPN, R-CNN, and mask head, respectively; $\mathcal{L}_o$ is the loss for the offset head, which is a standard smooth L1 Loss.
|
| 106 |
+
|
| 107 |
+
$$
|
| 108 |
+
\mathcal{L}_{LF} = \mathcal{L}_{rp} + \beta_1 \mathcal{L}_{rc} + \beta_2 \mathcal{L}_{mh} + \beta_3 \mathcal{L}_o \tag{2}
|
| 109 |
+
$$
|
| 110 |
+
|
| 111 |
+
The MLS-BRN model keeps the four losses the same as LOFT-FOA [32] and introduces new losses to train the newly added modules. The footprint mask loss of the ROFE module is the same as $\mathcal{L}_{mh}$ , which is a standard cross entropy loss (Eq. (3)).
|
| 112 |
+
|
| 113 |
+
$$
|
| 114 |
+
\mathcal{L}_f = -\frac{1}{N} \sum_{i=1}^{N} \sum_{c=1}^{C} y_{i,c} \log\big(p(y_{i,c})\big) \tag{3}
|
| 115 |
+
$$
|
| 116 |
+
|
| 117 |
+
The loss of the offset angle head of the PBC module is calculated according to Eq. (4), in which $\mathcal{L}_{ova}$ denotes the offset angle loss and $\vec{v}_{pred}$ denotes the predicted unit direction vector of the offset.
|
| 118 |
+
|
| 119 |
+
$$
|
| 120 |
+
\begin{aligned} \mathcal{L}_{ova} &= \mathcal{L}_{ang} + \lambda_1 \mathcal{L}_{reg} \\ &= \|\vec{v}_{pred} - \vec{v}_{gt}\|_1 + \lambda_1 \big\| \|\vec{v}_{pred}\|_2 - 1 \big\|_1 \end{aligned} \tag{4}
|
| 121 |
+
$$
|
| 122 |
+
|
| 123 |
+
The off-nadir angle head of the PBC module is trained following Eq. (5), where $\mathcal{L}_{ona}$ is the off-nadir angle loss and $\theta_{pred}$ is the predicted off-nadir angle.
|
| 124 |
+
|
| 125 |
+
$$
|
| 126 |
+
\mathcal{L}_{ona} = \left\| \tan\theta_{pred} - \tan\theta_{gt} \right\|_1 \tag{5}
|
| 127 |
+
$$
|
| 128 |
+
|
| 129 |
+
The height head loss of our MLS-BRN is calculated by Eq. (6), in which $\mathcal{L}_h$ denotes the height loss; $h_{pred}$ denotes the predicted building height.
|
| 130 |
+
|
| 131 |
+
$$
|
| 132 |
+
\mathcal{L}_h = \left\| h_{pred} - h_{gt} \right\|_1 \tag{6}
|
| 133 |
+
$$
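Read together, Eqs. (4)-(6) are plain L1 penalties. A schematic PyTorch version, where mean reduction over the batch is our assumption, could look like:

```python
import torch

def offset_angle_loss(v_pred, v_gt, lam1=0.1):
    """Eq. (4): L1 on the predicted offset direction plus a unit-norm penalty."""
    l_ang = (v_pred - v_gt).abs().sum(dim=-1)
    l_reg = (v_pred.norm(dim=-1) - 1.0).abs()
    return (l_ang + lam1 * l_reg).mean()

def off_nadir_angle_loss(theta_pred, theta_gt):
    """Eq. (5): L1 between the tangents of predicted and GT off-nadir angles."""
    return (torch.tan(theta_pred) - torch.tan(theta_gt)).abs().mean()

def height_loss(h_pred, h_gt):
    """Eq. (6): L1 on instance-wise real-world building heights."""
    return (h_pred - h_gt).abs().mean()
```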
|
| 134 |
+
|
| 135 |
+
# 3.3.2 Multi-level training strategy
|
| 136 |
+
|
| 137 |
+
In our proposed unified framework, all the training samples can be graded into three levels according to their level of supervision (Fig. 1):
|
| 138 |
+
|
| 139 |
+
- Level 1 samples: samples with only instance-wise footprint annotation, which are denoted by $\mathcal{X}^N = \{x_1^N, x_2^N, \dots, x_{n_3}^N\}$ . $N$ means no additional supervision.
|
| 140 |
+
- Level 2 samples: samples with instance-wise footprint and building height annotation, which are denoted by $\mathcal{X}^H = \{x_1^H,x_2^H,\dots,x_{n_2}^H\}$ .
|
| 141 |
+
- Level 3 samples: samples with instance-wise footprint, offset, and building height annotation, which are denoted by $\mathcal{X}^{OH} = \{x_1^{OH}, x_2^{OH}, \dots, x_{n_1}^{OH}\}$ .
|
| 142 |
+
|
| 143 |
+
Different levels of samples are supervised by different training strategies. As defined in Eq. (7), the loss function for $\mathcal{X}^N$ is only based on $\mathcal{L}_f$ .
|
| 144 |
+
|
| 145 |
+
$$
|
| 146 |
+
\mathcal{L}_{\mathcal{X}^N} = \mathcal{L}_f \tag{7}
|
| 147 |
+
$$
|
| 148 |
+
|
| 149 |
+
The loss function for $\mathcal{X}^H$ is defined in Eq. (8). In $\mathcal{L}_{\mathcal{X}^H}$ , the $\mathcal{L}_{rp}$ is activated since the PBC module can predict a high-quality pseudo building bbox, which is good enough to supervise the RPN module.
|
| 150 |
+
|
| 151 |
+
$$
|
| 152 |
+
\begin{aligned} \mathcal{L}_{\mathcal{X}^H} &= \mathcal{L}_{\mathcal{X}^N} + \alpha_1 \mathcal{L}_{rp} + \alpha_2 \mathcal{L}_h \\ &= \mathcal{L}_f + \alpha_1 \mathcal{L}_{rp} + \alpha_2 \mathcal{L}_h \end{aligned} \tag{8}
|
| 153 |
+
$$
|
| 154 |
+
|
| 155 |
+
The loss function for $\mathcal{X}^{OH}$ is defined in Eq. (9). Compared with the original $\mathcal{L}_{LF}$ , $\mathcal{L}_{\mathcal{X}^{OH}}$ adds four more losses: $\mathcal{L}_f$ , $\mathcal{L}_h$ , $\mathcal{L}_{ona}$ , $\mathcal{L}_{ova}$ . The $\mathcal{L}_{ona}$ and $\mathcal{L}_{ova}$ are used for training the two angle heads of the PBC module.
|
| 156 |
+
|
| 157 |
+
$$
|
| 158 |
+
\begin{aligned} \mathcal{L}_{\mathcal{X}^{OH}} &= \mathcal{L}_{\mathcal{X}^H} + \alpha_3 \mathcal{L}_{rc} + \alpha_4 \mathcal{L}_{mh} + \alpha_5 \mathcal{L}_o + \alpha_6 \mathcal{L}_{ona} + \alpha_7 \mathcal{L}_{ova} \\ &= \mathcal{L}_{LF} + \mathcal{L}_f + \alpha_2 \mathcal{L}_h + \alpha_6 \mathcal{L}_{ona} + \alpha_7 \mathcal{L}_{ova} \end{aligned} \tag{9}
|
| 159 |
+
$$
|
| 160 |
+
|
| 161 |
+
The final hybrid loss is defined as the total loss of the three levels of training samples according to Eq. (10).
|
| 162 |
+
|
| 163 |
+
$$
|
| 164 |
+
\mathcal{L} = \mathcal{L}_{\mathcal{X}^N} + \mathcal{L}_{\mathcal{X}^H} + \mathcal{L}_{\mathcal{X}^{OH}} \tag{10}
|
| 165 |
+
$$
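The per-level losses in Eqs. (7)-(9) differ only in which heads a sample's annotations can supervise, and Eq. (10) sums the result over the three groups. A hedged sketch of that selection logic follows; the dictionary keys and level flags are our own naming, not the paper's API.

```python
def sample_loss(losses, level,
                a=dict(a1=1, a2=32, a3=1, a4=1, a5=16, a6=1, a7=8)):
    """Pick the loss terms a sample's annotation level supports.

    `losses` maps head names ('f', 'rp', 'rc', 'mh', 'o', 'h', 'ona',
    'ova') to already-computed loss tensors; weights follow Sec. 3.4.
    """
    total = losses['f']                       # Eq. (7): footprint only
    if level in ('H', 'OH'):                  # Eq. (8): + RPN and height
        total = total + a['a1'] * losses['rp'] + a['a2'] * losses['h']
    if level == 'OH':                         # Eq. (9): + R-CNN, mask,
        total = (total + a['a3'] * losses['rc']   # offset, and angle heads
                 + a['a4'] * losses['mh'] + a['a5'] * losses['o']
                 + a['a6'] * losses['ona'] + a['a7'] * losses['ova'])
    return total
```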
|
| 166 |
+
|
| 167 |
+
# 3.4. Implementation details
|
| 168 |
+
|
| 169 |
+
As shown in Fig. 2, we use ResNet-50 [12] with FPN [23] pre-trained on ImageNet as the backbone. All models are trained with a batch size of 4 on NVIDIA 3090 GPUs. To align with LOFT-FOA [32], we train all models for 24 epochs, with the learning rate starting from 0.01 and decaying by a factor of 0.1 at the $16^{th}$ and $22^{nd}$ epochs. The SGD algorithm with a weight decay of 0.0001 and a momentum of 0.9 is used for all experiments. LOFT-FOA [32] is used as the basic architecture of the MLS-BRN model, and all the hyperparameters shared by LOFT-FOA [32] and MLS-BRN are kept the same, except for the learning rate mentioned above. All models are implemented in PyTorch.
|
| 170 |
+
|
| 171 |
+
In Eq. (4), we set $\lambda_{1} = 0.1$ to balance the two loss items. In Eq. (8), we set $\alpha_{1} = 1$ to keep the loss weight of ROFE the same as the roof mask head, and set $\alpha_{2} = 32$ since the absolute building height loss value is relatively small. In Eq. (9), we set $\alpha_{3} = \alpha_{4} = 1, \alpha_{5} = 16$ to keep them the same as LOFT-FOA [32], and set $\alpha_{6} = 1, \alpha_{7} = 8$ to balance the effects of the magnitude of these two losses.
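For reference, this optimization recipe maps directly onto standard PyTorch components; the sketch below uses a stand-in module in place of the full MLS-BRN and omits the data pipeline.

```python
import torch

model = torch.nn.Linear(8, 1)  # stand-in for the full MLS-BRN
optimizer = torch.optim.SGD(model.parameters(), lr=0.01,
                            momentum=0.9, weight_decay=1e-4)
# Decay the learning rate by 0.1 at the 16th and 22nd of 24 epochs.
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[16, 22], gamma=0.1)

for epoch in range(24):
    # ... one training pass with batch size 4 ...
    scheduler.step()
```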
|
| 172 |
+
|
| 173 |
+
# 4. Experiments
|
| 174 |
+
|
| 175 |
+
# 4.1. Datasets
|
| 176 |
+
|
| 177 |
+
In our experiments, we employ multi-supervised datasets for training our methods: (1) BONAI [32] provides building footprint segmentation, offset, and height annotations, which contains 3,000 and 300 images for train-val and test respectively; (2) OmniCity-view3 [21] originally provides satellite images with annotations for footprint segmentation and building height. We add additional offset annotations for 17,092 and 4,929 images from train-val and test sets respectively; (3) Additionally, we release a new dataset named HK, which includes 500 and 119 satellite images specifically captured from Hong Kong for train-val and test sets, along with annotations for footprint segmentation, offset and height.
|
| 178 |
+
|
| 179 |
+
As detailed in Sec. 3, all our training samples are graded into three levels: samples from $\mathcal{X}^N$ , $\mathcal{X}^H$ , and $\mathcal{X}^{OH}$ . To create different levels of training samples, we extract samples from the datasets mentioned above, reorganizing their annotations as necessary. We randomly choose $30\%$ of the samples from the BONAI dataset [32] as a smaller $\mathcal{X}^{OH}$ dataset, which we call $BN_{30}$ . We randomly drop the offset annotations of $70\%$ of the samples in the BONAI dataset [32], regard the entire BONAI [32] dataset as a $\mathcal{X}^{OH} + \mathcal{X}^H$ dataset, and name it $BN_{30/70}$ . Similarly, the original BONAI dataset [32] is regarded as a large $\mathcal{X}^{OH}$ dataset and is named $BN_{100}$ . We use $OC$ to designate the OmniCity-view3 dataset [21]; naturally, the abbreviations $OC_{30}$ , $OC_{30/70}$ , and $OC_{100}$ have meanings analogous to $BN_{30}$ , $BN_{30/70}$ , and $BN_{100}$ respectively. Moreover, we use $BH$
|
| 180 |
+
|
| 181 |
+
to refer to the combination of BONAI [32] and HK. It is important to note that in $BH_{30/70}$ , $30\%$ of BONAI's [32] samples are $\mathcal{X}^{OH}$ type while the remaining $70\%$ are $\mathcal{X}^H$ type. Additionally, $30\%$ of HK's samples belong to $\mathcal{X}^{OH}$ type and the remaining $70\%$ belong to $\mathcal{X}^N$ type.
|
| 182 |
+
|
| 183 |
+
# 4.2. Performance comparison
|
| 184 |
+
|
| 185 |
+
In this section, we evaluate our method's performance in footprint segmentation, offset prediction, and height prediction against several competitive methods in the single-level supervised learning scenario. In the multi-level supervised learning scenario, we mainly compare our method with LOFT-FOA [32]. Additionally, we present our method's offset-angle and off-nadir-angle prediction performance. More results are provided in the supplementary materials.
|
| 186 |
+
|
| 187 |
+
Single-level supervised learning. The performance of footprint segmentation and offset prediction for different methods trained on $BN_{100}$ and $OC_{100}$ is listed in Tab. 1 and Tab. 2, respectively. Additionally, Fig. 3 provides a qualitative comparison of footprint segmentation results on the BONAI [32] test set. Note that all the experimental results in this section are obtained using $\mathcal{X}^{OH}$ samples; the results obtained using $\mathcal{X}^H$ and $\mathcal{X}^N$ samples are analysed in the following paragraph. For the footprint segmentation task, experimental results on $BN_{100}$ demonstrate that our method improves the F1-score by $5.42\% - 8.30\%$ compared with the instance segmentation methods that directly extract the building footprints. Furthermore, our method enhances the F1-score by $2.05\% - 2.76\%$ on $BN_{100}$ relative to MTBR-Net [19] and LOFT-FOA [32], which are specifically designed for extracting off-nadir building footprints based on predicted roof and offset. Regarding the offset prediction task, our approach improves the EPE by 0.18 - 0.93 in comparison to MTBR-Net [19] and LOFT-FOA [32] on $BN_{100}$ . These results show that the direct supervision of the footprint segmentation, the constraint on the building height, and the encouragement of angular feature extraction help to achieve better performance in the footprint segmentation and offset prediction tasks in the single-level supervised learning scenario.
|
| 188 |
+
|
| 189 |
+
<table><tr><td>method</td><td>F1</td><td>Precision</td><td>Recall</td><td>EPE</td></tr><tr><td>PANet [17]</td><td>58.06</td><td>59.26</td><td>56.91</td><td>-</td></tr><tr><td>HRNetv2 [30]</td><td>60.81</td><td>61.20</td><td>60.42</td><td>-</td></tr><tr><td>M R-CNN [13]</td><td>58.12</td><td>59.26</td><td>57.03</td><td>-</td></tr><tr><td>CM R-CNN [1]</td><td>60.94</td><td>67.09</td><td>55.83</td><td>-</td></tr><tr><td>MTBR-Net [19]</td><td>63.60</td><td>64.34</td><td>62.87</td><td>5.69</td></tr><tr><td>LOFT-FOA [32]</td><td>64.31</td><td>63.37</td><td>65.29</td><td>4.94</td></tr><tr><td>Ours</td><td>66.36</td><td>65.90</td><td>66.83</td><td>4.76</td></tr></table>
|
| 190 |
+
|
| 191 |
+
Table 1. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\%) and offset prediction results in terms of EPE trained on $BN_{100}$ .
|
| 192 |
+
|
| 193 |
+
<table><tr><td>method</td><td>F1</td><td>Precision</td><td>Recall</td><td>EPE</td></tr><tr><td>M R-CNN [13]</td><td>69.75</td><td>69.74</td><td>69.76</td><td>-</td></tr><tr><td>LOFT-FOA [32]</td><td>70.46</td><td>68.77</td><td>72.23</td><td>6.08</td></tr><tr><td>Ours</td><td>72.25</td><td>69.57</td><td>75.14</td><td>5.38</td></tr></table>
|
| 194 |
+
|
| 195 |
+

|
| 196 |
+
Figure 3. The results of the baselines and our method trained on $BN_{100}$ and tested on the BONAI test set in terms of the footprint segmentation performance. The yellow, cyan, and red polygons denote the TP, FP, and FN.
|
| 197 |
+
|
| 198 |
+
Table 2. Building footprint segmentation results of different methods in terms of F1-score, precision, recall (\%) and offset prediction results in terms of EPE trained on $OC_{100}$ .
|
| 199 |
+
|
| 200 |
+
<table><tr><td>method</td><td>dataset</td><td>sample</td><td>F1-score</td><td>EPE</td></tr><tr><td>LOFT-FOA [32]</td><td>BN30</td><td>XOH</td><td>61.35</td><td>5.70</td></tr><tr><td>Ours</td><td>BN30/70</td><td>XOH+XH</td><td>65.49</td><td>5.39</td></tr><tr><td>LOFT-FOA [32]</td><td>BN100</td><td>XOH</td><td>64.31</td><td>4.94</td></tr><tr><td>Ours</td><td>BN100</td><td>XOH</td><td>66.36</td><td>4.76</td></tr><tr><td>LOFT-FOA [32]</td><td>OC30</td><td>XOH</td><td>67.09</td><td>6.08</td></tr><tr><td>Ours</td><td>OC30/70</td><td>XOH+XH</td><td>70.53</td><td>5.92</td></tr><tr><td>LOFT-FOA [32]</td><td>OC100</td><td>XOH</td><td>70.46</td><td>5.38</td></tr><tr><td>Ours</td><td>OC100</td><td>XOH</td><td>72.25</td><td>5.38</td></tr><tr><td>LOFT-FOA [32]</td><td>BH30</td><td>XOH</td><td>54.96</td><td>5.78</td></tr><tr><td>Ours</td><td>BH30/70</td><td>XOH+XH+XN</td><td>58.57</td><td>5.60</td></tr><tr><td>LOFT-FOA [32]</td><td>BH100</td><td>XOH</td><td>60.85</td><td>4.74</td></tr><tr><td>Ours</td><td>BH100</td><td>XOH</td><td>60.92</td><td>4.69</td></tr></table>
|
| 201 |
+
|
| 202 |
+
Table 3. Building footprint segmentation results of different methods in terms of F1-score (\%) and offset prediction results in terms of EPE trained on different datasets.
|
| 203 |
+
|
| 204 |
+
Multi-level supervised learning. Tab. 3 displays the footprint segmentation and offset prediction performance of LOFT-FOA [32] and our method when trained and tested on multi-level supervision datasets. Our approach, trained on $BN_{30/70}$ , $OC_{30/70}$ and $BH_{30/70}$ , improves the F1-score by $4.14\%$ , $3.44\%$ and $3.61\%$ compared to LOFT-FOA [32] trained on $BN_{30}$ , $OC_{30}$ and $BH_{30}$ . Additionally, our method trained on samples from $BN_{30/70}$ , $OC_{30/70}$ and $BH_{30/70}$ exhibits performance similar to LOFT-FOA [32] trained on samples from $BN_{100}$ , $OC_{100}$ and $BH_{100}$ . These findings demonstrate the effectiveness of MLS-BRN in combining samples from the $\mathcal{X}^{OH}$ , $\mathcal{X}^H$ and $\mathcal{X}^N$ levels to address the building reconstruction task.
|
| 205 |
+
|
| 206 |
+
Building height and angle prediction. Tab. 4 displays the building height prediction results. The experimental findings indicate that our method improves the height MAE by 0.22 - 4.33 and the height RMSE by 0.51 - 7.60 in comparison to SARPN [2], DORN [7], and LOFT-FOA+H. It is worth noting that SARPN [2] and DORN [7] predict pixel-wise building height, whereas MLS-BRN predicts instance-wise building height. As far as we know, MLS-BRN is the first method to predict instance-wise real-world building height. Thus, we add a building height head directly to LOFT-FOA [32] (i.e. LOFT-FOA+H) and compare its prediction results with those of our method. Fig. 4 presents the qualitative building height prediction results from our method and LOFT-FOA+H. Regarding the angle prediction tasks, when trained on $BN_{100}$ , the PBC module achieves an MAE of 9.92 for offset angle prediction and an MAE of 1.22 for off-nadir angle prediction. The performance increase demonstrates the efficacy of the PBC, the ROFE, and the building height prediction module in the single-level supervised learning scenario.
|
| 207 |
+
|
| 208 |
+
<table><tr><td>method</td><td>height MAE</td><td>height RMSE</td></tr><tr><td>SARPN [2]</td><td>15.23</td><td>28.69</td></tr><tr><td>DORN [7]</td><td>13.40</td><td>27.03</td></tr><tr><td>LOFT-FOA+H</td><td>11.12</td><td>21.60</td></tr><tr><td>Ours</td><td>10.90</td><td>21.09</td></tr></table>
|
| 209 |
+
|
| 210 |
+
Table 4. Building height prediction results of different methods in terms of MAE and RMSE trained on $OC_{100}$ and tested on the OmniCity-view3 test set.
|
| 211 |
+
|
| 212 |
+
<table><tr><td>method</td><td>F1-score</td><td>Precision</td><td>Recall</td><td>EPE</td></tr><tr><td>baseline</td><td>61.35</td><td>61.84</td><td>61.65</td><td>5.70</td></tr><tr><td>+PBC</td><td>62.32</td><td>62.28</td><td>62.35</td><td>5.53</td></tr><tr><td>+ROFE</td><td>62.87</td><td>63.89</td><td>62.15</td><td>5.63</td></tr><tr><td>+PBC+ROFE</td><td>65.40</td><td>66.74</td><td>64.12</td><td>5.49</td></tr></table>
|
| 213 |
+
|
| 214 |
+
Table 5. Footprint segmentation results of different modules in terms of F1-score, precision, recall (\%) and offset prediction results in terms of EPE.
|
| 215 |
+
|
| 216 |
+

|
| 217 |
+
Figure 4. The visualization results of building height prediction from our method and LOFT-FOA+H on the OmniCity-view3 test set.
|
| 218 |
+
|
| 219 |
+
# 4.3. Ablation study
|
| 220 |
+
|
| 221 |
+
In this section, we examine the impact of the principal new components of our method: (1) the PBC module; (2) the ROFE module; and (3) the building height head. Additionally, we analyze the outcome of the data ablation experiment in the multi-level supervised learning setting.
|
| 222 |
+
|
| 223 |
+
Module ablation. The outcomes obtained by adding the aforementioned modules successively on $BN_{30/70}$ are detailed in Tab. 5. The table reports the F1-score for footprint segmentation and the EPE for offset prediction. LOFT-FOA [32] is trained on $BN_{30}$ and serves as the baseline. The second row (+PBC) illustrates the results obtained by applying the PBC module to LOFT-FOA [32]. The results indicate that incorporating the two angle prediction tasks enhances the F1-score of footprint extraction by $0.97\%$ . It should be noted that the added offset-unknown $70\%$ of samples in $BN_{30/70}$ , which lack angle ground truth, do not contribute to PBC's training. The third row (+ROFE) displays the outcomes achieved by applying the ROFE module to LOFT-FOA [32]. Compared with the baseline, predicting the footprint segmentation guided by the predicted offset and roof, coupled with the additional $70\%$ offset-unknown samples from $BN_{30/70}$ , leads to a $1.52\%$ improvement in the F1-score. The fourth row (+PBC+ROFE) indicates that the simultaneous inclusion of the PBC and ROFE modules improves the F1-score of footprint extraction by $4.05\%$ . These results show that the PBC and ROFE modules help to enhance the accuracy of footprint segmentation and offset prediction.
|
| 224 |
+
|
| 225 |
+
Data ablation. The outcomes of our approach trained on various dataset combinations concerning F1-score for footprint segmentation, and EPE for offset prediction are
|
| 226 |
+
|
| 227 |
+

|
| 228 |
+
|
| 229 |
+

|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
|
| 233 |
+

|
| 234 |
+
|
| 235 |
+

|
| 236 |
+
Shanghai
|
| 237 |
+
|
| 238 |
+

|
| 239 |
+
Xi'an
|
| 240 |
+
|
| 241 |
+

|
| 242 |
+
Hong Kong
|
| 243 |
+
Figure 5. 3D reconstruction results of Shanghai, Xi'an, Hong Kong, and New York obtained using our method. The remote sensing images for Shanghai and Xi'an are chosen from the BONAI test set, whereas the remote sensing image for New York is chosen from the OmniCity-view3 test set.
|
| 244 |
+
|
| 245 |
+

|
| 246 |
+
New York
|
| 247 |
+
|
| 248 |
+
shown in Tab. 6. The first row $(\mathcal{X}^{OH})$ displays the results of training LOFT-FOA [32] on $30\%$ of OmniCity-view3 [21] $\mathcal{X}^{OH}$ samples $(OC_{30})$ . The second row $(\mathcal{X}^{OH} + \mathcal{X}^{H})$ shows the results of our method trained on a mix of $30\%$ of OmniCity-view3 [21] $\mathcal{X}^{OH}$ samples $(OC_{30})$ and $30\%$ of OmniCity-view3 $\mathcal{X}^{H}$ samples. The results demonstrate a $3.28\%$ improvement in F1-score for footprint extraction compared to LOFT-FOA [32] trained solely on $OC_{30}$ . The third row $(\mathcal{X}^{OH} + \mathcal{X}^{H} + \mathcal{X}^{N})$ presents the outcomes of our method trained on a mix of $30\%$ of OmniCity-view3 [21] $\mathcal{X}^{OH}$ samples, $30\%$ of OmniCity-view3 [21] $\mathcal{X}^{H}$ samples, and the remaining $40\%$ of OmniCity-view3 [21] $\mathcal{X}^{N}$ samples. The results demonstrate a $0.44\%$ increase in F1-score compared to our method trained on $\mathcal{X}^{OH} + \mathcal{X}^{H}$ , indicating the effectiveness of including $\mathcal{X}^{N}$ samples. The reason for training LOFT-FOA [32] instead of our method on $OC_{30}$ (first row) is to evaluate the gain our method achieves in a scenario where $\mathcal{X}^{H}$ and $\mathcal{X}^{N}$ samples are available.
<table><tr><td>data</td><td>F1</td><td>Precision</td><td>Recall</td><td>EPE</td></tr><tr><td>$\mathcal{X}^{OH}$</td><td>67.09</td><td>63.23</td><td>71.47</td><td>6.08</td></tr><tr><td>$\mathcal{X}^{OH}+\mathcal{X}^{H}$</td><td>70.37</td><td>65.35</td><td>76.24</td><td>5.99</td></tr><tr><td>$\mathcal{X}^{OH}+\mathcal{X}^{H}+\mathcal{X}^{N}$</td><td>70.81</td><td>66.15</td><td>76.18</td><td>5.84</td></tr></table>
Table 6. Building footprint segmentation results in terms of F1-score, precision, and recall (%), and offset prediction results in terms of EPE (pixels), for our method trained on different dataset combinations.
# 4.4. 3D reconstruction results of different cities
Fig. 5 shows the 3D reconstruction results of four cities (i.e., Shanghai, Xi'an, Hong Kong, and New York) obtained using our method. The results demonstrate the effectiveness of our method for 3D building reconstruction across different cities. Note that we use the method in [38] to regularize the predicted building footprint masks.
# 5. Conclusion
In this paper, we have presented a new method for multi-level supervised building reconstruction from monocular remote sensing images, which is capable of reconstructing accurate 3D building models using samples of different annotation levels. Qualitative and quantitative evaluations confirm that our method achieves competitive performance and significantly enhances 3D building reconstruction capability in comparison to the current state-of-the-art across diverse experimental settings. The effects of the Pseudo Building Bbox Calculator and the Roof-Offset guided Footprint Extractor, as well as the annotation levels of the samples, were also analyzed in the ablation study. Furthermore, we expanded the monocular building reconstruction datasets to encompass additional cities. We believe that our approach offers an efficient and cost-effective solution for 3D building reconstruction in complex real-world scenes. In future work, we would like to investigate more effective strategies to improve 3D building reconstruction performance whilst exploring more adaptable and practical techniques for large-scale city modeling.
Acknowledgements. This project was funded in part by the National Natural Science Foundation of China (Grants No. 42201358 and No. 62325111) and the Shanghai Artificial Intelligence Laboratory.
# References
[1] Kai Chen, Jiangmiao Pang, Jiaqi Wang, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jianping Shi, Wanli Ouyang, et al. Hybrid task cascade for instance segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4974-4983, 2019. 6

[2] Xiaotian Chen, Xuejin Chen, and Zheng-Jun Zha. Structure-aware residual pyramid network for monocular depth estimation. In Proceedings of the 28th International Joint Conference on Artificial Intelligence, pages 694-700, 2019. 7

[3] Yujin Chen, Zhigang Tu, Liuhao Ge, Dejun Zhang, Ruizhi Chen, and Junsong Yuan. So-handnet: Self-organizing network for 3d hand pose estimation with semi-supervised learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6961-6970, 2019. 3

[4] Gordon Christie, Rodrigo Rene Rai Munoz Abujder, Kevin Foster, Shea Hagstrom, Gregory D Hager, and Myron Z Brown. Learning geocentric object pose in oblique monocular images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14512-14520, 2020. 1, 2, 3

[5] Gordon Christie, Kevin Foster, Shea Hagstrom, Gregory D Hager, and Myron Z Brown. Single view geocentric pose in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1162-1171, 2021. 1, 3

[6] Liuyun Duan and Florent Lafarge. Towards large-scale city reconstruction from satellites. In European Conference on Computer Vision (ECCV), 2016. 1

[7] Huan Fu, Mingming Gong, Chaohui Wang, Kayhan Batmanghelich, and Dacheng Tao. Deep ordinal regression network for monocular depth estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2002-2011, 2018. 7

[8] Zhi Gao, Wenbo Sun, Yao Lu, Yichen Zhang, Weiwei Song, Yongjun Zhang, and Ruifang Zhai. Joint learning of semantic segmentation and height estimation for remote sensing image leveraging contrastive learning. IEEE Transactions on Geoscience and Remote Sensing, 2023. 2

[9] Pedram Ghamisi and Naoto Yokoya. Img2dsm: Height simulation from single imagery using conditional generative adversarial net. IEEE Geoscience and Remote Sensing Letters, pages 1-5, 2018. 2

[10] JunYoung Gwak, Christopher B Choy, Manmohan Chandraker, Animesh Garg, and Silvio Savarese. Weakly supervised 3d reconstruction with adversarial constraint. In 2017 International Conference on 3D Vision (3DV), pages 263-272. IEEE, 2017. 3

[11] Junwei Han, Yang Yang, Dingwen Zhang, Dong Huang, Dong Xu, and Fernando De La Torre. Weakly-supervised learning of category-specific 3d object shapes. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(4):1423-1437, 2021. 3

[12] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 770-778, 2016. 5

[13] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 2961-2969, 2017. 4, 6

[14] Rongrong Ji, Ke Li, Yan Wang, Xiaoshuai Sun, Feng Guo, Xiaowei Guo, Yongjian Wu, Feiyue Huang, and Jiebo Luo. Semi-supervised adversarial monocular depth estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42(10):2410-2422, 2019. 3

[15] Saket Kunwar. U-net ensemble for semantic and height estimation using coarse-map initialization. In IGARSS 2019 - 2019 IEEE International Geoscience and Remote Sensing Symposium, pages 4959-4962. IEEE, 2019. 1, 2

[16] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. Robust model-based face reconstruction through weakly-supervised outlier segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 372-381, 2023. 3

[17] Muxingzi Li, Florent Lafarge, and Renaud Marlet. Approximating shapes in images with low-complexity polygons. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 6

[18] Qingyu Li, Lichao Mou, Yuansheng Hua, Yilei Shi, Sining Chen, Yao Sun, and Xiao Xiang Zhu. 3dcentripetalnet: Building height retrieval from monocular remote sensing imagery. International Journal of Applied Earth Observation and Geoinformation, 120:103311, 2023. 2

[19] Weijia Li, Lingxuan Meng, Jinwang Wang, Conghui He, Gui-Song Xia, and Dahua Lin. 3d building reconstruction from monocular remote sensing images. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12548-12557, 2021. 1, 2, 3, 6

[20] Weijia Li, Wenqian Zhao, Huaping Zhong, Conghui He, and Dahua Lin. Joint semantic-geometric learning for polygonal building segmentation. In AAAI, 2021. 2

[21] Weijia Li, Yawen Lai, Linning Xu, Yuanbo Xiangli, Jinhua Yu, Conghui He, Gui-Song Xia, and Dahua Lin. Omnicity: Omnipotent city understanding with multi-level and multi-view images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17397-17407, 2023. 5, 8

[22] Zuoyue Li, Jan Dirk Wegner, and Aurélien Lucchi. Topological map extraction from overhead images. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 1715-1724, 2019. 2

[23] Tsung-Yi Lin, Piotr Dollár, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2117-2125, 2017. 5

[24] Jisan Mahmud, True Price, Akash Bapat, and Jan-Michael Frahm. Boundary-aware 3d building reconstruction from a single overhead image. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 1, 2

[25] Yongqiang Mao, Kaiqiang Chen, Liangjin Zhao, Wei Chen, Deke Tang, Wenjie Liu, Zhirui Wang, Wenhui Diao, Xian Sun, and Kun Fu. Elevation estimation-driven building 3d reconstruction from single-view remote sensing imagery. IEEE Transactions on Geoscience and Remote Sensing, 2023. 1, 2

[26] Rahul Mitra, Nitesh B Gundavarapu, Abhishek Sharma, and Arjun Jain. Multiview-consistent semi-supervised learning for 3d human pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6907-6916, 2020. 3

[27] Natalia Neverova, Christian Wolf, Florian Nebout, and Graham W Taylor. Hand pose estimation through semi-supervised and weakly-supervised learning. Computer Vision and Image Understanding, 164:56-67, 2017. 3

[28] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 234-241. Springer, 2015. 3

[29] Shivangi Srivastava, Michele Volpi, and Devis Tuia. Joint height estimation and semantic labeling of monocular aerial images with cnns. In IGARSS 2017 - IEEE International Geoscience and Remote Sensing Symposium, 2017. 1, 2

[30] Ke Sun, Yang Zhao, Borui Jiang, Tianheng Cheng, Bin Xiao, Dong Liu, Yadong Mu, Xinggang Wang, Wenyu Liu, and Jingdong Wang. High-resolution representations for labeling pixels and regions. arXiv preprint arXiv:1904.04514, 2019. 6

[31] Vivek Verma, Rakesh Kumar, and Stephen Hsu. 3d building detection and modeling from aerial lidar data. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2006. 1

[32] Jinwang Wang, Lingxuan Meng, Weijia Li, Wen Yang, Lei Yu, and Gui-Song Xia. Learning to extract building footprints from off-nadir aerial images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(1):1294-1301, 2022. 1, 2, 3, 4, 5, 6, 7, 8

[33] Zhitong Xiong, Wei Huang, Jingtao Hu, and Xiao Xiang Zhu. THE benchmark: Transferable representation learning for monocular height estimation. IEEE Transactions on Geoscience and Remote Sensing, 2023. 1, 2

[34] Guandao Yang, Yin Cui, Serge Belongie, and Bharath Hariharan. Learning single-view 3d reconstruction with limited pose supervision. In Proceedings of the European Conference on Computer Vision (ECCV), pages 86-101, 2018. 3

[35] Jiangye Yuan. Learning building extraction in aerial scenes with convolutional networks. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(11):2793-2798, 2017. 2

[36] Wufan Zhao, Claudio Persello, and Alfred Stein. Building outline delineation: From aerial images to polygons with an improved end-to-end learning framework. ISPRS Journal of Photogrammetry and Remote Sensing, 175:119-131, 2021. 2

[37] Zhuo Zheng, Yanfei Zhong, and Junjue Wang. Pop-net: Encoder-dual decoder for semantic segmentation and single-view height estimation. In IGARSS 2019 - 2019 IEEE International Geoscience and Remote Sensing Symposium, pages 4963-4966. IEEE, 2019. 1, 2

[38] Stefano Zorzi, Ksenia Bittner, and Friedrich Fraundorfer. Machine-learned regularization and polygonization of building segmentation masks. In 2020 25th International Conference on Pattern Recognition (ICPR), pages 3098-3105. IEEE, 2021. 8

[39] Stefano Zorzi, Shabab Bazrafkan, Stefan Habenschuss, and Friedrich Fraundorfer. Polyworld: Polygonal building extraction with graph neural networks in satellite images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1848-1857, 2022. 2
3dbuildingreconstructionfrommonocularremotesensingimageswithmultilevelsupervisions/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1b68036e1c9eb460a2d423a8fd83996be0c988a321a81749cbd5a08ad751695f
size 624987
3dbuildingreconstructionfrommonocularremotesensingimageswithmultilevelsupervisions/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7e9379abdaaac2210d33b4b584e828a416409ff0cb192ca4f4111113ad3871b5
size 451031
3dfacereconstructionwiththegeometricguidanceoffacialpartsegmentation/29911afb-57cf-4105-bc3b-b432a117add8_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3db115be28e476062a76228c20198a587290db52ebb8632ad5bbd87535df0b35
size 87979
3dfacereconstructionwiththegeometricguidanceoffacialpartsegmentation/29911afb-57cf-4105-bc3b-b432a117add8_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d7ccf84943cd8ba403a942ffb2f33401e780ffb1c5d9096b9ac7121c7a4040c6
size 110696
3dfacereconstructionwiththegeometricguidanceoffacialpartsegmentation/29911afb-57cf-4105-bc3b-b432a117add8_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:688025ef9dd8f345b585c21c97048baf46635525338d14b9dd96a8a356d19c0c
size 10150991
3dfacereconstructionwiththegeometricguidanceoffacialpartsegmentation/full.md
ADDED
@@ -0,0 +1,381 @@
# 3D Face Reconstruction with the Geometric Guidance of Facial Part Segmentation
Zidu Wang $^{1,2}$ , Xiangyu Zhu $^{1,2*}$ , Tianshuo Zhang $^{1,2}$ , Baiqin Wang $^{1,2}$ , Zhen Lei $^{1,2,3}$

$^{1}$ State Key Laboratory of Multimodal Artificial Intelligence Systems, Institute of Automation, Chinese Academy of Sciences

$^{2}$ School of Artificial Intelligence, University of Chinese Academy of Sciences

$^{3}$ Centre for Artificial Intelligence and Robotics, Hong Kong Institute of Science & Innovation, Chinese Academy of Sciences

{wangzidu2022, wangbaiqin2024}@ia.ac.cn, {xiangyu.zhu, tianshuo.zhang, zlei}@nlpr.ia.ac.cn
# Abstract
3D Morphable Models (3DMMs) provide promising 3D face reconstructions in various applications. However, existing methods struggle to reconstruct faces with extreme expressions due to deficiencies in supervisory signals, such as sparse or inaccurate landmarks. Segmentation information contains effective geometric contexts for face reconstruction. Certain attempts intuitively depend on differentiable renderers to compare the rendered silhouettes of the reconstruction with the segmentation, which is prone to issues like local optima and gradient instability. In this paper, we fully utilize the facial part segmentation geometry by introducing Part Re-projection Distance Loss (PRDL). Specifically, PRDL transforms facial part segmentation into 2D points and re-projects the reconstruction onto the image plane. Subsequently, by introducing grid anchors and computing different statistical distances from these anchors to the point sets, PRDL establishes geometry descriptors to optimize the distribution of the point sets for face reconstruction. PRDL exhibits a clear gradient compared to the renderer-based methods and presents state-of-the-art reconstruction performance in extensive quantitative and qualitative experiments. Our project is available at https://github.com/wang-zidu/3DDFA-V3.
# 1. Introduction
Reconstructing 3D faces from 2D images is an essential task in computer vision and graphics, with diverse applications in fields such as Virtual Reality (VR), Augmented Reality (AR), and Computer-Generated Imagery (CGI). In applications like VR makeup and AR emoji, 3DMMs [5] are commonly employed for precise facial feature positioning and for capturing expressions. One of the most critical concerns is ensuring that the reconstructed facial components, including the eyes, eyebrows, lips, etc., seamlessly align with their corresponding regions in the input image with pixel-level accuracy, particularly when dealing with extreme facial expressions, as shown in Fig. 1.

Figure 1. We introduce Part Re-projection Distance Loss (PRDL) for 3D face reconstruction, leveraging the geometric guidance provided by facial part segmentation. PRDL enhances the alignment of reconstructed facial features with the original image and excels in capturing extreme expressions.
Although current methods [11, 14, 17, 19, 25] have made notable strides in face reconstruction, some issues persist. On the one hand, existing works often rely on landmarks [17, 60] and photometric texture [12, 45] to guide face reconstruction. In the case of extreme facial expressions, landmarks are sparse or inaccurate, and the gradient from the texture loss cannot directly constrain the shape [59], making it challenging for existing methods to achieve precise alignment of facial features in 3D face reconstruction, as depicted in Fig. 2(a). On the other hand, many methods primarily adopt 3D errors as a quality metric, overlooking the precise alignment of facial parts. As shown in Fig. 2(b), when evaluating the REALY [7] benchmark in the eye region and comparing the results of 3DDFA-v2 [17] and DECA [14], a lower 3D region error may not lead to better 2D region alignment. We believe the geometry information inherent in each facial part segmentation can be utilized more comprehensively to guide 3D face reconstruction and address the issues mentioned above.
Facial part segmentation [24, 31, 32, 34] has been well developed, offering precise geometry for each facial feature with pixel-level accuracy. Compared with commonly used landmarks, part segmentation provides denser labels covering the whole image. Compared with photometric texture, part segmentation is less susceptible to lighting or shadow interference. Although facial part segmentation occasionally appears in the process of 3D face reconstruction, it is not fully utilized. For instance, it only serves to enhance the reconstruction quality of specific regions [25, 48], or to distinguish the overall texture location for photometric-texture-loss [26], without delving into the specifics of facial parts. Attempts [33, 56] to fit 3D parts with the guidance of segmentation information rely on differentiable renderers [15, 42, 46] to generate the silhouettes of the predicted 3D facial regions and optimize the difference between the rendered silhouettes and the 2D segmentation through Intersection over Union (IoU) loss. However, these renderers fail to provide sufficient and stable geometric signals for face reconstruction due to local optima, rendering error propagation, and gradient instability [22].
This paper leverages the precise and rich geometric information in facial part silhouettes to guide face reconstruction, thereby improving the alignment of reconstructed facial features with the original image and excelling in reconstructing extreme facial expressions. Fig. 1 provides an overview of the proposed Part Re-projection Distance Loss (PRDL). Firstly, PRDL samples points within the segmented region and transforms the segmentation information into a 2D point set for each facial part. The 3D face reconstruction is also re-projected onto the image plane and transformed into 2D point sets for different regions. Secondly, PRDL samples image grid anchors and establishes geometric descriptors. These descriptors are constructed using various statistical distances from the anchors to the point sets. Finally, PRDL optimizes the distribution of the point sets with the same semantics, leading to improved overlap between the regions covered by the target and predicted point sets. In contrast to renderer-based methods, PRDL exhibits a clear gradient. To facilitate the use of PRDL, we provide a new 3D mesh part annotation aligned with the semantic regions in 2D face segmentation [24, 55], which differs from the existing annotations [30, 49], as shown in Fig. 2(c).

Figure 2. Drawbacks of existing research and our results. (a) Performance on extreme expressions: present methods fail to reconstruct extreme expressions and show poor region alignment. (b) 3D error vs. 2D alignment: inconsistencies arise between 3D errors and 2D alignments, such as the eye region in this case. (c) 3D face model annotations: geometric optimization of each semantically consistent part is only achievable through our annotations.

Besides the drawbacks of supervisory signals, the challenge of handling extreme expressions arises from data limitations. To boost studies and address the lack of emotional expressions (e.g., closed-eye, open-mouth, frown, etc.), we synthesize a face dataset using the GAN-based method [24]. To highlight the performance of region overlapping, we propose a new benchmark to quantify how accurately the 3D reconstructed parts cling to their corresponding image components on the 2D image plane. Our main contributions are as follows:
- We introduce a novel Part Re-projection Distance Loss (PRDL) to comprehensively utilize segmentation information for face reconstruction. PRDL transforms the target and prediction into semantic point sets, optimizing the distribution of point sets to ensure that the reconstructed regions and the target share the same geometry.
- We introduce a new synthetic face dataset including closed-eye, open-mouth, and frown expressions, with more than $200K$ images.
- Extensive experiments show that the results with PRDL achieve excellent performance and outperform the existing methods. The data and code are available at https://github.com/wang-zidu/3DDFA-V3.
# 2. Related Work
2D-to-3D Losses for 3D Face Reconstruction. Landmark loss [11, 17, 60] stands out as the most widely employed and effective supervisory signal for face reconstruction. Some studies [20, 37] reveal that it can generate 3D faces under the guidance of hundreds or thousands of landmarks. Photometric loss is another commonly used loss, which involves rendering the reconstructed mesh with texture into an image and comparing it to the original input. Some researchers focus on predicting the facial features that need to be fitted while excluding occlusions [12, 45]. The photometric loss is susceptible to factors like the texture basis, skin masks, and rendering modes. It emphasizes overall visualization and may not effectively constrain local details. Perception loss [11, 14, 16] distinguishes itself from image-level methods by employing pre-trained deep face recognition networks [9] to extract high-level features from the rendered reconstruction results. These features are then compared with the features from the input. Lip segmentation consistency loss [48] employs mouth segmentation to help reconstruction.

Figure 3. Overview of Part Re-projection Distance Loss (PRDL). (a): Transforming facial part segmentation into target point sets $\{C_p\}$ . (b): Re-projecting $V_{3d}(\alpha)$ onto the image plane to obtain predicted point sets $\{V_{2d}^p (\alpha)\}$ . (c): Given anchors $\mathbf{A}$ and distance functions $\mathcal{F}$ , the core idea of PRDL is to minimize the difference of every statistical distance from any $\pmb{a}_i\in \pmb{A}$ to $V_{2d}^{p}(\alpha)$ or $C_p$ , leading to enhanced overlap between the regions covered by the target and predicted point sets.
Differentiable Silhouette Renderers. The development of differentiable renderers [15, 42, 46] has enriched the supervisory methods for 3D face reconstruction. These pipelines make the rasterization process differentiable, allowing for the computation of gradients for every pixel in the rendered results. By combining an IoU loss with segmentation information, the silhouettes produced by these renderers have been shown to optimize 3D shapes [8, 33, 56]. These rasterization processes typically rely on either local [21, 36] or global [8, 33] geometric-distance-based weighted aggregation, generating silhouettes by computing a probability related to the distance from pixels to mesh faces. However, to obtain a suitably sharp silhouette, the weight contribution of each position to a rendered pixel must decrease sharply with distance, so the gradient generated by shape differences at large distances becomes small or zero, making it difficult to retain accurate geometric guidance. These renderers also encounter issues such as rendering error propagation and gradient instability [22].
Synthetic Dataset. Synthetic data [41, 52, 58] is commonly used to train 3D face reconstruction models [11, 17, 25]. However, these synthetic faces either prioritize the diversification of background, illumination, and identities [41, 52], or concentrate on pose variation [58], which helps achieve good results in reconstructing natural facial expressions but struggles with extreme expressions. To overcome these limitations and facilitate related research, this paper adopts a GAN-based method [24] to synthesize realistic and diverse facial expression data, including closed eyes, open mouths, and frowns.
# 3. Methodology
# 3.1. Preliminaries
We adopt a face model, an illumination model, and a camera model based on [6, 11, 14, 17].
Face Model. The vertices and albedo of a 3D face are determined by the following formulas:
$$
V_{3d}(\boldsymbol{\alpha}) = \boldsymbol{R}(\boldsymbol{\alpha}_{a})\left(\bar{\boldsymbol{V}} + \boldsymbol{\alpha}_{id}\boldsymbol{A}_{id} + \boldsymbol{\alpha}_{\exp}\boldsymbol{A}_{\exp}\right) + \boldsymbol{\alpha}_{t}, \tag{1}
$$

$$
T_{alb}(\boldsymbol{\alpha}) = \overline{\boldsymbol{T}} + \boldsymbol{\alpha}_{alb}\boldsymbol{A}_{alb},
$$
where $V_{3d}(\alpha) \in \mathbb{R}^{3 \times 35709}$ is the 3D face vertices and $\overline{\boldsymbol{V}}$ is the mean shape. $T_{alb}(\alpha) \in \mathbb{R}^{3 \times 35709}$ is the albedo and $\overline{T}$ is the mean albedo. $A_{id}$ , $A_{exp}$ , and $A_{alb}$ are the face identity vector bases, the expression vector bases, and the albedo vector bases, respectively. $\alpha_{id} \in \mathbb{R}^{80}$ , $\alpha_{exp} \in \mathbb{R}^{64}$ , and $\alpha_{alb} \in \mathbb{R}^{80}$ are the identity parameter, the expression parameter, and the albedo parameter, respectively. $\alpha_{t} \in \mathbb{R}^{3}$ is the translation parameter. $\pmb{R}(\pmb{\alpha}_{a}) \in \mathbb{R}^{3 \times 3}$ is the rotation matrix corresponding to the pitch/yaw/roll angles $\alpha_{a} \in \mathbb{R}^{3}$ .
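As a concrete illustration of Eqn. 1, the following minimal sketch assembles the vertices of a parameterized face; the randomly initialized bases stand in for real 3DMM data, and the Euler-angle helper from SciPy is an assumption of this sketch, not part of the paper:

```python
import numpy as np
from scipy.spatial.transform import Rotation

N = 35709  # vertex count of the face model used in the paper

# stand-in bases; in practice these come from a 3DMM such as BFM
V_bar = np.zeros((3, N))                  # mean shape
A_id = np.random.randn(80, 3, N) * 1e-3   # identity bases
A_exp = np.random.randn(64, 3, N) * 1e-3  # expression bases

def face_vertices(alpha_id, alpha_exp, alpha_a, alpha_t):
    """Eqn. 1: morph the mean shape, rotate, then translate."""
    shape = V_bar + np.tensordot(alpha_id, A_id, axes=1) \
                  + np.tensordot(alpha_exp, A_exp, axes=1)  # (3, N)
    R = Rotation.from_euler("xyz", alpha_a).as_matrix()     # (3, 3)
    return R @ shape + alpha_t[:, None]                     # (3, N)

V3d = face_vertices(np.zeros(80), np.zeros(64),
                    np.array([0.1, 0.0, 0.0]), np.zeros(3))
print(V3d.shape)  # (3, 35709)
```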
Camera. We employ a camera with a fixed perspective projection, the same as in [11, 25]. Using this camera to re-project $V_{3d}(\alpha)$ onto the 2D image plane yields $V_{2d}(\alpha) \in \mathbb{R}^{2 \times 35709}$ .
Illumination Model. Following [14], we adopt Spherical Harmonics (SH) [40] for the estimation of the shaded texture $T_{tex}(\alpha)$ :
$$
T_{tex}(\boldsymbol{\alpha}) = T_{alb}(\boldsymbol{\alpha}) \odot \sum_{k=1}^{9} \boldsymbol{\alpha}_{sh}^{k} \boldsymbol{\Psi}_{k}(\boldsymbol{N}), \tag{2}
$$
where $\odot$ denotes the Hadamard product, $N$ is the surface normal of $V_{3d}(\alpha)$ , $\Psi : \mathbb{R}^3 \to \mathbb{R}$ is the SH basis function, and $\alpha_{sh} \in \mathbb{R}^9$ is the corresponding SH parameter. In summary, $\alpha = [\alpha_{id}, \alpha_{\mathrm{exp}}, \alpha_a, \alpha_t, \alpha_{sh}]$ is the full set of parameters to be estimated.
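For intuition, the nine band-0 to band-2 real spherical-harmonic basis values in Eqn. 2 can be evaluated per vertex normal as in this sketch; it uses the standard real-SH constants and simplifies the per-channel handling of $\alpha_{sh}$ to a single gray channel, which is an assumption of the sketch:

```python
import numpy as np

def sh_basis_9(normals: np.ndarray) -> np.ndarray:
    """First 9 real spherical-harmonic basis values; normals: (N, 3)."""
    x, y, z = normals[:, 0], normals[:, 1], normals[:, 2]
    ones = np.ones_like(x)
    return np.stack([
        0.282095 * ones,                           # l = 0
        0.488603 * y, 0.488603 * z, 0.488603 * x,  # l = 1
        1.092548 * x * y, 1.092548 * y * z,        # l = 2
        0.315392 * (3 * z**2 - 1),
        1.092548 * x * z,
        0.546274 * (x**2 - y**2),
    ], axis=1)                                     # (N, 9)

# shaded gray texture per Eqn. 2: albedo * (SH basis . alpha_sh)
normals = np.random.randn(100, 3)
normals /= np.linalg.norm(normals, axis=1, keepdims=True)
alpha_sh = np.zeros(9); alpha_sh[0] = 3.0          # ambient-only lighting
albedo = np.full(100, 0.8)
shaded = albedo * (sh_basis_9(normals) @ alpha_sh)
```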
# 3.2. Point Transformation on the Image Plane
Transforming Segmentation to 2D Points. For an input RGB face image $I \in \mathbb{R}^{H \times W \times 3}$ , the prediction of a face segmentation method can be represented by a set of binary tensors $M = \{M_p | p \in P\}$ , where $P = \{\text{left\_eye, right\_eye, left\_eyebrow, right\_eyebrow, up\_lip, down\_lip, nose, skin}\}$ and $M_p \in \{0,1\}^{H \times W}$ . Specifically, $M_p^{(x,y)} = 1$ only if the 2D pixel position $(x,y)$ of $M_p$ belongs to face part $p$ , and $M_p^{(x,y)} = 0$ otherwise. $M$ can be transformed into a set of point sets $C = \{C_p | p \in P\}$ , where $C_p = \{(x,y) \mid M_p^{(x,y)} = 1\}$ . In this step, we employ DML-CSR [55] for face segmentation, excluding the ear regions, filtering out noise from the segmentation, and dynamically removing the forehead region above the eyebrows based on their position. This procedure is illustrated in Fig. 3(a). More implementation details are provided in the supplemental materials.
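A minimal sketch of this transformation, assuming the part masks are already given as boolean numpy arrays (the segmentation network itself is not shown):

```python
import numpy as np

PARTS = ["left_eye", "right_eye", "left_eyebrow", "right_eyebrow",
         "up_lip", "down_lip", "nose", "skin"]

def masks_to_point_sets(masks: dict) -> dict:
    """Turn each binary part mask M_p of shape (H, W) into a point
    set C_p of (x, y) pixel coordinates where the mask is 1."""
    point_sets = {}
    for p in PARTS:
        ys, xs = np.nonzero(masks[p])
        point_sets[p] = np.stack([xs, ys], axis=1)  # (|C_p|, 2)
    return point_sets

# toy example: a 4x4 "nose" blob in a 64x64 image
toy = {p: np.zeros((64, 64), dtype=bool) for p in PARTS}
toy["nose"][30:34, 28:32] = True
C = masks_to_point_sets(toy)
print(C["nose"].shape)  # (16, 2)
```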
Facial Part Annotation on 3D Face Model. Our objective is to leverage $\{C_p\}$ for guiding 3D face reconstruction. Thus, we should ensure that the reconstructed mesh can be divided into regions consistent with the semantics of the 2D segmentation. Due to the topological consistency of the face model, every vertex on the mesh can be annotated for a specific region. However, existing annotations [27, 30, 49] do not conform to widely accepted 2D face segmentation definitions [24, 32], as shown in Fig.2(c). To address this misalignment, we introduce new part annotations on both BFM [5] and FaceVerse [51]. We partition the vertices based on their indices. $i \in Ind_p$ indicates that the $i$ -th vertex (denoted as $\mathbf{v}$ ) on the mesh belongs to part $p$ . $\{Ind_p|p \in P\}$ can be obtained by:
$$
\begin{array}{l}
I^{seg} = \operatorname{Seg}(\operatorname{Render}(V_{3d}, Tex)) \\
i \in Ind_{p}, \ \text{if} \ I^{seg}(\boldsymbol{v}) \in p
\end{array} \tag{3}
$$
where $\text{Render}(\cdot)$ generates an image by applying texture to the mesh, and $\text{Seg}(\cdot)$ is responsible for segmenting the rendered result. We employ different shapes $V_{3d}$ and varying textures $Tex$ to label every $v \in V_{3d}$ with hand-crafted modification. The annotation $\{Ind_p\}$ is pre-computed offline before the training process. Consequently, we utilize $\{Ind_p\}$ to transform the re-projection $V_{2d}(\alpha)$ into semantic point sets $\{V_{2d}^p (\alpha)|p \in P\}$ . Besides, the upper forehead region situated above the eyebrows is dynamically excluded to ensure consistency with the target. Points obstructed by hair are removed based on $\{C_p\}$ , as shown in Fig. 3(b). Please refer to the supplemental materials for annotation details.
# 3.3. Part Re-projection Distance Loss (PRDL)
This section describes the design of PRDL, focusing on constructing geometric descriptors and establishing the relation between the prediction $\{V_{2d}^p (\alpha)\}$ and the ground truth $\{C_p\}$ for a given $p \in P$ , which proves instrumental for face reconstruction.
In a more generalized formulation, considering two point sets $C = \{c_1, c_2, \dots, c_{|C|}\}$ and $C^* = \{c_1^*, c_2^*, \dots, c_{|C^*|}^*\}$ , we aim to establish geometry descriptions by quantifying shape alignment between them for reconstruction. $C$ and $C^*$ may not possess the same number of points, and their points lack correspondence. Instead of directly searching the correspondence between the two sets, we use a set of fixed points as anchors $A = \{a_1, a_2, \dots, a_{|A|}\}$ and a collection of statistical distance functions $\mathcal{F} = \{f_1, f_2, \dots, f_{|\mathcal{F}|}\}$ to construct geometry description tensors $\Gamma(C, A, \mathcal{F}) \in \mathbb{R}^{|\mathcal{A}| \times |\mathcal{F}|}$ and $\Gamma(C^*, A, \mathcal{F}) \in \mathbb{R}^{|\mathcal{A}| \times |\mathcal{F}|}$ for $C$ and $C^*$ , respectively (denoted as $\Gamma$ and $\Gamma^*$ for brevity). The value $\Gamma(i, j)$ and $\Gamma^*(i, j)$ at the position $(i, j)$ are determined by:
$$
\left\{
\begin{array}{l}
\boldsymbol{\Gamma}(i, j) = f_{j}(\boldsymbol{C}, \boldsymbol{a}_{i}) \\
\boldsymbol{\Gamma}^{*}(i, j) = f_{j}(\boldsymbol{C}^{*}, \boldsymbol{a}_{i}),
\end{array}
\right. \tag{4}
$$
where every function $f_{j}(\pmb {B},\pmb {b})\in \mathcal{F}$ describes the distance from a single point $\pmb{b}$ to a set of points $\pmb{B}$ , and $f_{j}(\pmb {B},\pmb {b})$ can be any statistically meaningful distance.
When fitting 3DMM to the segmented silhouettes for part $p$ , we set $\boldsymbol{C} = V_{2d}^{p}(\boldsymbol{\alpha})$ and $C^* = C_p$ with specified anchors $\mathbf{A}$ and a set of distance functions $\mathcal{F}$ . Then we calculate their corresponding geometry descriptor tensors $\Gamma_p = \Gamma(V_{2d}^p(\boldsymbol{\alpha}), \boldsymbol{A}, \mathcal{F})$ and $\Gamma_p^* = \Gamma(C_p, \boldsymbol{A}, \mathcal{F})$ . Part Re-projection Distance Loss (PRDL) $\mathcal{L}_{prdl}$ is defined as:
$$
\mathcal{L}_{prdl} = \sum_{p \in P} w_{prdl}^{p} \left\| \boldsymbol{\Gamma}_{p} - \boldsymbol{\Gamma}_{p}^{*} \right\|_{2}^{2}, \tag{5}
$$
where $w_{prdl}^p$ is the weight of each part $p$ . In this paper, we set $\mathcal{F}$ as a collection of the nearest $(f_{min})$ , furthest $(f_{max})$ , and average $(f_{ave})$ distance, i.e. $\mathcal{F} = \{f_{max}, f_{min}, f_{ave}\}$ . We set $\mathbf{A}$ as a $H \times W$ mesh grid. Then for $\forall \mathbf{a}_i \in \mathbf{A}$ , the optimization objective of $\mathcal{L}_{prdl}$ is to:
$$
\left\{
\begin{array}{l}
\min \left\| f_{min}(\boldsymbol{C}_{p}, \boldsymbol{a}_{i}) - f_{min}(V_{2d}^{p}(\boldsymbol{\alpha}), \boldsymbol{a}_{i}) \right\|_{2}^{2} \\
\min \left\| f_{max}(\boldsymbol{C}_{p}, \boldsymbol{a}_{i}) - f_{max}(V_{2d}^{p}(\boldsymbol{\alpha}), \boldsymbol{a}_{i}) \right\|_{2}^{2} \\
\min \left\| f_{ave}(\boldsymbol{C}_{p}, \boldsymbol{a}_{i}) - f_{ave}(V_{2d}^{p}(\boldsymbol{\alpha}), \boldsymbol{a}_{i}) \right\|_{2}^{2}
\end{array}
\right. \tag{6}
$$
This process is shown in Fig. 3(c). When $p =$ left_eye, PRDL minimizes the length difference between the indigo and orange lines (also shown in Fig. 6(a) for $p =$ right_eyebrow). The upper right corner of Fig. 3(c) visualizes $\Gamma_{left\_eye}$ channel by channel after reshaping it from $\mathbb{R}^{|A| \times |\mathcal{F}|}$ to $\mathbb{R}^{H \times W \times |\mathcal{F}|}$ . It is worth noting that the number of points in $V_{2d}^{p}(\alpha)$ , $C_p$ , and $A$ can be reduced by using Farthest Point Sampling (FPS) [38] to decrease computational costs.
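A minimal numpy sketch of the PRDL computation for one part, under the paper's choice $\mathcal{F} = \{f_{max}, f_{min}, f_{ave}\}$ and a uniform anchor grid; the grid stride, part weight, and point sets below are illustrative only:

```python
import numpy as np

def descriptor(points: np.ndarray, anchors: np.ndarray) -> np.ndarray:
    """Gamma(C, A, F): for each anchor, the min / max / mean Euclidean
    distance to the point set. points: (M, 2), anchors: (K, 2)."""
    d = np.linalg.norm(anchors[:, None, :] - points[None, :, :], axis=-1)
    return np.stack([d.min(axis=1), d.max(axis=1), d.mean(axis=1)], axis=1)

def prdl_part(pred_pts, target_pts, anchors, w_p=1.0):
    """Eqn. 5 for a single part p: squared L2 descriptor difference."""
    g_pred = descriptor(pred_pts, anchors)
    g_tgt = descriptor(target_pts, anchors)
    return w_p * np.sum((g_pred - g_tgt) ** 2)

H = W = 224
gy, gx = np.mgrid[0:H:16, 0:W:16]                  # coarse 14x14 anchor grid
anchors = np.stack([gx.ravel(), gy.ravel()], 1).astype(float)

target = np.random.rand(500, 2) * 60 + 80          # stand-in C_p
pred = target + 5.0                                # shifted prediction
print(prdl_part(pred, target, anchors))            # > 0; zero when aligned
```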
Figure 4. Synthesizing emotional expression data.

Figure 5. Examples of our synthetic face dataset.
# 3.4. Overall Losses
To reconstruct a 3D face from an image $I$ , we build a framework to minimize the total loss $\mathcal{L}$ as follows:
$$
\mathcal{L} = \lambda_{prdl}\mathcal{L}_{prdl} + \lambda_{lmk}\mathcal{L}_{lmk} + \lambda_{pho}\mathcal{L}_{pho} + \lambda_{per}\mathcal{L}_{per} + \lambda_{reg}\mathcal{L}_{reg}, \tag{7}
$$
where $\mathcal{L}_{lmk}$ is the landmark loss; we use detectors to locate 240 2D landmarks for $\mathcal{L}_{lmk}$ and adopt the dynamic landmark marching [57] to handle the non-correspondence between 2D and 3D cheek contour landmarks arising from pose variations. The photometric loss $\mathcal{L}_{pho}$ and the perceptual loss $\mathcal{L}_{per}$ are based on [11, 14]. $\mathcal{L}_{reg}$ is the regularization loss for $\alpha$ . $\lambda_{prdl} = 0.8e{-}3$ , $\lambda_{lmk} = 1.6e{-}3$ , $\lambda_{pho} = 1.9$ , $\lambda_{per} = 0.2$ , and $\lambda_{reg} = 3e{-}4$ are the balance weights. $\mathcal{L}_{prdl}$ and $\mathcal{L}_{lmk}$ are normalized by $H\times W$ .
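Schematically, the weighted sum in Eqn. 7 with the stated balance weights reads as follows; the individual loss values are placeholders for one training batch:

```python
# Eqn. 7 with the paper's balance weights; each entry stands in for
# the corresponding loss value computed on a batch
weights = dict(prdl=0.8e-3, lmk=1.6e-3, pho=1.9, per=0.2, reg=3e-4)

def total_loss(losses: dict) -> float:
    return sum(weights[k] * losses[k] for k in weights)

print(total_loss(dict(prdl=120.0, lmk=80.0, pho=0.05, per=0.3, reg=10.0)))
```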
# 3.5. Synthetic Emotional Expression Data
Benefiting from recent developments in face editing research [24, 47], we can generate realistic faces from segmentation $M$ . We aim to mass-produce realistic and diverse facial expression data. To achieve this, we start by obtaining the segmentation $M$ and landmarks $lmk$ of the original image $I$ with a segmentation method [55] and a landmark detector, respectively. Leveraging the locations of the landmarks $lmk$ , we apply affine transformations with various patterns to the segmentation $M$ , resulting in $M'$ . Subsequently, $M'$ is fed into the generative network [24] to produce a new facial expression image $I'$ , as depicted in Fig. 4. Based on CelebA [35] and CelebAMask-HQ [24], we have generated a dataset comprising more than $200K$ images, including expressions such as closed-eye, open-mouth, and frown, as depicted in Fig. 5. This dataset will be publicly available to facilitate research.
Figure 6. (a): $p =$ right_eyebrow when the closest distance $(f_{min})$ is compared. (b): The gradient descent of PRDL for (a). (c): $\mathbf{\Gamma}_p^*$ is the regression target of PRDL in the $f_{min}$ channel. (d): $M_p$ is the regression target of renderer-based methods. $\mathbf{\Gamma}_p^*$ is more informative than $M_p$ .
# 4. Analysis of PRDL and Related Methods
The Gradient of PRDL. With anchors and distance functions as the bridge, PRDL establishes the geometry descriptions of the two point sets. In Fig. 6, we take $p =$ right_eyebrow as an example to analyze the gradient of PRDL. When considering $f_{min}$ and a specific anchor $\pmb{a}_i \in \pmb{A}$ , $f_{min}$ identifies $\pmb{c}_m$ and $\pmb{v}_n$ from $C_p$ and $V_{2d}^p(\alpha)$ , respectively, by selecting the ones closest to $\pmb{a}_i$ :
$$
m = \underset{j}{\arg\min} \|\boldsymbol{a}_{i} - \boldsymbol{c}_{j}\|_{2}, \quad \boldsymbol{c}_{j} \in C_{p}, \tag{8}
$$
$$
n = \underset{j}{\arg\min} \|\boldsymbol{a}_{i} - \boldsymbol{v}_{j}\|_{2}, \quad \boldsymbol{v}_{j} \in V_{2d}^{p}(\boldsymbol{\alpha}). \tag{9}
$$
Under the definition of PRDL, the corresponding energy function $E_{i,m,n}$ for $\pmb{a}_i,\pmb{c}_m$ and $\pmb{v}_n$ is:
$$
E_{i,m,n} = \left(\left\|\boldsymbol{a}_{i} - \boldsymbol{c}_{m}\right\|_{2} - \left\|\boldsymbol{a}_{i} - \boldsymbol{v}_{n}\right\|_{2}\right)^{2} = \left(d_{i,m} - d_{i,n}\right)^{2}, \tag{10}
$$
where $d_{i,m} = ||\pmb{a}_i - \pmb{c}_m||_2, d_{i,n} = ||\pmb{a}_i - \pmb{v}_n||_2$ . The gradient descent of $E_{i,m,n}$ on $\pmb{v}_n$ is:
$$
-\frac{\partial E_{i,m,n}}{\partial \boldsymbol{v}_{n}} = 2(\boldsymbol{v}_{n} - \boldsymbol{a}_{i})\left(\frac{d_{i,m}}{d_{i,n}} - 1\right). \tag{11}
$$
The physical explanation of Eqn. 11 is comprehensible and concise: the direction of $-\nabla E_{i,m,n}$ always aligns with the line connecting $\pmb{a}_i$ and $\pmb{v}_n$ . If $d_{i,n} > d_{i,m}$ , the direction of $-\nabla E_{i,m,n}$ points from $\pmb{v}_n$ to $\pmb{a}_i$ (as shown in Fig. 6(b)), and vice versa. In the context of gradient descent, the effect of $-\nabla E_{i,m,n}$ is to make $d_{i,n} = d_{i,m}$ as much as possible. Given $\pmb{A}$ and $f_{min}$ , the gradient descent of $\mathcal{L}_{prdl}$ on $\pmb{v}_n$ is the aggregation over all anchors:
$$
-\frac{\partial \mathcal{L}_{prdl}}{\partial \boldsymbol{v}_{n}} = -w_{prdl}^{p} \sum_{i,m} \frac{\partial E_{i,m,n}}{\partial \boldsymbol{v}_{n}} = -w_{prdl}^{p} \sum_{i,m} \nabla E_{i,m,n}. \tag{12}
$$
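The sign behavior in Eqn. 11 can be verified numerically with autograd, as in this small PyTorch sketch (the points are chosen arbitrarily for the check):

```python
import torch

a_i = torch.tensor([0.0, 0.0])                      # anchor
c_m = torch.tensor([1.0, 0.0])                      # nearest target point, d_im = 1
v_n = torch.tensor([3.0, 0.0], requires_grad=True)  # prediction, d_in = 3

E = (torch.dist(a_i, c_m) - torch.dist(a_i, v_n)) ** 2  # Eqn. 10
E.backward()

# d_in > d_im, so -grad should point from v_n toward a_i (negative x here)
print(-v_n.grad)  # tensor([-4., -0.]) : matches 2*(v_n - a_i)*(d_im/d_in - 1)
```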
The scenario with $f_{max}$ is similar to that of $f_{min}$ , with the only distinction lying in the selection of points.
Table 1. Quantitative comparison on Part IoU benchmark. The best and runner-up are highlighted in bold and underlined, respectively. R_eye denotes the right eye, and similar definitions for the rest are omitted.
<table><tr><td rowspan="2">Methods</td><td colspan="8">Part IoU (%) ↑</td></tr><tr><td>R_eye avg.±std.</td><td>L_eye avg.±std.</td><td>R_brow avg.±std.</td><td>L_brow avg.±std.</td><td>Nose avg.±std.</td><td>Up_lip avg.±std.</td><td>Down_lip avg.±std.</td><td>avg.</td></tr><tr><td>PRNet [13]</td><td>65.87±16.36</td><td>66.73±14.74</td><td>61.46±15.89</td><td>59.18±16.31</td><td>83.34±4.57</td><td>50.88±18.35</td><td>58.16±17.72</td><td>63.66</td></tr><tr><td>MGCNet [45]</td><td>64.42±16.02</td><td>64.81±16.91</td><td>55.25±15.29</td><td>61.30±15.58</td><td>87.40±3.51</td><td>41.16±19.70</td><td>66.22±13.83</td><td>62.94</td></tr><tr><td>Deep3D [11]</td><td>71.87±12.00</td><td>70.52±12.19</td><td>64.66±11.31</td><td>64.70±11.98</td><td>87.69±3.51</td><td>61.21±15.60</td><td>65.95±13.08</td><td>69.51</td></tr><tr><td>3DDFA-v2 [17]</td><td>61.39±15.98</td><td>57.51±18.09</td><td>43.38±25.25</td><td>38.85±24.38</td><td>80.83±4.92</td><td>50.20±17.17</td><td>59.01±15.23</td><td>55.88</td></tr><tr><td>HRN [25]</td><td>73.31±11.39</td><td>73.61±11.50</td><td>67.91±8.26</td><td>66.78±10.27</td><td>90.00±2.60</td><td>63.80±14.16</td><td>66.40±11.94</td><td>71.69</td></tr><tr><td>DECA [14]</td><td>58.09±21.40</td><td>62.56±19.41</td><td>55.27±19.49</td><td>51.86±19.93</td><td>86.54±9.11</td><td>56.39±16.96</td><td>62.81±17.66</td><td>61.93</td></tr><tr><td>Ours (w/o $\mathcal{L}_{prdl}$)</td><td>70.72±9.44</td><td>75.69±10.79</td><td>71.11±8.58</td><td>71.69±8.73</td><td>88.35±4.60</td><td>57.26±15.97</td><td>69.71±10.68</td><td>72.08</td></tr><tr><td>Ours (w/o Syn. Data)</td><td>73.81±10.12</td><td>72.55±10.68</td><td>72.24±9.23</td><td>70.90±8.55</td><td>88.71±4.11</td><td>57.43±14.37</td><td>69.87±10.54</td><td>72.22</td></tr><tr><td>Ours</td><td>74.55±11.46</td><td>76.06±10.32</td><td>74.00±7.72</td><td>74.05±7.70</td><td>89.06±3.53</td><td>58.16±12.76</td><td>70.86±10.34</td><td>73.82</td></tr></table>
Table 2. Quantitative comparison on the REALY benchmark. Lower values indicate better results. The best and runner-up are highlighted in bold and underlined, respectively.
<table><tr><td rowspan="2">Methods</td><td colspan="5">Frontal-view (mm) ↓</td><td colspan="5">Side-view (mm) ↓</td></tr><tr><td>Nose avg.±std.</td><td>Mouth avg.±std.</td><td>Forehead avg.±std.</td><td>Cheek avg.±std.</td><td>avg.</td><td>Nose avg.±std.</td><td>Mouth avg.±std.</td><td>Forehead avg.±std.</td><td>Cheek avg.±std.</td><td>avg.</td></tr><tr><td>PRNet [13]</td><td>1.923±0.518</td><td>1.838±0.637</td><td>2.429±0.588</td><td>1.863±0.698</td><td>2.013</td><td>1.868±0.510</td><td>1.856±0.607</td><td>2.445±0.570</td><td>1.960±0.731</td><td>2.032</td></tr><tr><td>MGCNet [45]</td><td>1.771±0.380</td><td>1.417±0.409</td><td>2.268±0.503</td><td>1.639±0.650</td><td>1.774</td><td>1.827±0.383</td><td>1.409±0.418</td><td>2.248±0.508</td><td>1.665±0.644</td><td>1.787</td></tr><tr><td>Deep3D [11]</td><td>1.719±0.354</td><td>1.368±0.439</td><td>2.015±0.449</td><td>1.528±0.501</td><td>1.657</td><td>1.749±0.343</td><td>1.411±0.395</td><td>2.074±0.486</td><td>1.528±0.517</td><td>1.691</td></tr><tr><td>3DDFA-v2 [17]</td><td>1.903±0.517</td><td>1.597±0.478</td><td>2.447±0.647</td><td>1.757±0.642</td><td>1.926</td><td>1.883±0.499</td><td>1.642±0.501</td><td>2.465±0.622</td><td>1.781±0.636</td><td>1.943</td></tr><tr><td>HRN [25]</td><td>1.722±0.330</td><td>1.357±0.523</td><td>1.995±0.476</td><td>1.072±0.333</td><td>1.537</td><td>1.642±0.310</td><td>1.285±0.528</td><td>1.906±0.479</td><td>1.038±0.322</td><td>1.468</td></tr><tr><td>DECA [14]</td><td>1.694±0.355</td><td>2.516±0.839</td><td>2.394±0.576</td><td>1.479±0.535</td><td>2.010</td><td>1.903±1.050</td><td>2.472±1.079</td><td>2.423±0.720</td><td>1.630±1.135</td><td>2.107</td></tr><tr><td>Ours (w/o $\mathcal{L}_{prdl}$)</td><td>1.671±0.332</td><td>1.460±0.474</td><td>2.001±0.428</td><td>1.142±0.315</td><td>1.568</td><td>1.665±0.349</td><td>1.297±0.400</td><td>2.016±0.448</td><td>1.134±0.342</td><td>1.528</td></tr><tr><td>Ours (w/o Syn. Data)</td><td>1.592±0.327</td><td>1.339±0.433</td><td>1.823±0.407</td><td>1.119±0.332</td><td>1.468</td><td>1.628±0.320</td><td>1.229±0.433</td><td>1.872±0.407</td><td>1.091±0.312</td><td>1.455</td></tr><tr><td>Ours</td><td>1.586±0.306</td><td>1.238±0.373</td><td>1.810±0.394</td><td>1.111±0.327</td><td>1.436</td><td>1.623±0.313</td><td>1.205±0.366</td><td>1.864±0.424</td><td>1.076±0.315</td><td>1.442</td></tr></table>
$f_{max}$ also has the capability to constrain $V_{2d}^{p}(\alpha)$ within the confines of $C_p$ . $f_{ave}$ acts on the entire $V_{2d}^{p}(\alpha)$ , striving to bring its centroid as close as possible to the centroid of $C_p$ . The introduction of additional anchors and the integration of diverse statistical distances in PRDL prevent the optimization from falling into local optima and provide sufficient geometric signals. Please refer to the supplementary materials for more details.
PRDL vs. Renderer-Based Loss. An intuitive approach for fitting segmentation is to use the renderer-based IoU loss, where differentiable silhouette renderers play a crucial role. Consequently, we examine the distinctions between PRDL and these renderers. We can reshape $\Gamma_p^*$ (from $\mathbb{R}^{|A| \times |\mathcal{F}|}$ to $\mathbb{R}^{H \times W \times |\mathcal{F}|}$ ) to visualize each channel separately. Fig. 6(c) illustrates the visualization of the $f_{min}$ channel for $p =$ right_eyebrow, while Fig. 6(d) shows the silhouette rendered by [33] or [8]. In comparison with the regression target $M_p$ utilized in renderer-based methods, $\Gamma_p^*$ in PRDL is more informative and more conducive to fitting. Please refer to the supplementary materials for more details.
Furthermore, considering existing theoretical analyses [8, 22, 56], PRDL exhibits several notable advantages. First, in these renderers, all triangles constituting the object influence every pixel within the silhouettes, making it intricate to isolate specific geometric features. In contrast, $f_{min}$ or $f_{max}$ in PRDL matches the nearest or furthest point on the object, allowing for a more straightforward measurement of the shape's boundary characteristics. Second, these renderers either neglect pixels outside any triangles of the 3D object or assign minimal weights to them, emphasizing the rendered object region. However, this operation is equivalent to selectively choosing anchors $A$ in the interior of the rendered shape, while the external anchors are either not chosen or assigned small weights, thereby diminishing descriptive power. In Eqn. 11, Eqn. 12, and Fig. 6(b), we have shown that external anchors play a significant role in the fitting process. The ablation study (Fig. 8) also demonstrates that PRDL is more effective than renderer-based methods like [8, 33, 56].
# 5. Experiments
# 5.1. Experimental Settings
Reconstruction Frameworks. We implement PRDL based on PyTorch [39] and PyTorch3D [42]. We use ResNet-50 [18] as the backbone to predict $\alpha$ . The input image is cropped and aligned by [10], and resized into $224 \times 224$ .
Data. The face images are from publicly available datasets: Dad-3dheads [37], CelebA [35], RAF-ML [28], RAF-DB [29], and 300W [43]. Our synthetic images are mainly from [24, 35]. We use [58] for face pose augmentation. In total, our training data contains about $600K$ face images. We employ DML-CSR [55] to predict 2D face segmentation.
Implementation Details. Owing to the inherent nature of 2D segmentation, if part $p$ of a face is invisible or occluded, $C_p = \varnothing$ may result. In such a situation during training, we set $w_{prdl}^p = 0$ for these samples.
Figure 7. Qualitative comparison with the other methods. Our method achieves realistic reconstructions, particularly in the eye region.
We use Adam [23] as the optimizer with an initial learning rate of $1e{-}4$ . We use Farthest Point Sampling (FPS) [38] to reduce the number of points in $V_{2d}^{skin}(\alpha)$ and $C_{skin}$ to 3000, reducing computational cost. Please refer to the supplemental materials for more details.
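A minimal numpy sketch of farthest point sampling as used here (greedy max-min selection; the actual implementation may differ):

```python
import numpy as np

def farthest_point_sampling(points: np.ndarray, k: int) -> np.ndarray:
    """Greedy FPS: repeatedly pick the point farthest from the
    already-selected set. points: (N, 2) or (N, 3); returns (k,) indices."""
    n = len(points)
    chosen = np.zeros(k, dtype=int)
    dist = np.full(n, np.inf)
    chosen[0] = 0                       # arbitrary seed point
    for i in range(1, k):
        gap = np.linalg.norm(points - points[chosen[i - 1]], axis=1)
        dist = np.minimum(dist, gap)    # distance to nearest chosen point
        chosen[i] = int(dist.argmax())  # farthest remaining point
    return chosen

pts = np.random.rand(10000, 2) * 224
subset = pts[farthest_point_sampling(pts, 3000)]
```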
# 5.2. Metric
In various VR/AR applications, 3DMMs are crucial for capturing facial motions and providing fine-grained regions covering facial features. One crucial objective in such applications is to ensure the alignment of overlapping facial parts between the prediction and the input. Widely used benchmarks [7, 44] typically rely on the 3D accuracy of reconstructions. However, there are instances where inconsistencies arise between 3D errors and 2D alignments. As shown in Fig. 2(b), compared with 3DDFA-v2 [17], DECA [14] has a better 2D eye-region overlap IoU (70.29% vs. 39.37%) but a higher 3D forehead error (1.88mm vs. 1.75mm). To address this, we introduce Part IoU to emphasize the performance of overlap.
Part IoU is a new benchmark to quantify how well each part of the reconstruction $V_{3d}^{p}(\alpha)$ aligns with its corresponding part of the original face. The core idea is to measure the overlap of facial components between the reconstruction and the original image using IoU. The ground truth is a binary tensor $\{M_p\}$ (as defined above). We render $V_{3d}(\alpha)$ with a mean texture as an image and generate the predicted segmentation $\{M_p^{pred}\}$ with [55]. The use of a mean texture focuses the metric on overlap effects rather than other factors, making it applicable to methods without texture fitting [13, 17]. The Part IoU $IoU_p$ of part $p$ can be obtained by:
$$
IoU_{p} = IoU\left(M_{p}^{pred}, M_{p}\right). \tag{13}
$$
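A sketch of Eqn. 13 for binary masks; the rendering and segmentation steps are assumed to have already produced the two masks:

```python
import numpy as np

def part_iou(pred_mask: np.ndarray, gt_mask: np.ndarray) -> float:
    """IoU of two binary (H, W) masks for one facial part."""
    inter = np.logical_and(pred_mask, gt_mask).sum()
    union = np.logical_or(pred_mask, gt_mask).sum()
    return float(inter / union) if union > 0 else 0.0

a = np.zeros((64, 64), bool); a[10:30, 10:30] = True
b = np.zeros((64, 64), bool); b[15:35, 15:35] = True
print(part_iou(a, b))  # overlap 225 / union 575 ~ 0.391
```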
MEAD [50] is an emotional talking-face dataset. We test Part IoU by selecting 10 individuals from MEAD, each contributing 50 random different images. Part IoU measures the overlap performance between each part of the reconstruction and the ground truth. More details are in the supplemental materials.
REALY [7] benchmark consists of 100 scanned neutral expression faces, which are divided into four parts: nose, mouth, forehead (eyes and eyebrows), and cheek for 3D alignment and distance error calculation.
Figure 8. Comparison with the renderer-based geometric guidance of segmentation (panels: Input, SoftRas, DIB-R, ReDA, PRDL).
# 5.3. Qualitative Comparison
We conduct a comprehensive evaluation of our method against state-of-the-art approaches, including PRNet [13], MGCNet [45], Deep3D [11], 3DDFA-V2 [17], HRN [25], and DECA [14]. The visualization of HRN and DECA uses the mid-frequency details and coarse shape (denoted as HRN-m and DECA-c), since their further steps only change the renderer's normal map and perform no 3D refinement. As shown in Fig. 7, our results excel in capturing extreme expressions, even better than HRN-m, which includes fine reconstruction steps.
# 5.4. Quantitative Comparison
On both the Part IoU and REALY [7] benchmarks, our results outperform the existing state-of-the-art methods. As shown in Tab. 1, our method achieves the highest overlap IoU on almost all facial parts, with a $73.82\%$ overall average, demonstrating that PRDL enhances the part alignment of the reconstruction. PRDL also achieves the best average 3D error on the REALY benchmark (1.436mm frontal-view and 1.442mm side-view), as shown in Tab. 2.
# 5.5. Ablation Study
Ablation for PRDL and Synthetic Data. We conduct quantitative ablation experiments for PRDL and synthetic data on REALY and Part IoU. As depicted in Tab. 1 and Tab. 2, introducing only PRDL already yields superior results compared to all other methods (72.22%, 1.468mm, and 1.455mm). Introducing synthetic data without PRDL also demonstrates a significant improvement in Part IoU, but not as effectively as PRDL (72.08% vs. 72.22%). Using both synthetic data and PRDL leads to the best results.

Compare with the Differentiable Silhouette Renderers. SoftRas [33] and DIB-R [8] are the two most widely used renderers, serving as the basis for PyTorch3D [42] and Kaolin [15], respectively. Based on the image-fitting framework [1], we use them to render a silhouette of each face part and compute the IoU loss against the ground truth. ReDA [56] is also a renderer-based method using the geometric guidance of segmentation. Fig. 8 shows that PRDL is significantly better than these methods. It is essential to emphasize that none of the results in Fig. 8 and Fig. 9 include $\mathcal{L}_{lmk}$, $\mathcal{L}_{pho}$, or $\mathcal{L}_{per}$.
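
For intuition, here is a minimal sketch of the kind of soft silhouette IoU loss these renderer-based baselines minimize; it assumes `sil_pred` is a part silhouette produced by a differentiable renderer (values in $[0, 1]$, differentiable w.r.t. the 3DMM parameters) and `sil_gt` is the binary ground-truth part mask. The naming and exact form are our own, not the authors' code:

```python
import torch

def soft_iou_loss(sil_pred: torch.Tensor, sil_gt: torch.Tensor,
                  eps: float = 1e-6) -> torch.Tensor:
    """1 - soft IoU between a rendered soft silhouette and a binary part mask.

    sil_pred: (H, W) float tensor in [0, 1] from a differentiable renderer
              (e.g. SoftRas / DIB-R), so gradients flow back to the 3DMM.
    sil_gt:   (H, W) float tensor with values in {0, 1}.
    """
    inter = (sil_pred * sil_gt).sum()
    union = (sil_pred + sil_gt - sil_pred * sil_gt).sum()
    return 1.0 - inter / (union + eps)
```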

Figure 9. Comparison with the other point-driven geometric guidance of segmentation (panels: Input, Chamfer Distance, Density Aware, ICP, PRDL).

Compare with the Other Point-Driven Optimization Methods. One of the key insights of PRDL is transforming segmentation into points, so the 3DMM fitting becomes an optimization over two 2D point clouds until they share the same geometry. An intuitive idea is to incorporate point-driven optimization methods such as iterative closest point (ICP) [2-4] or chamfer distance [53]; however, these methods are predominantly rooted in nearest-neighbor principles, and opting solely for the minimum distance can lead to local optima. We compare PRDL with ICP [54], chamfer distance, and density-aware chamfer distance [53] based on [1]. Since the ICP distance can be computed from target to prediction or vice versa, we evaluate both directions. As depicted in Fig. 9, PRDL outperforms the other methods, producing outputs that align more accurately with the desired geometry. This superiority is attributed to PRDL's additional anchors and diverse statistical distances. Referring to Fig. 8 and Fig. 9, PRDL stands out as the only loss capable of reconstructing effective results when the segmentation information is used on its own. More comparisons are in the supplementary materials.
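
To make the nearest-neighbor issue concrete, below is a minimal sketch of a symmetric 2D chamfer distance; the two one-directional terms correspond to the prediction-to-target and target-to-prediction variants discussed above. The function name and exact form are ours, not the code used in the paper:

```python
import torch

def chamfer_2d(pts_a: torch.Tensor, pts_b: torch.Tensor) -> torch.Tensor:
    """Symmetric chamfer distance between (N, 2) and (M, 2) point clouds.

    Every point is matched only to its nearest neighbor in the other cloud;
    optimizing such minimum distances alone is what can trap the fitting in
    local optima, which PRDL avoids via anchors and statistical distances.
    """
    d2 = torch.cdist(pts_a, pts_b, p=2).pow(2)   # pairwise squared distances (N, M)
    a_to_b = d2.min(dim=1).values.mean()         # prediction -> target term
    b_to_a = d2.min(dim=0).values.mean()         # target -> prediction term
    return a_to_b + b_to_a
```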
# 6. Conclusions

This paper proposes a novel Part Re-projection Distance Loss (PRDL) to reconstruct 3D faces with the geometric guidance of facial part segmentation. Our analysis shows that PRDL is superior to renderer-based and other point-driven optimization methods. We also provide a new emotional facial expression dataset and a new 3D mesh part annotation to facilitate further studies. Experiments further highlight the state-of-the-art performance of PRDL in achieving high fidelity and better part alignment in 3D face reconstruction.

# Acknowledgement
This work was supported in part by Chinese National Natural Science Foundation Projects 62176256, U23B2054, 62276254, 62206280, the Beijing Science and Technology Plan Project Z231100005923033, Beijing Natural Science Foundation L221013, the Youth Innovation Promotion Association CAS Y2021131 and InnoHK program.
# References

[1] 3DMM model fitting using PyTorch. https://github.com/ascust/3DMM-Fitting-Pytorch, 2021.
[2] Brian Amberg, Sami Romdhani, and Thomas Vetter. Optimal step nonrigid ICP algorithms for surface registration. In 2007 IEEE Conference on Computer Vision and Pattern Recognition, pages 1-8. IEEE, 2007.
[3] K. S. Arun, T. S. Huang, and S. D. Blostein. Least-squares fitting of two 3-D point sets. IEEE Transactions on Pattern Analysis and Machine Intelligence, PAMI-9(5):698-700, 1987.
[4] P. J. Besl and Neil D. McKay. A method for registration of 3-D shapes. IEEE Transactions on Pattern Analysis and Machine Intelligence, 14(2):239-256, 1992.
[5] Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3D faces. In Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques, pages 187-194, 1999.
[6] Volker Blanz and Thomas Vetter. Face recognition based on fitting a 3D morphable model. IEEE Transactions on Pattern Analysis and Machine Intelligence, 25(9):1063-1074, 2003.
[7] Zenghao Chai, Haoxian Zhang, Jing Ren, Di Kang, Zhengzhuo Xu, Xuefei Zhe, Chun Yuan, and Linchao Bao. REALY: Rethinking the evaluation of 3D face reconstruction. In Computer Vision - ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part VIII, pages 74-92. Springer, 2022.
[8] Wenzheng Chen, Huan Ling, Jun Gao, Edward Smith, Jaakko Lehtinen, Alec Jacobson, and Sanja Fidler. Learning to predict 3D objects with an interpolation-based differentiable renderer. Advances in Neural Information Processing Systems, 32, 2019.
[9] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. ArcFace: Additive angular margin loss for deep face recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4690-4699, 2019.
[10] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kotsia, and Stefanos Zafeiriou. RetinaFace: Single-shot multi-level face localisation in the wild. In CVPR, 2020.
[11] Yu Deng, Jiaolong Yang, Sicheng Xu, Dong Chen, Yunde Jia, and Xin Tong. Accurate 3D face reconstruction with weakly-supervised learning: From single image to image set. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2019.
[12] Bernhard Egger, Sandro Schönborn, Andreas Schneider, Adam Kortylewski, Andreas Morel-Forster, Clemens Blumer, and Thomas Vetter. Occlusion-aware 3D morphable models and an illumination prior for face image analysis. International Journal of Computer Vision, 126:1269-1287, 2018.
[13] Yao Feng, Fan Wu, Xiaohu Shao, Yanfeng Wang, and Xi Zhou. Joint 3D face reconstruction and dense alignment with position map regression network. In Proceedings of the European Conference on Computer Vision (ECCV), pages 534-551, 2018.
[14] Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3D face model from in-the-wild images. ACM Transactions on Graphics, 40(4), 2021.
[15] Clement Fuji Tsang, Maria Shugrina, Jean Francois Lafleche, Towaki Takikawa, Jiehan Wang, Charles Loop, Wenzheng Chen, Krishna Murthy Jatavallabhula, Edward Smith, Artem Rozantsev, Or Perel, Tianchang Shen, Jun Gao, Sanja Fidler, Gavriel State, Jason Gorski, Tommy Xiang, Jianing Li, Michael Li, and Rev Lebaredian. Kaolin: A PyTorch library for accelerating 3D deep learning research. https://github.com/NVIDIAGameWorks/kaolin, 2022.
[16] Kyle Genova, Forrester Cole, Aaron Maschinot, Aaron Sarna, Daniel Vlasic, and William T. Freeman. Unsupervised training for 3D morphable model regression. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8377-8386, 2018.
[17] Jianzhu Guo, Xiangyu Zhu, Yang Yang, Fan Yang, Zhen Lei, and Stan Z. Li. Towards fast, accurate and stable 3D dense face alignment. In Proceedings of the European Conference on Computer Vision (ECCV), pages 152-168, 2020.
[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 770-778, 2016.
[19] Yueying Kao, Bowen Pan, Miao Xu, Jiangjing Lyu, Xiangyu Zhu, Yuanzhang Chang, Xiaobo Li, and Zhen Lei. Toward 3D face reconstruction in perspective projection: Estimating 6DoF face pose from monocular image. IEEE Transactions on Image Processing, 32:3080-3091, 2023.
[20] Yury Kartynnik, Artsiom Ablavatski, Ivan Grishchenko, and Matthias Grundmann. Real-time facial surface geometry from monocular video on mobile GPUs. arXiv preprint arXiv:1907.06724, 2019.
[21] Hiroharu Kato, Yoshitaka Ushiku, and Tatsuya Harada. Neural 3D mesh renderer. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3907-3916, 2018.
[22] Hiroharu Kato, Deniz Beker, Mihai Morariu, Takahiro Ando, Toru Matsuoka, Wadim Kehl, and Adrien Gaidon. Differentiable rendering: A survey. arXiv preprint arXiv:2006.12057, 2020.
[23] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
[24] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. MaskGAN: Towards diverse and interactive facial image manipulation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020.
[25] Biwen Lei, Jianqiang Ren, Mengyang Feng, Miaomiao Cui, and Xuansong Xie. A hierarchical representation network for accurate and detailed face reconstruction from in-the-wild images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 394-403, 2023.
[26] Chunlu Li, Andreas Morel-Forster, Thomas Vetter, Bernhard Egger, and Adam Kortylewski. To fit or not to fit: Model-based face reconstruction and occlusion segmentation from weak supervision. arXiv preprint arXiv:2106.09614, 2021.
[27] Ruilong Li, Karl Bladin, Yajie Zhao, Chinmay Chinara, Owen Ingraham, Pengda Xiang, Xinglei Ren, Pratusha Prasad, Bipin Kishore, Jun Xing, and Hao Li. Learning formation of physically-based face attributes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020.
[28] Shan Li and Weihong Deng. Blended emotion in-the-wild: Multi-label facial expression recognition using crowdsourced annotations and deep locality feature learning. International Journal of Computer Vision, 127(6-7):884-906, 2019.
[29] Shan Li and Weihong Deng. Reliable crowdsourcing and deep locality-preserving learning for unconstrained facial expression recognition. IEEE Transactions on Image Processing, 28(1):356-370, 2019.
[30] Tianye Li, Timo Bolkart, Michael J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics (Proc. SIGGRAPH Asia), 36(6):194:1-194:17, 2017.
[31] Jinpeng Lin, Hao Yang, Dong Chen, Ming Zeng, Fang Wen, and Lu Yuan. Face parsing with RoI tanh-warping. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5654-5663, 2019.
[32] Yiming Lin, Jie Shen, Yujiang Wang, and Maja Pantic. RoI tanh-polar transformer network for face parsing in the wild. Image and Vision Computing, 112:104190, 2021.
[33] Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3D reasoning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7708-7717, 2019.
[34] Yinglu Liu, Hailin Shi, Hao Shen, Yue Si, Xiaobo Wang, and Tao Mei. A new dataset and boundary-attention semantic segmentation for face parsing. In AAAI, pages 11637-11644, 2020.
[35] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaoou Tang. Deep learning face attributes in the wild. In Proceedings of the International Conference on Computer Vision (ICCV), 2015.
[36] Matthew M. Loper and Michael J. Black. OpenDR: An approximate differentiable renderer. In Computer Vision - ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part VII, pages 154-169. Springer, 2014.
[37] Tetiana Martyniuk, Orest Kupyn, Yana Kurlyak, Igor Krashenyi, Jiri Matas, and Viktoriya Sharmanska. DAD-3DHeads: A large-scale dense, accurate and diverse dataset for 3D head alignment from a single image. In Proc. IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2022.
[38] Carsten Moenning and Neil A. Dodgson. Fast marching farthest point sampling. Technical report, University of Cambridge, Computer Laboratory, 2003.
[39] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. PyTorch: An imperative style, high-performance deep learning library. Advances in Neural Information Processing Systems, 32, 2019.
[40] Ravi Ramamoorthi and Pat Hanrahan. An efficient representation for irradiance environment maps. In Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, pages 497-500, 2001.
[41] Chirag Raman, Charlie Hewitt, Erroll Wood, and Tadas Baltrusaitis. Mesh-tension driven expression-based wrinkles for synthetic faces. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3515-3525, 2023.
[42] Nikhila Ravi, Jeremy Reizenstein, David Novotny, Taylor Gordon, Wan-Yen Lo, Justin Johnson, and Georgia Gkioxari. Accelerating 3D deep learning with PyTorch3D. arXiv:2007.08501, 2020.
[43] Christos Sagonas, Georgios Tzimiropoulos, Stefanos Zafeiriou, and Maja Pantic. 300 faces in-the-wild challenge: The first facial landmark localization challenge. In Proceedings of the IEEE International Conference on Computer Vision Workshops, pages 397-403, 2013.
[44] Soubhik Sanyal, Timo Bolkart, Haiwen Feng, and Michael J. Black. Learning to regress 3D face shape and expression from an image without 3D supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7763-7772, 2019.
[45] Jiaxiang Shang, Tianwei Shen, Shiwei Li, Lei Zhou, Mingmin Zhen, Tian Fang, and Long Quan. Self-supervised monocular 3D face reconstruction by occlusion-aware multi-view geometry consistency. In Computer Vision - ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XV, pages 53-70. Springer, 2020.
[46] Dave Shreiner and The Khronos OpenGL ARB Working Group. OpenGL Programming Guide: The Official Guide to Learning OpenGL, Versions 3.0 and 3.1. Pearson Education, 2009.
[47] Jingxiang Sun, Xuan Wang, Yichun Shi, Lizhen Wang, Jue Wang, and Yebin Liu. IDE-3D: Interactive disentangled editing for high-resolution 3D-aware portrait synthesis. ACM Transactions on Graphics (TOG), 41(6):1-10, 2022.
[48] Ayush Tewari, Hans-Peter Seidel, Mohamed Elgharib, Christian Theobalt, et al. Learning complete 3D morphable face models from images and videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3361-3371, 2021.
[49] University of Basel Graphics and Vision Research. parametric-face-image-generator. https://github.com/unibas-gravis/parametric-face-image-generator, 2017.
[50] Kaisiyuan Wang, Qianyi Wu, Linsen Song, Zhuoqian Yang, Wayne Wu, Chen Qian, Ran He, Yu Qiao, and Chen Change Loy. MEAD: A large-scale audio-visual dataset for emotional talking-face generation. In ECCV, 2020.
[51] Lizhen Wang, Zhiyuan Chen, Tao Yu, Chenguang Ma, Liang Li, and Yebin Liu. FaceVerse: A fine-grained and detail-controllable 3D face morphable model from a hybrid dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20333-20342, 2022.
[52] Erroll Wood, Tadas Baltrusaitis, Charlie Hewitt, Sebastian Dziadzio, Thomas J. Cashman, and Jamie Shotton. Fake it till you make it: Face analysis in the wild using synthetic data alone. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3681-3691, 2021.
[53] Tong Wu, Liang Pan, Junzhe Zhang, Tai Wang, Ziwei Liu, and Dahua Lin. Density-aware chamfer distance as a comprehensive metric for point cloud completion. arXiv preprint arXiv:2111.12702, 2021.
[54] Jiaolong Yang, Hongdong Li, Dylan Campbell, and Yunde Jia. Go-ICP: A globally optimal solution to 3D ICP point-set registration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(11):2241-2254, 2015.
[55] Qi Zheng, Jiankang Deng, Zheng Zhu, Ying Li, and Stefanos Zafeiriou. Decoupled multi-task learning with cyclical self-regulation for face parsing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022.
[56] Wenbin Zhu, HsiangTao Wu, Zeyu Chen, Noranart Vesdapunt, and Baoyuan Wang. ReDA: Reinforced differentiable attribute for 3D face reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4958-4967, 2020.
[57] Xiangyu Zhu, Zhen Lei, Junjie Yan, Dong Yi, and Stan Z. Li. High-fidelity pose and expression normalization for face recognition in the wild. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 787-796, 2015.
[58] Xiangyu Zhu, Xiaoming Liu, Zhen Lei, and Stan Z. Li. Face alignment in full pose range: A 3D total solution. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41(1):78-92, 2017.
[59] Xiangyu Zhu, Chang Yu, Di Huang, Zhen Lei, Hao Wang, and Stan Z. Li. Beyond 3DMM: Learning to capture high-fidelity 3D face shape. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022.
[60] Wojciech Zielonka, Timo Bolkart, and Justus Thies. Towards metrical reconstruction of human faces. In European Conference on Computer Vision, pages 250-269. Springer, 2022.

3dfacereconstructionwiththegeometricguidanceoffacialpartsegmentation/images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfc575d4e9a17df339df169b413d2dd2d3b90989ccc4873d616a0db73d286621
+size 782763

3dfacereconstructionwiththegeometricguidanceoffacialpartsegmentation/layout.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d73b65d6c29d6c64d35f9f4df2def9cc5b876ff61225e59f90e973abbdef1b7
+size 554902

3dfacetrackingfrom2dvideothroughiterativedenseuvtoimageflow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_content_list.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6f272e4c7169112e107fb4289224f22226cb3c8556b70d70cfcc408da216b4c
+size 82918

3dfacetrackingfrom2dvideothroughiterativedenseuvtoimageflow/658f2c5b-c12d-479a-94f3-61e9ffc8e1df_model.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:feded95c17866fbc537c17ca42a2ba52470961129bcf9d58b67a880eb2038c90
+size 103059