Add Batch 6c17c774-667b-4e48-b5e3-743d3ffd38f7
This view is limited to 50 files because it contains too many changes. See the raw diff for the full list.
- 1000fps4dgaussiansplattingfordynamicscenerendering/b10427d1-bee4-4ee0-a8a7-46eb2a4b8e6a_content_list.json +3 -0
- 1000fps4dgaussiansplattingfordynamicscenerendering/b10427d1-bee4-4ee0-a8a7-46eb2a4b8e6a_model.json +3 -0
- 1000fps4dgaussiansplattingfordynamicscenerendering/b10427d1-bee4-4ee0-a8a7-46eb2a4b8e6a_origin.pdf +3 -0
- 1000fps4dgaussiansplattingfordynamicscenerendering/full.md +618 -0
- 1000fps4dgaussiansplattingfordynamicscenerendering/images.zip +3 -0
- 1000fps4dgaussiansplattingfordynamicscenerendering/layout.json +3 -0
- 1000layernetworksforselfsupervisedrlscalingdepthcanenablenewgoalreachingcapabilities/4a8d9df5-287c-441c-8654-78be23752307_content_list.json +3 -0
- 1000layernetworksforselfsupervisedrlscalingdepthcanenablenewgoalreachingcapabilities/4a8d9df5-287c-441c-8654-78be23752307_model.json +3 -0
- 1000layernetworksforselfsupervisedrlscalingdepthcanenablenewgoalreachingcapabilities/4a8d9df5-287c-441c-8654-78be23752307_origin.pdf +3 -0
- 1000layernetworksforselfsupervisedrlscalingdepthcanenablenewgoalreachingcapabilities/full.md +786 -0
- 1000layernetworksforselfsupervisedrlscalingdepthcanenablenewgoalreachingcapabilities/images.zip +3 -0
- 1000layernetworksforselfsupervisedrlscalingdepthcanenablenewgoalreachingcapabilities/layout.json +3 -0
- 3basilanalgorithmicframeworkforsparsepluslowrankcompressionofllms/2af2a6c4-d824-4f9d-a26b-4132df89fa21_content_list.json +3 -0
- 3basilanalgorithmicframeworkforsparsepluslowrankcompressionofllms/2af2a6c4-d824-4f9d-a26b-4132df89fa21_model.json +3 -0
- 3basilanalgorithmicframeworkforsparsepluslowrankcompressionofllms/2af2a6c4-d824-4f9d-a26b-4132df89fa21_origin.pdf +3 -0
- 3basilanalgorithmicframeworkforsparsepluslowrankcompressionofllms/full.md +0 -0
- 3basilanalgorithmicframeworkforsparsepluslowrankcompressionofllms/images.zip +3 -0
- 3basilanalgorithmicframeworkforsparsepluslowrankcompressionofllms/layout.json +3 -0
- 3dagentatrimodalmultiagentresponsiveframeworkforcomprehensive3dobjectannotation/b7ddcf35-7b87-4e57-a1da-f034ad401c0a_content_list.json +3 -0
- 3dagentatrimodalmultiagentresponsiveframeworkforcomprehensive3dobjectannotation/b7ddcf35-7b87-4e57-a1da-f034ad401c0a_model.json +3 -0
- 3dagentatrimodalmultiagentresponsiveframeworkforcomprehensive3dobjectannotation/b7ddcf35-7b87-4e57-a1da-f034ad401c0a_origin.pdf +3 -0
- 3dagentatrimodalmultiagentresponsiveframeworkforcomprehensive3dobjectannotation/full.md +0 -0
- 3dagentatrimodalmultiagentresponsiveframeworkforcomprehensive3dobjectannotation/images.zip +3 -0
- 3dagentatrimodalmultiagentresponsiveframeworkforcomprehensive3dobjectannotation/layout.json +3 -0
- 3dequivariantvisuomotorpolicylearningviasphericalprojection/20f2c39d-2687-48ce-8af6-bc79fc25f92b_content_list.json +3 -0
- 3dequivariantvisuomotorpolicylearningviasphericalprojection/20f2c39d-2687-48ce-8af6-bc79fc25f92b_model.json +3 -0
- 3dequivariantvisuomotorpolicylearningviasphericalprojection/20f2c39d-2687-48ce-8af6-bc79fc25f92b_origin.pdf +3 -0
- 3dequivariantvisuomotorpolicylearningviasphericalprojection/full.md +0 -0
- 3dequivariantvisuomotorpolicylearningviasphericalprojection/images.zip +3 -0
- 3dequivariantvisuomotorpolicylearningviasphericalprojection/layout.json +3 -0
- 3dgaussianflatshybrid2d3dphotometricscenereconstruction/bfec7c41-0681-41c8-8491-f468a3e77d73_content_list.json +3 -0
- 3dgaussianflatshybrid2d3dphotometricscenereconstruction/bfec7c41-0681-41c8-8491-f468a3e77d73_model.json +3 -0
- 3dgaussianflatshybrid2d3dphotometricscenereconstruction/bfec7c41-0681-41c8-8491-f468a3e77d73_origin.pdf +3 -0
- 3dgaussianflatshybrid2d3dphotometricscenereconstruction/full.md +619 -0
- 3dgaussianflatshybrid2d3dphotometricscenereconstruction/images.zip +3 -0
- 3dgaussianflatshybrid2d3dphotometricscenereconstruction/layout.json +3 -0
- 3dgaussiansplattingbasedsceneindependentrelocalizationwithunidirectionalandbidirectionalfeaturefusion/64fceee8-48f9-4972-bfcf-8de3c8fc2ec6_content_list.json +3 -0
- 3dgaussiansplattingbasedsceneindependentrelocalizationwithunidirectionalandbidirectionalfeaturefusion/64fceee8-48f9-4972-bfcf-8de3c8fc2ec6_model.json +3 -0
- 3dgaussiansplattingbasedsceneindependentrelocalizationwithunidirectionalandbidirectionalfeaturefusion/64fceee8-48f9-4972-bfcf-8de3c8fc2ec6_origin.pdf +3 -0
- 3dgaussiansplattingbasedsceneindependentrelocalizationwithunidirectionalandbidirectionalfeaturefusion/full.md +705 -0
- 3dgaussiansplattingbasedsceneindependentrelocalizationwithunidirectionalandbidirectionalfeaturefusion/images.zip +3 -0
- 3dgaussiansplattingbasedsceneindependentrelocalizationwithunidirectionalandbidirectionalfeaturefusion/layout.json +3 -0
- 3dgsrd3dmoleculargraphautoencoderwithselectiveremaskdecoding/07703aac-b0e9-44a7-9c91-88716b6109c0_content_list.json +3 -0
- 3dgsrd3dmoleculargraphautoencoderwithselectiveremaskdecoding/07703aac-b0e9-44a7-9c91-88716b6109c0_model.json +3 -0
- 3dgsrd3dmoleculargraphautoencoderwithselectiveremaskdecoding/07703aac-b0e9-44a7-9c91-88716b6109c0_origin.pdf +3 -0
- 3dgsrd3dmoleculargraphautoencoderwithselectiveremaskdecoding/full.md +0 -0
- 3dgsrd3dmoleculargraphautoencoderwithselectiveremaskdecoding/images.zip +3 -0
- 3dgsrd3dmoleculargraphautoencoderwithselectiveremaskdecoding/layout.json +3 -0
- 3dhumanposeestimationwithmuscles/63b1c657-eda7-47cd-9f63-6882cee2567e_content_list.json +3 -0
- 3dhumanposeestimationwithmuscles/63b1c657-eda7-47cd-9f63-6882cee2567e_model.json +3 -0
1000fps4dgaussiansplattingfordynamicscenerendering/b10427d1-bee4-4ee0-a8a7-46eb2a4b8e6a_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a0c17532f90ea2210aa8e56b69ae75bb7cdbb55b845e32122091898d980ca262
size 121997
1000fps4dgaussiansplattingfordynamicscenerendering/b10427d1-bee4-4ee0-a8a7-46eb2a4b8e6a_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1aeefca0a8d970fea3e167ae05985e5e10bfa84275cba7330f2b644364e44fcd
size 161505
1000fps4dgaussiansplattingfordynamicscenerendering/b10427d1-bee4-4ee0-a8a7-46eb2a4b8e6a_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1be91691d19bcd6fcc14615f8672947540c758ac4aeb197b67f18f8d9c5c18de
size 13959299
1000fps4dgaussiansplattingfordynamicscenerendering/full.md
ADDED
@@ -0,0 +1,618 @@
# 1000+ FPS 4D Gaussian Splatting for Dynamic Scene Rendering

Yuheng Yuan $^{1,*}$ Qiuhong Shen $^{1,*}$ Xingyi Yang $^{2,1}$ Xinchao Wang $^{1,\dagger}$

$^{1}$ National University of Singapore $^{2}$ The Hong Kong Polytechnic University

{yuhengyuan,qiuhong.shen}@u.nus.edu, xingyi.yang@polyu.edu.hk, xinchao@nus.edu.sg

Project page: https://4dgs-1k.github.io/

Figure 1: Compressibility and Rendering Speed. We introduce 4DGS-1K, a novel compact representation with high rendering speed. In contrast to 4D Gaussian Splatting (4DGS) [1], we achieve rasterization at $1000+$ FPS while maintaining comparable photorealistic quality with only $2\%$ of the original storage size. The right plot shows results on the N3V [2] dataset, where the radius of each dot corresponds to the storage size.

# Abstract

4D Gaussian Splatting (4DGS) has recently gained considerable attention as a method for reconstructing dynamic scenes. Despite achieving superior quality, 4DGS typically requires substantial storage and suffers from slow rendering speed. In this work, we delve into these issues and identify two key sources of temporal redundancy. (Q1) Short-Lifespan Gaussians: 4DGS uses a large portion of Gaussians with short temporal span to represent scene dynamics, leading to an excessive number of Gaussians. (Q2) Inactive Gaussians: When rendering, only a small subset of Gaussians contributes to each frame. Despite this, all Gaussians are processed during rasterization, resulting in redundant computation overhead. To address these redundancies, we present 4DGS-1K, which runs at over 1000 FPS on modern GPUs. For Q1, we introduce the Spatial-Temporal Variation Score, a new pruning criterion that effectively removes short-lifespan Gaussians while encouraging 4DGS to capture scene dynamics using Gaussians with longer temporal spans. For Q2, we store a mask for active Gaussians across consecutive frames, significantly reducing redundant computations. Compared to vanilla 4DGS, our method achieves a $41 \times$ reduction in storage and $9 \times$ faster rasterization on complex dynamic scenes, while maintaining comparable visual quality.

# 1 Introduction

Novel view synthesis for dynamic scenes allows for the creation of realistic representations of 4D environments, which is essential in fields like computer vision, virtual reality, and augmented reality. Traditionally, this area has been led by neural radiance fields (NeRF) [2-6], which model opacity and color over time to depict dynamic scenes. While effective, these NeRF-based methods come with high training and rendering costs, limiting their practicality, especially in real-time applications and on devices with limited resources.

Recently, point-based representations like 4D Gaussian Splatting (4DGS) [1] have emerged as strong alternatives. 4DGS models a dynamic scene using a set of 4D Gaussian primitives, each with a 4-dimensional mean and a $4 \times 4$ covariance matrix. At any given timestamp, a 4D Gaussian is decomposed into a set of conditional 3D Gaussians and a marginal 1D Gaussian, the latter controlling the opacity at that moment. This mechanism allows 4DGS to effectively capture both static and dynamic features of a scene, enabling high-fidelity dynamic scene reconstruction.

However, representing dynamic scenes with 4DGS is both storage-intensive and slow. Specifically, 4DGS often requires millions of Gaussians, leading to significant storage demands (averaging 2GB per scene on the N3V [2] dataset) and suboptimal rendering speed. In comparison, mainstream deformation field methods [7] require only about 90MB for the same dataset. Therefore, reducing the storage size of 4DGS [1] and improving its rendering speed are essential for efficiently representing complex dynamic scenes.

We look into the cause of this explosive number of Gaussians and place a specific emphasis on two key issues. (Q1) A large portion of Gaussians exhibit a short temporal span. In empirical experiments, 4DGS tends to favor "flickering" Gaussians to fit complex dynamic scenes, each influencing only a short portion of the temporal domain. This forces 4DGS to rely on a large number of Gaussians to reconstruct a high-fidelity scene, and substantial storage is needed to record their attributes. (Q2) Inactive Gaussians lead to redundant computation. During rendering, 4DGS processes all Gaussians, yet only a very small portion of them are active at any moment, so most of the computation time is spent on inactive Gaussians. This phenomenon greatly hampers the rendering speed. In this paper, we introduce 4DGS-1K, a framework that significantly reduces the number of Gaussians to minimize storage requirements and speed up rendering while maintaining high-quality reconstruction. To address these issues, 4DGS-1K introduces a two-step pruning approach:

- Pruning Short-Lifespan Gaussians. We propose a novel pruning criterion called the spatial-temporal variation score, which evaluates the temporal impact of each Gaussian. Gaussians with minimal influence are identified and pruned, resulting in a more compact scene representation with fewer short-lifespan Gaussians.
- Filtering Inactive Gaussians. To further reduce redundant computations during rendering, we use a key-frame temporal filter that selects the Gaussians needed for each frame. On top of this, we share the masks across adjacent frames, based on our observation that Gaussians active in adjacent frames often overlap significantly.

Besides, the pruning in step 1 enhances the masking process in step 2. By pruning Gaussians, we increase the temporal influence of each Gaussian, which allows us to select sparser key frames and further reduce storage requirements.

We have extensively tested our proposed model on various dynamic scene datasets, including real and synthetic scenes. As shown in Figure 1, 4DGS-1K reduces storage costs by $41\times$ on the Neural 3D Video dataset [2] while maintaining equivalent scene representation quality. Crucially, it enables real-time rasterization speeds exceeding 1,000 FPS. These advancements collectively position 4DGS-1K as a practical solution for high-fidelity dynamic scene modeling without compromising efficiency.

In summary, our contributions are three-fold:

- We delve into the temporal redundancy of 4D Gaussian Splatting and explain the main reason for its storage pressure and suboptimal rendering speed.
- We introduce 4DGS-1K, a compact and memory-efficient framework to address these issues. It consists of two key components: a spatial-temporal variation score-based pruning strategy and a temporal filter.
- Extensive experiments demonstrate that 4DGS-1K not only achieves a substantial storage reduction of approximately $41 \times$ but also accelerates rasterization to $1000+$ FPS while maintaining high-quality reconstruction.

# 2 Related Work

# 2.1 Novel view synthesis for static scenes

Recently, neural radiance fields (NeRF) [3] have achieved encouraging results in novel view synthesis. NeRF [3] represents a scene by mapping 3D coordinates and view direction to color and opacity. Since NeRF [3] samples each ray by querying an MLP at hundreds of points, training and rendering speed are significantly limited. Subsequent studies [8-15] have attempted to speed up rendering by introducing specialized designs, but these designs also constrain the widespread application of these models. In contrast, 3D Gaussian Splatting (3DGS) [16] has gained significant attention; it utilizes anisotropic 3D Gaussians to represent scenes and achieves high-quality results with intricate details while maintaining real-time rendering performance.

# 2.2 Novel view synthesis for dynamic scenes

Dynamic novel view synthesis poses new challenges due to the temporal variations in the input images. Previous NeRF-based dynamic scene representation methods [2, 4-6, 17-22] handle dynamic scenes by learning a mapping from spatiotemporal coordinates to color and density. Unfortunately, these NeRF-based models are constrained in their applications due to low rendering speeds. Recently, 3D Gaussian Splatting [16] has emerged as a novel explicit representation, and many studies [7, 23-27] attempt to model dynamic scenes based on it. 4D Gaussian Splatting (4DGS) [1] is one representative; it utilizes a set of 4D Gaussian primitives. However, 4DGS often requires a huge number of redundant Gaussians for dynamic scenes, which leads to tremendous storage and suboptimal rendering speed. To this end, we focus on analyzing the temporal redundancy of 4DGS [1] and develop a novel framework that achieves lower storage requirements and higher rendering speeds.

# 2.3 Gaussian Splatting Compression

3D Gaussian-based large-scale scene reconstruction typically requires millions of Gaussians, resulting in up to several gigabytes of storage. Subsequent studies have therefore attempted to tackle this issue. Specifically, CompGS [28] and Compact3D [29] employ vector quantization to store Gaussians within codebooks. Concurrently, inspired by model pruning, some studies [30-35] have proposed criteria to prune Gaussians by a specified ratio. However, compared to 3DGS [16], 4DGS [1] introduces an extra temporal dimension to enable dynamic representation, so previous 3DGS-based methods may be unsuitable for 4DGS. Consequently, we first identify a key limitation leading to this problem, referred to as temporal redundancy, and then propose a novel pruning criterion leveraging spatial-temporal variation, together with a temporal filter, to achieve lower storage requirements and higher rendering speed.

# 3 Preliminary of 4D Gaussian Splatting

Our framework builds on 4D Gaussian Splatting (4DGS) [1], which reconstructs dynamic scenes by optimizing a collection of anisotropic 4D Gaussian primitives. Each Gaussian is characterized by a 4D mean $\mu = (\mu_x,\mu_y,\mu_z,\mu_t)\in \mathbb{R}^4$ coupled with a covariance matrix $\Sigma \in \mathbb{R}^{4\times 4}$.

By treating the time and space dimensions equally, the 4D covariance matrix $\Sigma$ can be decomposed into a scaling component $S_{4D} = (s_x,s_y,s_z,s_t)\in \mathbb{R}^4$ and a rotation matrix $R_{4D}\in \mathbb{R}^{4\times 4}$, where $R_{4D}$ is represented by a pair of left and right quaternions $q_{l}\in \mathbb{R}^{4}$ and $q_{r}\in \mathbb{R}^{4}$.

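For readers unfamiliar with this parameterization, the sketch below shows one standard way to assemble the 4D covariance from the scaling vector and the left/right quaternion pair: a 4D rotation can be written as the product of the left- and right-multiplication matrices of two unit quaternions. This is a minimal NumPy illustration only; the function names and the exact quaternion convention are assumptions and may differ from the released 4DGS implementation.

```python
import numpy as np

def quat_to_left(q):
    # Left-multiplication matrix of a quaternion q = (w, x, y, z).
    w, x, y, z = q
    return np.array([[w, -x, -y, -z],
                     [x,  w, -z,  y],
                     [y,  z,  w, -x],
                     [z, -y,  x,  w]])

def quat_to_right(q):
    # Right-multiplication matrix of a quaternion q = (w, x, y, z).
    w, x, y, z = q
    return np.array([[w, -x, -y, -z],
                     [x,  w,  z, -y],
                     [y, -z,  w,  x],
                     [z,  y, -x,  w]])

def covariance_4d(scales, q_l, q_r):
    # Sigma = R S S^T R^T, with R_4D = L(q_l) R(q_r) and S = diag(s_x, s_y, s_z, s_t).
    R = quat_to_left(q_l / np.linalg.norm(q_l)) @ quat_to_right(q_r / np.linalg.norm(q_r))
    M = R @ np.diag(scales)
    return M @ M.T
```

With both quaternions normalized, $R_{4D}$ is a valid 4D rotation, so the resulting covariance is symmetric and positive semi-definite.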
During rendering, each 4D Gaussian is decomposed into a conditional 3D Gaussian and a 1D Gaussian at a specific time $t$. Moreover, the conditional 3D Gaussian can be derived from the properties of the multivariate Gaussian:

$$
\mu_{xyz\mid t} = \mu_{1:3} + \Sigma_{1:3,4}\,\Sigma_{4,4}^{-1}\,(t - \mu_{t}), \qquad
\Sigma_{xyz\mid t} = \Sigma_{1:3,1:3} - \Sigma_{1:3,4}\,\Sigma_{4,4}^{-1}\,\Sigma_{4,1:3} \tag{1}
$$

Here, $\mu_{1:3} \in \mathbb{R}^3$ and $\Sigma_{1:3,1:3} \in \mathbb{R}^{3 \times 3}$ denote the spatial mean and covariance, while $\mu_t$ and $\Sigma_{4,4}$ are scalars representing the temporal components. To perform rasterization, given a pixel under view $\mathcal{I}$ and timestamp $t$, its color $\mathcal{I}(u, v, t)$ can be computed by blending visible Gaussians sorted by their depth:

$$
\mathcal{I}(u, v, t) = \sum_{i}^{N} c_{i}(d)\,\alpha_{i} \prod_{j = 1}^{i - 1} (1 - \alpha_{j}) \tag{2}
$$

with

$$
\alpha_{i} = p_{i}(t)\, p_{i}(u, v \mid t)\, \sigma_{i}, \qquad p_{i}(t) \sim \mathcal{N}\left(t;\, \mu_{t}, \Sigma_{4,4}\right) \tag{3}
$$

where $c_{i}(d)$ is the color of each Gaussian, and $\alpha_{i}$ is given by evaluating a 2D Gaussian with covariance $\Sigma_{2D}$ multiplied with a learned per-point opacity $\sigma_{i}$ and temporal Gaussian distribution $p_{i}(t)$ . In the following discussion, we denote $\Sigma_{4,4}$ as $\Sigma_{t}$ for simplicity.

Temporal Redundancy. Despite achieving high quality, 4DGS requires a huge number of Gaussians to model dynamic scenes. We identify a key limitation leading to this problem: 4DGS represents scenes through temporally independent Gaussians that lack explicit correlation across time. This means that even static objects are redundantly represented by hundreds of Gaussians, which inconsistently appear or vanish across timesteps. We refer to this phenomenon as temporal redundancy. As a result, scenes end up needing more Gaussians than they should, leading to excessive storage demands and suboptimal rendering speeds. In Section 4, we analyze the root causes of this issue and propose a set of solutions to reduce the count of Gaussians.

Figure 2: Temporal Redundancy Study. (a) The $\Sigma_{t}$ distribution of 4DGS. The red line shows the result of vanilla 4DGS; the other two lines show that our model effectively reduces the number of transient Gaussians with small $\Sigma_{t}$. (b) The active ratio during rendering at different timestamps, demonstrating that most of the computation time in vanilla 4DGS is spent on inactive Gaussians, whereas 4DGS-1K significantly reduces the occurrence of inactive Gaussians during rendering. (c) The IoU between the set of active Gaussians in the first frame and frame $t$, showing that active Gaussians tend to overlap significantly across adjacent frames.
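As a concrete illustration of the slicing in eqs. (1) and (3), the following minimal NumPy sketch evaluates a single 4D Gaussian at time $t$: it returns the conditional 3D mean and covariance plus the (unnormalized) temporal density that modulates opacity. Variable names and shapes are assumed for illustration; this is not the official implementation.

```python
import numpy as np

def slice_4d_gaussian(mu, cov, t):
    """Evaluate one 4D Gaussian at time t.

    mu: (4,) mean (x, y, z, t); cov: (4, 4) covariance.
    Returns the conditional 3D mean/covariance (eq. 1) and the marginal
    temporal density p(t) (eq. 3, up to normalization) that scales opacity.
    """
    mu_xyz, mu_t = mu[:3], mu[3]
    cov_xyz   = cov[:3, :3]      # Sigma_{1:3,1:3}
    cov_xyz_t = cov[:3, 3]       # Sigma_{1:3,4}
    var_t     = cov[3, 3]        # Sigma_{4,4} = Sigma_t
    mu_cond  = mu_xyz + cov_xyz_t / var_t * (t - mu_t)
    cov_cond = cov_xyz - np.outer(cov_xyz_t, cov_xyz_t) / var_t
    p_t = np.exp(-0.5 * (t - mu_t) ** 2 / var_t)
    return mu_cond, cov_cond, p_t
```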
# 4 Methodology

Our goal is to compress 4DGS by reducing the number of Gaussians while preserving rendering quality. To achieve this, we first analyze the redundancies present in 4DGS, as detailed in Section 4.1. Building on this analysis, we introduce 4DGS-1K in Section 4.2, which incorporates a set of compression techniques designed for 4DGS. 4DGS-1K enables rendering speeds of over 1,000 FPS on modern GPUs.

# 4.1 Understanding Redundancy in 4DGS

This section investigates why 4DGS requires an excessive number of Gaussians to represent dynamic scenes. In particular, we identify two key factors. First, 4DGS models object motion using a large number of transient Gaussians that inconsistently appear and disappear across timesteps, leading to redundant temporal representations. Second, for each frame, only a small fraction of Gaussians actually contribute to the rendering. We discuss these problems below.

Massive Short-Lifespan Gaussians. We observe that 4DGS tends to store numerous Gaussians that flicker in time. We refer to these as short-lifespan Gaussians. To investigate this property, we analyze the Gaussians' opacity, which controls visibility. Intuitively, short-lifespan Gaussians exhibit an opacity pattern that rapidly increases and then suddenly decreases. In 4DGS, this behavior is typically reflected in the time variance parameter $\Sigma_{t}$: small $\Sigma_{t}$ values indicate a short lifespan.

Figure 3: Overview of 4DGS-1K. (a) Transient Gaussian pruning: we first calculate the spatial-temporal variation score for each 4D Gaussian on the training views to prune Gaussians with a short lifespan (the red Gaussian). (b) Temporal filter: a temporal filter is introduced to filter out inactive Gaussians before rendering to alleviate the suboptimal rendering speed. At a given timestamp $t$, the set of Gaussians participating in rendering is derived from the two adjacent key-frames, $t_0$ and $t_0 + \Delta_t$.

Observations. We plot the distribution of $\Sigma_t$ for all Gaussians in the Sear Steak scene. As shown in Figure 2a, most Gaussians have small $\Sigma_t$ values (e.g., $70\%$ have $\Sigma_t < 0.25$). Therefore, nearly all Gaussians in 4DGS have a short lifespan. This property leads to high storage needs and slower rendering.

Inactive Gaussians. Another finding is that, during forward rendering, only a small fraction of Gaussians actually contribute. Interestingly, the active ones tend to overlap significantly across adjacent frames. To quantify this, we introduce two metrics: (1) Active ratio, defined as the proportion of Gaussians that are active in any view at a given moment relative to the total number of Gaussians. (2) Activation Intersection-over-Union (IoU), computed as the IoU between the set of active Gaussians in the first frame and in frame $t$.

Observations. Again, we plot the two metrics for the Sear Steak scene. As shown in Figure 2b, nearly $85\%$ of Gaussians are inactive at each frame, even though all Gaussians are processed during rendering. Moreover, Figure 2c shows that the active Gaussians remain quite consistent over time, with an IoU above $80\%$ over a 20-frame window.

Inactive Gaussians are a significant issue in 4DGS because each 4D Gaussian must be decomposed into a 3D Gaussian and a 1D Gaussian before rasterization (see eq. (1)). A large portion of computational resources is therefore wasted on inactive Gaussians.

In summary, redundancy in 4DGS comes from massive Short-Lifespan Gaussians and inactive Gaussians. These insights motivate our compression strategy to eliminate redundant computations while preserving rendering quality.
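Both diagnostic metrics are straightforward to reproduce from per-frame visibility masks. A minimal NumPy sketch is given below; array shapes and names are assumptions for illustration.

```python
import numpy as np

def active_ratio(active):
    # active: bool array [T, G]; True if a Gaussian contributes to any view at frame t.
    return active.sum(axis=1) / active.shape[1]

def activation_iou(active, ref_frame=0):
    # IoU between the active set of the reference frame and every frame t.
    ref = active[ref_frame]
    inter = (active & ref).sum(axis=1)
    union = (active | ref).sum(axis=1)
    return inter / np.maximum(union, 1)
```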
# 4.2 4DGS-1K for Fast Dynamic Scene Rendering

Building on the analysis above, we introduce 4DGS-1K, a suite of compression techniques specifically designed for 4DGS to eliminate redundant Gaussians. As shown in Figure 3, this process involves two key steps. First, we identify and globally prune unimportant Gaussians with a low spatial-temporal variation score (Section 4.2.1). Second, we apply local pruning with a temporal filter that skips the inactive Gaussians not needed at each timestep (Section 4.2.2).

# 4.2.1 Pruning with Spatial-Temporal Variation Score

We first prune unimportant 4D Gaussians to improve efficiency. Like 3DGS, we remove those that have a low impact on rendered pixels. In addition, we remove short-lifespan Gaussians—those that persist only briefly over time. To achieve this, we introduce a novel spatial-temporal variation score as the pruning criterion for 4DGS. It is composed of two parts: a spatial score that measures a Gaussian's contribution to the rendered pixels, and a temporal score that considers the Gaussian's lifespan.

Spatial score. Inspired by previous methods [30, 31] and $\alpha$-blending in 3DGS [16], we define the spatial score by aggregating the ray contribution of Gaussian $g_{i}$ along all rays $r$ across all input images at a given timestamp, which accurately captures the contribution of each Gaussian to each pixel. The spatial contribution score $\mathcal{S}^S$ is obtained by traversing all pixels:

$$
\mathcal{S}_{i}^{S} = \sum_{k = 1}^{NHW} \alpha_{i} \prod_{j = 1}^{i - 1}\left(1 - \alpha_{j}\right) \tag{4}
$$

where $N$ denotes the number of training views, $H$ and $W$ denote the height and width of the images, and $\alpha_{i}\prod_{j = 1}^{i - 1}(1 - \alpha_{j})$ reflects the contribution of the $i^{th}$ Gaussian to the final color of each pixel according to the alpha composition in eq. (2).

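The accumulation in eq. (4) can be sketched outside the CUDA rasterizer as follows, assuming the rasterizer exposes, per pixel, the depth-sorted Gaussian indices and alpha values; the function name and data layout are assumptions for illustration.

```python
import numpy as np

def spatial_scores(pixel_gauss_ids, pixel_alphas, num_gaussians):
    """Accumulate eq. (4): each Gaussian's alpha-blending weight, summed over pixels.

    pixel_gauss_ids / pixel_alphas: per-pixel lists of depth-sorted Gaussian
    indices and their alpha values (what the rasterizer produces per ray).
    """
    scores = np.zeros(num_gaussians)
    for ids, alphas in zip(pixel_gauss_ids, pixel_alphas):
        transmittance = 1.0
        for gid, a in zip(ids, alphas):
            scores[gid] += transmittance * a      # alpha_i * prod_{j<i} (1 - alpha_j)
            transmittance *= 1.0 - a
    return scores
```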
Temporal score. We want to assign a higher temporal score to Gaussians with a longer lifespan. To quantify this, we compute the second derivative of the temporal opacity function $p_i(t)$ defined in eq. (3):

$$
p_{i}^{(2)}(t) = \left(\frac{\left(t - \mu_{t}\right)^{2}}{\Sigma_{t}^{2}} - \frac{1}{\Sigma_{t}}\right) p_{i}(t) \tag{5}
$$

Intuitively, a large second-derivative magnitude corresponds to unstable, short-lived Gaussians, while a small one indicates smooth, persistent ones.

Moreover, since the second derivative spans the real domain $\mathbb{R}$, we apply the tanh function to map it to the interval $(0,1)$. Consequently, the opacity-variation score $\mathcal{S}_{i}^{TV}$ of each Gaussian $g_{i}$ is expressed as:

$$
\mathcal{S}_{i}^{TV} = \sum_{t = 0}^{T} \frac{1}{0.5 \cdot \operatorname{tanh}\left(\left| p_{i}^{(2)}(t) \right|\right) + 0.5} \tag{6}
$$

In addition to the opacity variation, the volume of the 4D Gaussians must also be considered, as described in eq. (1). The volume is normalized following the method in [30], denoted as $\gamma(S^{4D}) = \text{Norm}(V(S^{4D}))$. Therefore, the final temporal score is $\mathcal{S}_{i}^{T} = \mathcal{S}_{i}^{TV}\gamma(S_{i}^{4D})$.

Finally, by aggregating both the spatial and temporal scores, the spatial-temporal variation score $\mathcal{S}_{i}$ can be written as:

$$
\mathcal{S}_{i} = \sum_{t = 0}^{T} \mathcal{S}_{i}^{T} \mathcal{S}_{i}^{S} \tag{7}
$$

Pruning. All 4D Gaussians are ranked by their spatial-temporal variation score $\mathcal{S}_{i}$, and Gaussians with lower scores are pruned to reduce the storage burden of 4DGS [1]. The remaining Gaussians are optimized for a set number of iterations to compensate for the minor loss introduced by pruning.

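A rough sketch of the temporal part of the criterion (eqs. (5)-(6)) and the subsequent ranking-and-pruning step is shown below. For brevity it omits the normalized volume term $\gamma(S^{4D})$ and takes the spatial score $\mathcal{S}^{S}$ from the rasterizer as given (e.g., accumulated as in the earlier spatial-score sketch); shapes and names are assumed.

```python
import numpy as np

def temporal_variation_scores(mu_t, sigma_t, timestamps):
    # Eqs. (5)-(6): reward Gaussians whose temporal opacity p_i(t) changes slowly.
    t = timestamps[:, None]                           # [T, 1]; mu_t, sigma_t: [G]
    p = np.exp(-0.5 * (t - mu_t) ** 2 / sigma_t)      # temporal density, up to a constant
    p2 = ((t - mu_t) ** 2 / sigma_t ** 2 - 1.0 / sigma_t) * p   # second derivative
    return (1.0 / (0.5 * np.tanh(np.abs(p2)) + 0.5)).sum(axis=0)

def prune_by_score(scores, prune_ratio=0.8):
    # Keep only the (1 - prune_ratio) highest-scoring Gaussians, as in Section 4.2.1.
    keep = int(len(scores) * (1.0 - prune_ratio))
    order = np.argsort(scores)[::-1]
    mask = np.zeros(len(scores), dtype=bool)
    mask[order[:keep]] = True
    return mask
```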
# 4.2.2 Fast Rendering with Temporal Filtering

Our analysis reveals that inactive Gaussians induce unnecessary computations in 4DGS, significantly slowing down rendering. To address this issue, we introduce a temporal filter that dynamically selects active Gaussians. We observed that active Gaussians in adjacent frames overlap considerably (as detailed in Section 4.1), which allows us to share their corresponding masks across a window of frames.

Key-frame based Temporal Filtering. Based on this observation, we design a key-frame based temporal filter for active Gaussians. We select sparse key-frames at even intervals and share their masks with the surrounding frames.

Specifically, we select a list of key-frame timestamps $\{t_i\}_{i=0}^T$, where $T$ depends on the chosen interval $\Delta_t$. For each $t_i$, we render the images from all training views at the current timestamp and calculate the visibility list $\{m_{i,j}\}_{j=1}^N$, where $m_{i,j}$ is the visibility mask obtained via eq. (2) from the $j^{th}$ training viewpoint at timestamp $t_i$, and $N$ is the number of training views at the current timestamp. The final set of active Gaussian masks is given by $\left\{\bigcup_{j=1}^{N} m_{i,j}\right\}_{i=0}^{T}$.

Table 1: Quantitative comparisons on the Neural 3D Video Dataset.
<table><tr><td>Method</td><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>Storage(MB)↓</td><td>FPS↑</td><td>Raster FPS↑</td><td>#Gauss↓</td></tr><tr><td>Neural Volume1[4]</td><td>22.80</td><td>-</td><td>0.295</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DyNeRF1[2]</td><td>29.58</td><td>-</td><td>0.083</td><td>28</td><td>0.015</td><td>-</td><td>-</td></tr><tr><td>StreamRF[18]</td><td>28.26</td><td>-</td><td>-</td><td>5310</td><td>10.90</td><td>-</td><td>-</td></tr><tr><td>HyperReel[5]</td><td>31.10</td><td>0.927</td><td>0.096</td><td>360</td><td>2.00</td><td>-</td><td>-</td></tr><tr><td>K-Planes[6]</td><td>31.63</td><td>-</td><td>0.018</td><td>311</td><td>0.30</td><td>-</td><td>-</td></tr><tr><td>4K4D[36]</td><td>21.29</td><td>-</td><td>-</td><td>2519</td><td>290</td><td>-</td><td>-</td></tr><tr><td>Dynamic 3DGS[37]</td><td>30.67</td><td>0.930</td><td>0.099</td><td>2764</td><td>460</td><td>-</td><td>-</td></tr><tr><td>4D Gaussian[7]</td><td>31.15</td><td>0.940</td><td>0.049</td><td>90</td><td>30</td><td>-</td><td>-</td></tr><tr><td>E-D3DGS[26]</td><td>31.31</td><td>0.945</td><td>0.037</td><td>35</td><td>74</td><td>-</td><td>-</td></tr><tr><td>Swift4D[38]</td><td>32.23</td><td>-</td><td>0.043</td><td>120</td><td>125</td><td>-</td><td>-</td></tr><tr><td>Grid4D[39]</td><td>31.49</td><td>-</td><td>-</td><td>146</td><td>116</td><td>-</td><td>-</td></tr><tr><td>STG[40]</td><td>32.05</td><td>0.946</td><td>0.044</td><td>200</td><td>140</td><td>-</td><td>-</td></tr><tr><td>4D-RotorGS[41]</td><td>31.62</td><td>0.940</td><td>0.140</td><td>-</td><td>277</td><td>-</td><td>-</td></tr><tr><td>MEGA[42]</td><td>31.49</td><td>-</td><td>0.056</td><td>25</td><td>77</td><td>-</td><td>-</td></tr><tr><td>Compact3D[29]</td><td>31.69</td><td>0.945</td><td>0.054</td><td>15</td><td>186</td><td>-</td><td>-</td></tr><tr><td>4DGS[1]</td><td>32.01</td><td>-</td><td>0.055</td><td>-</td><td>114</td><td>-</td><td>-</td></tr><tr><td>4DGS2[1]</td><td>31.91</td><td>0.946</td><td>0.052</td><td>2085</td><td>90</td><td>118</td><td>3333160</td></tr><tr><td>Ours</td><td>31.88</td><td>0.946</td><td>0.052</td><td>418</td><td>805</td><td>1092</td><td>666632</td></tr><tr><td>Ours-PP</td><td>31.87</td><td>0.944</td><td>0.053</td><td>50</td><td>805</td><td>1092</td><td>666632</td></tr></table>
<sup>1</sup> The metrics of the model are tested without "coffee martini" and the resolution is set to $1024 \times 768$ .
<sup>2</sup> The retrained model from the official implementation.
Table 2: Quantitative comparisons on the D-NeRF Dataset.
<table><tr><td>Method</td><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>Storage(MB)↓</td><td>FPS↑</td><td>Raster FPS↑</td><td>#Gauss↓</td></tr><tr><td>DNeRF[19]</td><td>29.67</td><td>0.95</td><td>0.08</td><td>-</td><td>0.1</td><td>-</td><td>-</td></tr><tr><td>TiNeuVox[43]</td><td>32.67</td><td>0.97</td><td>0.04</td><td>-</td><td>1.6</td><td>-</td><td>-</td></tr><tr><td>K-Planes[6]</td><td>31.07</td><td>0.97</td><td>0.02</td><td>-</td><td>1.2</td><td>-</td><td>-</td></tr><tr><td>4D Gaussian[7]</td><td>32.99</td><td>0.97</td><td>0.05</td><td>18</td><td>104</td><td>-</td><td>-</td></tr><tr><td>Deformable3DGS[23]</td><td>40.43</td><td>0.99</td><td>0.01</td><td>27</td><td>70</td><td>-</td><td>131428</td></tr><tr><td>SC-GS[44]</td><td>40.65</td><td>-</td><td>-</td><td>28</td><td>126</td><td>-</td><td>-</td></tr><tr><td>Grid4D[39]</td><td>39.91</td><td>-</td><td>-</td><td>93</td><td>166</td><td>-</td><td>-</td></tr><tr><td>4D-RotorGS[41]</td><td>34.26</td><td>0.97</td><td>0.03</td><td>112</td><td>1257</td><td>-</td><td>-</td></tr><tr><td>4DGS[1]</td><td>34.09</td><td>0.98</td><td>0.02</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>4DGS1[1]</td><td>32.99</td><td>0.97</td><td>0.03</td><td>278</td><td>376</td><td>1232</td><td>445076</td></tr><tr><td>Ours</td><td>33.34</td><td>0.97</td><td>0.03</td><td>42</td><td>1462</td><td>2482</td><td>66460</td></tr><tr><td>Ours-PP</td><td>33.37</td><td>0.97</td><td>0.03</td><td>7</td><td>1462</td><td>2482</td><td>66460</td></tr></table>
<sup>1</sup> The retrained model from the official implementation.

Filter based Rendering. To render an image from any viewpoint at a given timestamp $t_{test}$, we consider its two nearest key-frames, denoted $t_l$ and $t_r$. We then perform rasterization while only considering the Gaussians marked by the masks $\left\{\bigcup_{j=1}^{N} m_{i,j}\right\}_{i=l,r}$. This method explicitly filters out inactive Gaussians to speed up rendering.
Note that using long intervals may overlook some Gaussians, reducing rendering quality. Therefore, we fine-tune Gaussians recorded by the masks to compensate for losses.
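The key-frame filter can be summarized in a few lines; the sketch below assumes a per-frame, per-view boolean visibility tensor and is meant only to illustrate the mask construction and the nearest-key-frame lookup, not the actual CUDA-side implementation.

```python
import numpy as np

def build_keyframe_masks(visibility, interval):
    """visibility: bool [T, V, G] per-frame, per-view Gaussian visibility.

    Returns key-frame timestamps and, for each key-frame, the union of the
    per-view masks (the active set shared with surrounding frames)."""
    key_ts = np.arange(0, visibility.shape[0], interval)
    key_masks = visibility[key_ts].any(axis=1)        # union over the V training views
    return key_ts, key_masks

def active_set_at(t_test, key_ts, key_masks):
    # Union of the masks of the two key-frames nearest to t_test.
    idx = np.searchsorted(key_ts, t_test)
    left = max(idx - 1, 0)
    right = min(idx, len(key_ts) - 1)
    return key_masks[left] | key_masks[right]
```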
# 5 Experiment

# 5.1 Experimental Settings

Datasets. We utilize two dynamic scene datasets to demonstrate the effectiveness of our method: (1) Neural 3D Video Dataset (N3V) [2]. This dataset consists of six dynamic scenes at a resolution of $2704 \times 2028$. For a fair comparison, we align with previous work [1, 40] and evaluate at half resolution on the 300-frame sequences. (2) D-NeRF Dataset [19]. This is a monocular video dataset comprising eight synthetic scenes. We use the standard test views, which originate from novel camera positions not encountered during training.

Evaluation Metrics. To evaluate the quality of rendering dynamic scenes, we employ several commonly used image quality assessment metrics: Peak Signal-to-Noise Ratio (PSNR), Structural Similarity Index Measure (SSIM), and Learned Perceptual Image Patch Similarity (LPIPS) [45]. Following previous work, LPIPS [45] is computed using AlexNet [46] and VGGNet [47] on the N3V dataset and the D-NeRF dataset, respectively. Moreover, we report the number of Gaussians and the storage size. To demonstrate the improvement in rendering speed, we report two types of FPS: (1) FPS, which covers the entire rendering function; due to interference from other operations, it cannot fully reflect the acceleration achieved by our method. (2) Raster FPS, which covers only rasterization, the most computationally intensive component of rendering.

Figure 4: Qualitative comparisons of 4DGS and our method. (a) Results on the Sear Steak scene. (b) Results on the Trex scene.

Table 3: Ablation study of per-component contribution.
<table><tr><td>ID</td><td colspan="3">Method\Dataset</td><td rowspan="2">PSNR↑</td><td rowspan="2">SSIM↑</td><td rowspan="2">LPIPS↓</td><td rowspan="2">Storage(MB)↓</td><td rowspan="2">FPS↑</td><td rowspan="2">Raster FPS↑</td><td rowspan="2">#Gauss↓</td></tr><tr><td></td><td>Filter</td><td>Pruning</td><td>PP</td></tr><tr><td>a</td><td colspan="3">vanilla 4DGS1</td><td>31.91</td><td>0.9458</td><td>0.0518</td><td>2085</td><td>90</td><td>118</td><td>3333160</td></tr><tr><td>b</td><td colspan="3">✓1,2</td><td>31.51</td><td>0.9446</td><td>0.0539</td><td>2091</td><td>242</td><td>561</td><td>3333160</td></tr><tr><td>c</td><td colspan="3">✓2</td><td>29.56</td><td>0.9354</td><td>0.0605</td><td>2091</td><td>300</td><td>561</td><td>3333160</td></tr><tr><td>d</td><td></td><td>✓</td><td></td><td>31.92</td><td>0.9462</td><td>0.0513</td><td>417</td><td>312</td><td>600</td><td>666632</td></tr><tr><td>e</td><td>✓</td><td>✓</td><td></td><td>31.88</td><td>0.9457</td><td>0.0524</td><td>418</td><td>805</td><td>1092</td><td>666632</td></tr><tr><td>f</td><td>✓2</td><td>✓</td><td></td><td>31.63</td><td>0.9452</td><td>0.0524</td><td>418</td><td>789</td><td>1080</td><td>666632</td></tr><tr><td>g</td><td>✓</td><td>✓</td><td>✓</td><td>31.87</td><td>0.9444</td><td>0.0532</td><td>50</td><td>805</td><td>1092</td><td>666632</td></tr></table>
<sup>1</sup> The result with environment map. <sup>2</sup> The result without finetuning.
Baselines. Our primary baseline for comparison is 4DGS [1], which serves as the foundation of our model. Moreover, we compare 4DGS-1K with two concurrent works on 4D compression, MEGA [42] and Compact3D [29]. We also conduct comparisons with 4D-RotorGS [41], another 4D Gaussian Splatting representation capable of real-time rendering and high-fidelity results. In addition, we compare our work against NeRF-based methods, like Neural Volume [4], DyNeRF [2], StreamRF [18], HyperReel [5], DNeRF [19], K-Planes [6] and 4K4D [36]. Furthermore, other recent competitive Gaussian-based methods are also considered in our comparison, including Dynamic 3DGS [37], STG [40], 4D Gaussian [7], E-D3DGS [26], Swift4D [38], Grid4D [39] and SC-GS [44].

Implementation Details. Our method is tested on a single RTX 3090 GPU. We train our model following the experimental setting of 4DGS [1]. After training, we perform the pruning and filtering strategy, and then fine-tune 4DGS-1K for 5,000 iterations while disabling additional clone/split operations. For the pruning strategy, the pruning ratio is set to $80\%$ on the N3V dataset and $85\%$ on the D-NeRF dataset. For the temporal filter, we set the interval $\Delta_t$ between key-frames to 20 frames on the N3V dataset; considering the varying capture speeds in the D-NeRF dataset, we instead select 6 key-frames rather than a fixed frame interval. Additionally, to further compress the storage of 4DGS [1], we apply post-processing techniques to our model, denoted as Ours-PP, which include vector quantization [28] on the SH coefficients of the Gaussians and compressing the filter masks into bits.

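The two post-processing steps admit a very small sketch: bit-packing the boolean key-frame masks and replacing SH coefficient vectors by codebook indices. The snippet below is only an illustration of the idea under assumed shapes; the actual Ours-PP pipeline (codebook learning, entry counts, storage format) is not specified here.

```python
import numpy as np

def pack_masks(key_masks):
    # Store each key-frame's boolean Gaussian mask as raw bits (1 bit per Gaussian).
    return [np.packbits(m) for m in key_masks]

def quantize_sh(sh_coeffs, codebook):
    """Replace each Gaussian's SH vector by the index of its nearest codebook entry.

    sh_coeffs: [G, D]; codebook: [K, D] (e.g. learned with k-means as in CompGS [28]);
    indices fit in uint16 for K <= 65536."""
    d2 = ((sh_coeffs[:, None, :] - codebook[None, :, :]) ** 2).sum(-1)
    return d2.argmin(axis=1).astype(np.uint16)
```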
Note that we do not apply the environment maps implemented by 4DGS on the Coffee Martini and Flame Salmon scenes, since they significantly affect the rendering speed. Subsequent results indicate that removing them for 4DGS-1K does not significantly degrade the rendering quality.
# 5.2 Results and Comparisons

Comparisons on real-world dataset. Table 1 presents a quantitative evaluation on the N3V dataset. 4DGS-1K achieves rendering quality comparable to current baselines. Compared to 4DGS [1], we achieve a $41\times$ compression and $9\times$ faster rendering at the cost of a $0.04$ dB reduction in PSNR. In addition, compared to MEGA [42] and Compact3D [29], two concurrent works on 4D compression, our rendering speed is $10\times$ and $4\times$ faster, respectively, while maintaining a comparable storage requirement and high-quality reconstruction. Moreover, the FPS of 4DGS-1K far exceeds the current state of the art: it is nearly twice as fast as the fastest existing model, Dynamic 3DGS [37], while requiring only $1\%$ of the storage size. Additionally, 4DGS-1K achieves better visual quality than Dynamic 3DGS [37], with an increase of about $1.2$ dB in PSNR. Compared to the storage-efficient models E-D3DGS [26] and DyNeRF [2], we achieve an increase of over $0.5$ dB in PSNR and faster rendering speed. Figure 4 offers qualitative comparisons for the Sear Steak scene, demonstrating that our results contain more vivid details.

Comparisons on synthetic dataset. We benchmark 4DGS-1K against several baselines on the monocular synthetic dataset introduced by D-NeRF [19]. The results are shown in Table 2. Compared to 4DGS [1], our method achieves up to $40\times$ compression and $4\times$ faster rendering. Notably, the rendering quality of our model even surpasses that of the original 4DGS, with an increase of about $0.38$ dB in PSNR. Furthermore, our approach exhibits higher rendering quality and smaller storage overhead than most Gaussian-based methods. We provide qualitative results in Figure 4 for a more visual assessment.

# 5.3 Ablation Study

To evaluate the contribution of each component, we conduct ablation experiments on the N3V dataset [2]. More ablations are provided in the supplement (see Appendix B).

Pruning. As shown in Table 3, our pruning strategy achieves a $5\times$ compression ratio and $5\times$ faster rasterization while slightly improving rendering quality. As shown in Figure 2a, it also reduces the presence of Gaussians with a short lifespan, so 4DGS-1K processes far fewer unnecessary Gaussians during rendering (see Figure 2b). Moreover, as shown in Figure 2c, pruning broadens the overlap of active Gaussians across adjacent frames, which allows larger intervals for the temporal filter.

Temporal Filtering. As illustrated in Table 3, rows b and c are obtained by directly applying the filter without fine-tuning, which shows that this component alone enhances the rendering speed of 4DGS. However, as mentioned in Section 4.1, 4DGS contains a huge number of short-lifespan Gaussians, so some Gaussians are overlooked by the filter, causing a slight decrease in rendering quality. After pruning, most Gaussians have long lifespans and remain visible even at large intervals, which alleviates the issue of overlooked Gaussians (see row f). Furthermore, appropriate fine-tuning allows the Gaussians in the active list to relearn scene features and compensate for the loss incurred by the temporal filter (see rows e and f).

# 6 Conclusion

In this paper, we present 4DGS-1K, a compact and memory-efficient dynamic scene representation capable of running at over 1000 FPS on modern GPUs. We introduce a novel pruning criterion called the spatial-temporal variation score, which eliminates a significant number of redundant Gaussian points in 4DGS and drastically reduces storage requirements. Additionally, we propose a temporal filter that selectively activates only a subset of Gaussians during each frame's rendering, enabling rendering speeds that far surpass existing baselines. Compared to vanilla 4DGS, 4DGS-1K achieves a $41\times$ reduction in storage and $9\times$ faster rasterization speed while maintaining high-quality reconstruction.

# Acknowledgement

This project is supported by the National Research Foundation, Singapore, under its Medium Sized Center for Advanced Robotics Technology Innovation.

# References
[1] Zeyu Yang, Hongye Yang, Zijie Pan, and Li Zhang. Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting. arXiv preprint arXiv:2310.10642, 2023.
[2] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, Richard Newcombe, et al. Neural 3d video synthesis from multi-view video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5521-5531, 2022.
[3] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021.
[4] Stephen Lombardi, Tomas Simon, Jason Saragih, Gabriel Schwartz, Andreas Lehrmann, and Yaser Sheikh. Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751, 2019.
[5] Benjamin Attal, Jia-Bin Huang, Christian Richardt, Michael Zollhoefer, Johannes Kopf, Matthew O'Toole, and Changil Kim. Hyperreel: High-fidelity 6-dof video with ray-conditioned sampling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16610-16620, 2023.
[6] Sara Fridovich-Keil, Giacomo Meanti, Frederik Rahbæk Warburg, Benjamin Recht, and Angjoo Kanazawa. K-planes: Explicit radiance fields in space, time, and appearance. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12479-12488, 2023.
[7] Guanjun Wu, Taoran Yi, Jiemin Fang, Lingxi Xie, Xiaopeng Zhang, Wei Wei, Wenyu Liu, Qi Tian, and Xinggang Wang. 4d gaussian splatting for real-time dynamic scene rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20310-20320, 2024.
[8] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In European Conference on Computer Vision, pages 333-350. Springer, 2022.
[9] Katja Schwarz, Axel Sauer, Michael Niemeyer, Yiyi Liao, and Andreas Geiger. Voxgraf: Fast 3d-aware image synthesis with sparse voxel grids. Advances in Neural Information Processing Systems, 35:33999-34011, 2022.
[10] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5459-5469, 2022.
[11] Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13524-13534, 2022.
[12] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501-5510, 2022.
[13] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (TOG), 41(4):1-15, 2022.
[14] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with thousands of tiny MLPs. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14335-14345, 2021.
[15] Huan Wang, Jian Ren, Zeng Huang, Kyle Olszewski, Menglei Chai, Yun Fu, and Sergey Tulyakov. R2l: Distilling neural radiance field to neural light field for efficient novel view synthesis. In European Conference on Computer Vision, pages 612-629. Springer, 2022.
[16] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics, 42(4), 2023.
[17] Ben Mildenhall, Pratul P Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4):1-14, 2019.
[18] Lingzhi Li, Zhen Shen, Zhongshu Wang, Li Shen, and Ping Tan. Streaming radiance fields for 3d video synthesis. Advances in Neural Information Processing Systems, 35:13485-13498, 2022.
[19] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-nerf: Neural radiance fields for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10318-10327, 2021.
[20] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 130-141, 2023.
[21] Liangchen Song, Anpei Chen, Zhong Li, Zhang Chen, Lele Chen, Junsong Yuan, Yi Xu, and Andreas Geiger. Nerfplayer: A streamable dynamic scene representation with decomposed neural radiance fields. IEEE Transactions on Visualization and Computer Graphics, 29(5):2732-2742, 2023.
[22] Feng Wang, Sinan Tan, Xinghang Li, Zeyue Tian, Yafei Song, and Huaping Liu. Mixed neural voxels for fast multi-view video synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19706-19716, 2023.
[23] Ziyi Yang, Xinyu Gao, Wen Zhou, Shaohui Jiao, Yuqing Zhang, and Xiaogang Jin. Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20331-20341, 2024.
[24] Zhiyang Guo, Wengang Zhou, Li Li, Min Wang, and Houqiang Li. Motion-aware 3d gaussian splatting for efficient dynamic scene reconstruction. arXiv preprint arXiv:2403.11447, 2024.
[25] Zhicheng Lu, Xiang Guo, Le Hui, Tianrui Chen, Min Yang, Xiao Tang, Feng Zhu, and Yuchao Dai. 3d geometry-aware deformable gaussian splatting for dynamic view synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8900-8910, 2024.
[26] Jeongmin Bae, Seoha Kim, Youngsik Yun, Hahyun Lee, Gun Bang, and Youngjung Uh. Per-gaussian embedding-based deformation for deformable 3d gaussian splatting. arXiv preprint arXiv:2404.03613, 2024.
[27] Devikalyan Das, Christopher Wewer, Raza Yunus, Eddy Ilg, and Jan Eric Lenssen. Neural parametric gaussians for monocular non-rigid object reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10715-10725, 2024.
[28] KL Navaneet, Kossar Pourahmadi Meibodi, Soroush Abbasi Koohpayegani, and Hamed Pirsiavash. Compgs: Smaller and faster gaussian splatting with vector quantization. In European Conference on Computer Vision, 2024.
[29] Joo Chan Lee, Daniel Rho, Xiangyu Sun, Jong Hwan Ko, and Eunbyung Park. Compact 3d gaussian representation for radiance field. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21719-21728, 2024.
[30] Zhiwen Fan, Kevin Wang, Kairun Wen, Zehao Zhu, Dejia Xu, and Zhangyang Wang. Lightgaussian: Unbounded 3d gaussian compression with 15x reduction and 200+ fps. arXiv preprint arXiv:2311.17245, 2023.
[31] Guangchi Fang and Bing Wang. Mini-splatting: Representing scenes with a constrained number of gaussians. arXiv preprint arXiv:2403.14166, 2024.
[32] Michael Niemeyer, Fabian Manhardt, Marie-Julie Rakotosaona, Michael Oechsle, Daniel Duckworth, Rama Gosula, Keisuke Tateno, John Bates, Dominik Kaeser, and Federico Tombari. Radsplat: Radiance field-informed gaussian splatting for robust real-time rendering with 900+ fps. arXiv preprint arXiv:2403.13806, 2024.
[33] Muhammad Salman Ali, Maryam Qamar, Sung-Ho Bae, and Enzo Tartaglione. Trimming the fat: Efficient compression of 3d gaussian splats through pruning. arXiv preprint arXiv:2406.18214, 2024.
[34] Panagiotis Papantonakis, Georgios Kopanas, Bernhard Kerbl, Alexandre Lanvin, and George Drettakis. Reducing the memory footprint of 3d gaussian splatting. Proceedings of the ACM on Computer Graphics and Interactive Techniques, 7(1):1-17, 2024.
|
| 337 |
+
|
| 338 |
+
[35] Wenkai Liu, Tao Guan, Bin Zhu, Lili Ju, Zikai Song, Dan Li, Yuesong Wang, and Wei Yang. Efficientgs: Streamlining gaussian splatting for large-scale high-resolution scene representation. arXiv preprint arXiv:2404.12777, 2024.
|
| 339 |
+
[36] Zhen Xu, Sida Peng, Haotong Lin, Guangzhao He, Jiaming Sun, Yujun Shen, Hujun Bao, and Xiaowei Zhou. 4k4d: Real-time 4d view synthesis at 4k resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 20029-20040, 2024.
|
| 340 |
+
[37] Jonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. arXiv preprint arXiv:2308.09713, 2023.
|
| 341 |
+
[38] Jiahao Wu, Rui Peng, Zhiyan Wang, Lu Xiao, Luyang Tang, Jinbo Yan, Kaiqiang Xiong, and Ronggang Wang. Swift4d: Adaptive divide-and-conquer gaussian splattering for compact and efficient reconstruction of dynamic scene. arXiv preprint arXiv:2503.12307, 2025.
|
| 342 |
+
[39] Jiawei Xu, Zexin Fan, Jian Yang, and Jin Xie. Grid4d: 4d decomposed hash encoding for high-fidelity dynamic gaussian splatting. Advances in Neural Information Processing Systems, 37:123787-123811, 2024.
|
| 343 |
+
[40] Zhan Li, Zhang Chen, Zhong Li, and Yi Xu. Spacetime gaussian feature splatting for real-time dynamic view synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8508-8520, 2024.
|
| 344 |
+
[41] Yuanxing Duan, Fangyin Wei, Qiyu Dai, Yuhang He, Wenzheng Chen, and Baoquan Chen. 4d-rotor gaussian splatting: towards efficient novel view synthesis for dynamic scenes. In ACM SIGGRAPH 2024 Conference Papers, pages 1–11, 2024.
|
| 345 |
+
[42] Xinjie Zhang, Zhening Liu, Yifan Zhang, Xingtong Ge, Dailan He, Tongda Xu, Yan Wang, Zehong Lin, Shuicheng Yan, and Jun Zhang. Mega: Memory-efficient 4d gaussian splatting for dynamic scenes. arXiv preprint arXiv:2410.13613, 2024.
|
| 346 |
+
[43] Jiemin Fang, Taoran Yi, Xinggang Wang, Lingxi Xie, Xiaopeng Zhang, Wenyu Liu, Matthias Nießner, and Qi Tian. Fast dynamic radiance fields with time-aware neural voxels. In SIGGRAPH Asia 2022 Conference Papers, pages 1-9, 2022.
|
| 347 |
+
[44] Yi-Hua Huang, Yang-Tian Sun, Ziyi Yang, Xiaoyang Lyu, Yan-Pei Cao, and Xiaojuan Qi. Sc-gs: Sparse-controlled gaussian splatting for editable dynamic scenes. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4220-4230, 2024.
|
| 348 |
+
[45] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018.
|
| 349 |
+
[46] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Advances in neural information processing systems, 25, 2012.
|
| 350 |
+
[47] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014.
|
| 351 |
+
|
| 352 |
+
# NeurIPS Paper Checklist
|
| 353 |
+
|
| 354 |
+
# 1. Claims
|
| 355 |
+
|
| 356 |
+
Question: Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope?
|
| 357 |
+
|
| 358 |
+
Answer: [Yes]
|
| 359 |
+
|
| 360 |
+
Justification: The main claims made in the abstract and introduction accurately reflect the paper's contributions and scope.
|
| 361 |
+
|
| 362 |
+
Guidelines:
|
| 363 |
+
|
| 364 |
+
- The answer NA means that the abstract and introduction do not include the claims made in the paper.
|
| 365 |
+
- The abstract and/or introduction should clearly state the claims made, including the contributions made in the paper and important assumptions and limitations. A No or NA answer to this question will not be perceived well by the reviewers.
|
| 366 |
+
- The claims made should match theoretical and experimental results, and reflect how much the results can be expected to generalize to other settings.
|
| 367 |
+
- It is fine to include aspirational goals as motivation as long as it is clear that these goals are not attained by the paper.
|
| 368 |
+
|
| 369 |
+
# 2. Limitations
|
| 370 |
+
|
| 371 |
+
Question: Does the paper discuss the limitations of the work performed by the authors?
|
| 372 |
+
|
| 373 |
+
Answer: [Yes]
|
| 374 |
+
|
| 375 |
+
Justification: We discuss the limitations in the supplementary material.
|
| 376 |
+
|
| 377 |
+
Guidelines:
|
| 378 |
+
|
| 379 |
+
- The answer NA means that the paper has no limitation while the answer No means that the paper has limitations, but those are not discussed in the paper.
|
| 380 |
+
- The authors are encouraged to create a separate "Limitations" section in their paper.
|
| 381 |
+
- The paper should point out any strong assumptions and how robust the results are to violations of these assumptions (e.g., independence assumptions, noiseless settings, model well-specification, asymptotic approximations only holding locally). The authors should reflect on how these assumptions might be violated in practice and what the implications would be.
|
| 382 |
+
- The authors should reflect on the scope of the claims made, e.g., if the approach was only tested on a few datasets or with a few runs. In general, empirical results often depend on implicit assumptions, which should be articulated.
|
| 383 |
+
- The authors should reflect on the factors that influence the performance of the approach. For example, a facial recognition algorithm may perform poorly when image resolution is low or images are taken in low lighting. Or a speech-to-text system might not be used reliably to provide closed captions for online lectures because it fails to handle technical jargon.
|
| 384 |
+
- The authors should discuss the computational efficiency of the proposed algorithms and how they scale with dataset size.
|
| 385 |
+
- If applicable, the authors should discuss possible limitations of their approach to address problems of privacy and fairness.
|
| 386 |
+
- While the authors might fear that complete honesty about limitations might be used by reviewers as grounds for rejection, a worse outcome might be that reviewers discover limitations that aren't acknowledged in the paper. The authors should use their best judgment and recognize that individual actions in favor of transparency play an important role in developing norms that preserve the integrity of the community. Reviewers will be specifically instructed to not penalize honesty concerning limitations.
|
| 387 |
+
|
| 388 |
+
# 3. Theory assumptions and proofs
|
| 389 |
+
|
| 390 |
+
Question: For each theoretical result, does the paper provide the full set of assumptions and a complete (and correct) proof?
|
| 391 |
+
|
| 392 |
+
Answer: [NA]
|
| 393 |
+
|
| 394 |
+
Justification: This paper does not involve theoretical results.
|
| 395 |
+
|
| 396 |
+
Guidelines:
|
| 397 |
+
|
| 398 |
+
- The answer NA means that the paper does not include theoretical results.
|
| 399 |
+
- All the theorems, formulas, and proofs in the paper should be numbered and cross-referenced.
|
| 400 |
+
- All assumptions should be clearly stated or referenced in the statement of any theorems.
|
| 401 |
+
- The proofs can either appear in the main paper or the supplemental material, but if they appear in the supplemental material, the authors are encouraged to provide a short proof sketch to provide intuition.
|
| 402 |
+
- Inversely, any informal proof provided in the core of the paper should be complemented by formal proofs provided in appendix or supplemental material.
|
| 403 |
+
- Theorems and Lemmas that the proof relies upon should be properly referenced.
|
| 404 |
+
|
| 405 |
+
# 4. Experimental result reproducibility
|
| 406 |
+
|
| 407 |
+
Question: Does the paper fully disclose all the information needed to reproduce the main experimental results of the paper to the extent that it affects the main claims and/or conclusions of the paper (regardless of whether the code and data are provided or not)?
|
| 408 |
+
|
| 409 |
+
Answer: [Yes]
|
| 410 |
+
|
| 411 |
+
Justification: The paper discloses all the information needed to reproduce the main experimental results of the paper in Section 5.
|
| 412 |
+
|
| 413 |
+
Guidelines:
|
| 414 |
+
|
| 415 |
+
- The answer NA means that the paper does not include experiments.
|
| 416 |
+
- If the paper includes experiments, a No answer to this question will not be perceived well by the reviewers: Making the paper reproducible is important, regardless of whether the code and data are provided or not.
|
| 417 |
+
- If the contribution is a dataset and/or model, the authors should describe the steps taken to make their results reproducible or verifiable.
|
| 418 |
+
- Depending on the contribution, reproducibility can be accomplished in various ways. For example, if the contribution is a novel architecture, describing the architecture fully might suffice, or if the contribution is a specific model and empirical evaluation, it may be necessary to either make it possible for others to replicate the model with the same dataset, or provide access to the model. In general, releasing code and data is often one good way to accomplish this, but reproducibility can also be provided via detailed instructions for how to replicate the results, access to a hosted model (e.g., in the case of a large language model), releasing of a model checkpoint, or other means that are appropriate to the research performed.
|
| 419 |
+
- While NeurIPS does not require releasing code, the conference does require all submissions to provide some reasonable avenue for reproducibility, which may depend on the nature of the contribution. For example
|
| 420 |
+
(a) If the contribution is primarily a new algorithm, the paper should make it clear how to reproduce that algorithm.
|
| 421 |
+
(b) If the contribution is primarily a new model architecture, the paper should describe the architecture clearly and fully.
|
| 422 |
+
(c) If the contribution is a new model (e.g., a large language model), then there should either be a way to access this model for reproducing the results or a way to reproduce the model (e.g., with an open-source dataset or instructions for how to construct the dataset).
|
| 423 |
+
(d) We recognize that reproducibility may be tricky in some cases, in which case authors are welcome to describe the particular way they provide for reproducibility. In the case of closed-source models, it may be that access to the model is limited in some way (e.g., to registered users), but it should be possible for other researchers to have some path to reproducing or verifying the results.
|
| 424 |
+
|
| 425 |
+
# 5. Open access to data and code
|
| 426 |
+
|
| 427 |
+
Question: Does the paper provide open access to the data and code, with sufficient instructions to faithfully reproduce the main experimental results, as described in supplemental material?
|
| 428 |
+
|
| 429 |
+
Answer: [No]
|
| 430 |
+
|
| 431 |
+
Justification: The code will be open-sourced upon acceptance.
|
| 432 |
+
|
| 433 |
+
Guidelines:
|
| 434 |
+
|
| 435 |
+
- The answer NA means that paper does not include experiments requiring code.
|
| 436 |
+
- Please see the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details.
|
| 437 |
+
- While we encourage the release of code and data, we understand that this might not be possible, so "No" is an acceptable answer. Papers cannot be rejected simply for not including code, unless this is central to the contribution (e.g., for a new open-source benchmark).
|
| 438 |
+
- The instructions should contain the exact command and environment needed to run to reproduce the results. See the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details.
|
| 439 |
+
- The authors should provide instructions on data access and preparation, including how to access the raw data, preprocessed data, intermediate data, and generated data, etc.
|
| 440 |
+
- The authors should provide scripts to reproduce all experimental results for the new proposed method and baselines. If only a subset of experiments are reproducible, they should state which ones are omitted from the script and why.
|
| 441 |
+
- At submission time, to preserve anonymity, the authors should release anonymized versions (if applicable).
|
| 442 |
+
- Providing as much information as possible in supplemental material (appended to the paper) is recommended, but including URLs to data and code is permitted.
|
| 443 |
+
|
| 444 |
+
# 6. Experimental setting/details
|
| 445 |
+
|
| 446 |
+
Question: Does the paper specify all the training and test details (e.g., data splits, hyperparameters, how they were chosen, type of optimizer, etc.) necessary to understand the results?
|
| 447 |
+
|
| 448 |
+
Answer: [Yes]
|
| 449 |
+
|
| 450 |
+
Justification: The detailed experiment settings are listed in Section 5.
|
| 451 |
+
|
| 452 |
+
Guidelines:
|
| 453 |
+
|
| 454 |
+
- The answer NA means that the paper does not include experiments.
|
| 455 |
+
- The experimental setting should be presented in the core of the paper to a level of detail that is necessary to appreciate the results and make sense of them.
|
| 456 |
+
- The full details can be provided either with the code, in appendix, or as supplemental material.
|
| 457 |
+
|
| 458 |
+
# 7. Experiment statistical significance
|
| 459 |
+
|
| 460 |
+
Question: Does the paper report error bars suitably and correctly defined or other appropriate information about the statistical significance of the experiments?
|
| 461 |
+
|
| 462 |
+
Answer: [NA]
|
| 463 |
+
|
| 464 |
+
Justification: Following existing work on 4DGS, this paper reports the essential detailed quantitative results in Section 5; as in that prior work, error bars are not reported.
|
| 465 |
+
|
| 466 |
+
Guidelines:
|
| 467 |
+
|
| 468 |
+
- The answer NA means that the paper does not include experiments.
|
| 469 |
+
- The authors should answer "Yes" if the results are accompanied by error bars, confidence intervals, or statistical significance tests, at least for the experiments that support the main claims of the paper.
|
| 470 |
+
- The factors of variability that the error bars are capturing should be clearly stated (for example, train/test split, initialization, random drawing of some parameter, or overall run with given experimental conditions).
|
| 471 |
+
- The method for calculating the error bars should be explained (closed form formula, call to a library function, bootstrap, etc.)
|
| 472 |
+
- The assumptions made should be given (e.g., Normally distributed errors).
|
| 473 |
+
|
| 474 |
+
- It should be clear whether the error bar is the standard deviation or the standard error of the mean.
|
| 475 |
+
- It is OK to report 1-sigma error bars, but one should state it. The authors should preferably report a 2-sigma error bar than state that they have a $96\%$ CI, if the hypothesis of Normality of errors is not verified.
|
| 476 |
+
- For asymmetric distributions, the authors should be careful not to show in tables or figures symmetric error bars that would yield results that are out of range (e.g. negative error rates).
|
| 477 |
+
- If error bars are reported in tables or plots, The authors should explain in the text how they were calculated and reference the corresponding figures or tables in the text.
|
| 478 |
+
|
| 479 |
+
# 8. Experiments compute resources
|
| 480 |
+
|
| 481 |
+
Question: For each experiment, does the paper provide sufficient information on the computer resources (type of compute workers, memory, time of execution) needed to reproduce the experiments?
|
| 482 |
+
|
| 483 |
+
Answer: [Yes]
|
| 484 |
+
|
| 485 |
+
Justification: The paper provides sufficient information on the computer resources in Section 5 and supplemental material.
|
| 486 |
+
|
| 487 |
+
Guidelines:
|
| 488 |
+
|
| 489 |
+
- The answer NA means that the paper does not include experiments.
|
| 490 |
+
- The paper should indicate the type of compute workers CPU or GPU, internal cluster, or cloud provider, including relevant memory and storage.
|
| 491 |
+
- The paper should provide the amount of compute required for each of the individual experimental runs as well as estimate the total compute.
|
| 492 |
+
- The paper should disclose whether the full research project required more compute than the experiments reported in the paper (e.g., preliminary or failed experiments that didn't make it into the paper).
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
# 9. Code of ethics
|
| 497 |
+
|
| 498 |
+
Question: Does the research conducted in the paper conform, in every respect, with the NeurIPS Code of Ethics https://neurips.cc/public/EthicsGuidelines?
|
| 499 |
+
|
| 500 |
+
Answer: [Yes]
|
| 501 |
+
|
| 502 |
+
Justification: The research conducted in this paper conforms to the NeurIPS Code of Ethics.
|
| 503 |
+
|
| 504 |
+
Guidelines:
|
| 505 |
+
|
| 506 |
+
- The answer NA means that the authors have not reviewed the NeurIPS Code of Ethics.
|
| 507 |
+
- If the authors answer No, they should explain the special circumstances that require a deviation from the Code of Ethics.
|
| 508 |
+
- The authors should make sure to preserve anonymity (e.g., if there is a special consideration due to laws or regulations in their jurisdiction).
|
| 509 |
+
|
| 510 |
+
# 10. Broader impacts
|
| 511 |
+
|
| 512 |
+
Question: Does the paper discuss both potential positive societal impacts and negative societal impacts of the work performed?
|
| 513 |
+
|
| 514 |
+
Answer: [Yes]
|
| 515 |
+
|
| 516 |
+
Justification: This paper discusses the potential societal impacts in Section 6 and the supplemental material.
|
| 517 |
+
|
| 518 |
+
Guidelines:
|
| 519 |
+
|
| 520 |
+
- The answer NA means that there is no societal impact of the work performed.
|
| 521 |
+
- If the authors answer NA or No, they should explain why their work has no societal impact or why the paper does not address societal impact.
|
| 522 |
+
- Examples of negative societal impacts include potential malicious or unintended uses (e.g., disinformation, generating fake profiles, surveillance), fairness considerations (e.g., deployment of technologies that could make decisions that unfairly impact specific groups), privacy considerations, and security considerations.
|
| 523 |
+
|
| 524 |
+
- The conference expects that many papers will be foundational research and not tied to particular applications, let alone deployments. However, if there is a direct path to any negative applications, the authors should point it out. For example, it is legitimate to point out that an improvement in the quality of generative models could be used to generate deepfakes for disinformation. On the other hand, it is not needed to point out that a generic algorithm for optimizing neural networks could enable people to train models that generate Deepfakes faster.
|
| 525 |
+
- The authors should consider possible harms that could arise when the technology is being used as intended and functioning correctly, harms that could arise when the technology is being used as intended but gives incorrect results, and harms following from (intentional or unintentional) misuse of the technology.
|
| 526 |
+
- If there are negative societal impacts, the authors could also discuss possible mitigation strategies (e.g., gated release of models, providing defenses in addition to attacks, mechanisms for monitoring misuse, mechanisms to monitor how a system learns from feedback over time, improving the efficiency and accessibility of ML).
|
| 527 |
+
|
| 528 |
+
# 11. Safeguards
|
| 529 |
+
|
| 530 |
+
Question: Does the paper describe safeguards that have been put in place for responsible release of data or models that have a high risk for misuse (e.g., pretrained language models, image generators, or scraped datasets)?
|
| 531 |
+
|
| 532 |
+
Answer: [NA]
|
| 533 |
+
|
| 534 |
+
Justification: This paper does not pose such risks.
|
| 535 |
+
|
| 536 |
+
Guidelines:
|
| 537 |
+
|
| 538 |
+
- The answer NA means that the paper poses no such risks.
|
| 539 |
+
- Released models that have a high risk for misuse or dual-use should be released with necessary safeguards to allow for controlled use of the model, for example by requiring that users adhere to usage guidelines or restrictions to access the model or implementing safety filters.
|
| 540 |
+
- Datasets that have been scraped from the Internet could pose safety risks. The authors should describe how they avoided releasing unsafe images.
|
| 541 |
+
- We recognize that providing effective safeguards is challenging, and many papers do not require this, but we encourage authors to take this into account and make a best faith effort.
|
| 542 |
+
|
| 543 |
+
# 12. Licenses for existing assets
|
| 544 |
+
|
| 545 |
+
Question: Are the creators or original owners of assets (e.g., code, data, models), used in the paper, properly credited and are the license and terms of use explicitly mentioned and properly respected?
|
| 546 |
+
|
| 547 |
+
Answer: [Yes]
|
| 548 |
+
|
| 549 |
+
Justification: This paper credits the original assets and follows the applicable licenses and terms of use.
|
| 550 |
+
|
| 551 |
+
Guidelines:
|
| 552 |
+
|
| 553 |
+
- The answer NA means that the paper does not use existing assets.
|
| 554 |
+
- The authors should cite the original paper that produced the code package or dataset.
|
| 555 |
+
- The authors should state which version of the asset is used and, if possible, include a URL.
|
| 556 |
+
- The name of the license (e.g., CC-BY 4.0) should be included for each asset.
|
| 557 |
+
- For scraped data from a particular source (e.g., website), the copyright and terms of service of that source should be provided.
|
| 558 |
+
- If assets are released, the license, copyright information, and terms of use in the package should be provided. For popular datasets, paperswithcode.com/datasets has curated licenses for some datasets. Their licensing guide can help determine the license of a dataset.
|
| 559 |
+
- For existing datasets that are re-packaged, both the original license and the license of the derived asset (if it has changed) should be provided.
|
| 560 |
+
|
| 561 |
+
- If this information is not available online, the authors are encouraged to reach out to the asset's creators.
|
| 562 |
+
|
| 563 |
+
# 13. New assets
|
| 564 |
+
|
| 565 |
+
Question: Are new assets introduced in the paper well documented and is the documentation provided alongside the assets?
|
| 566 |
+
|
| 567 |
+
Answer: [NA]
|
| 568 |
+
|
| 569 |
+
Justification: The paper does not release new assets.
|
| 570 |
+
|
| 571 |
+
Guidelines:
|
| 572 |
+
|
| 573 |
+
- The answer NA means that the paper does not release new assets.
|
| 574 |
+
- Researchers should communicate the details of the dataset/code/model as part of their submissions via structured templates. This includes details about training, license, limitations, etc.
|
| 575 |
+
- The paper should discuss whether and how consent was obtained from people whose asset is used.
|
| 576 |
+
- At submission time, remember to anonymize your assets (if applicable). You can either create an anonymized URL or include an anonymized zip file.
|
| 577 |
+
|
| 578 |
+
# 14. Crowdsourcing and research with human subjects
|
| 579 |
+
|
| 580 |
+
Question: For crowdsourcing experiments and research with human subjects, does the paper include the full text of instructions given to participants and screenshots, if applicable, as well as details about compensation (if any)?
|
| 581 |
+
|
| 582 |
+
Answer: [NA]
|
| 583 |
+
|
| 584 |
+
Justification: This paper does not involve crowdsourcing nor research with human subjects.
|
| 585 |
+
|
| 586 |
+
Guidelines:
|
| 587 |
+
|
| 588 |
+
- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects.
|
| 589 |
+
- Including this information in the supplemental material is fine, but if the main contribution of the paper involves human subjects, then as much detail as possible should be included in the main paper.
|
| 590 |
+
- According to the NeurIPS Code of Ethics, workers involved in data collection, curation, or other labor should be paid at least the minimum wage in the country of the data collector.
|
| 591 |
+
|
| 592 |
+
# 15. Institutional review board (IRB) approvals or equivalent for research with human subjects
|
| 593 |
+
|
| 594 |
+
Question: Does the paper describe potential risks incurred by study participants, whether such risks were disclosed to the subjects, and whether Institutional Review Board (IRB) approvals (or an equivalent approval/review based on the requirements of your country or institution) were obtained?
|
| 595 |
+
|
| 596 |
+
Answer: [NA]
|
| 597 |
+
|
| 598 |
+
Justification: The paper does not involve crowdsourcing nor research with human subjects.
|
| 599 |
+
|
| 600 |
+
Guidelines:
|
| 601 |
+
|
| 602 |
+
- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects.
|
| 603 |
+
- Depending on the country in which research is conducted, IRB approval (or equivalent) may be required for any human subjects research. If you obtained IRB approval, you should clearly state this in the paper.
|
| 604 |
+
- We recognize that the procedures for this may vary significantly between institutions and locations, and we expect authors to adhere to the NeurIPS Code of Ethics and the guidelines for their institution.
|
| 605 |
+
- For initial submissions, do not include any information that would break anonymity (if applicable), such as the institution conducting the review.
|
| 606 |
+
|
| 607 |
+
# 16. Declaration of LLM usage
|
| 608 |
+
|
| 609 |
+
Question: Does the paper describe the usage of LLMs if it is an important, original, or non-standard component of the core methods in this research? Note that if the LLM is used only for writing, editing, or formatting purposes and does not impact the core methodology, scientific rigorousness, or originality of the research, declaration is not required.
|
| 610 |
+
|
| 611 |
+
Answer: [NA]
|
| 612 |
+
|
| 613 |
+
Justification: The core method development in this research does not involve LLMs.
|
| 614 |
+
|
| 615 |
+
Guidelines:
|
| 616 |
+
|
| 617 |
+
- The answer NA means that the core method development in this research does not involve LLMs as any important, original, or non-standard components.
|
| 618 |
+
- Please refer to our LLM policy (https://neurips.cc/Conferences/2025/LLM) for what should or should not be described.
|
1000fps4dgaussiansplattingfordynamicscenerendering/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:443e970c38c973f4ec90d11145c14df221faba993c10a87eac15463b3981a1be
+size 409186

1000fps4dgaussiansplattingfordynamicscenerendering/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b4d0ba8b34b6ef2ce9a14be2c18d02c0c9df75aa3a7023289f3dc36f9dacf07
+size 673299

1000layernetworksforselfsupervisedrlscalingdepthcanenablenewgoalreachingcapabilities/4a8d9df5-287c-441c-8654-78be23752307_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67afaac15c3b90e135b45650e0ca276e19965cec5ec542cdda4f4f9bfdeeefd4
+size 164834

1000layernetworksforselfsupervisedrlscalingdepthcanenablenewgoalreachingcapabilities/4a8d9df5-287c-441c-8654-78be23752307_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a852a85e6ecc2358ba7089e00fc61ea69af7bc05b718e58f038ccdf4955828f
+size 206763

1000layernetworksforselfsupervisedrlscalingdepthcanenablenewgoalreachingcapabilities/4a8d9df5-287c-441c-8654-78be23752307_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:504fc52ac9fa74457f9de82d143b39c762deaee8b5d8eb790406b4a3efea1342
+size 4312939

1000layernetworksforselfsupervisedrlscalingdepthcanenablenewgoalreachingcapabilities/full.md
ADDED
@@ -0,0 +1,786 @@
# 1000 Layer Networks for Self-Supervised RL: Scaling Depth Can Enable New Goal-Reaching Capabilities
|
| 2 |
+
|
| 3 |
+
Kevin Wang
|
| 4 |
+
|
| 5 |
+
Princeton University
|
| 6 |
+
|
| 7 |
+
kw6487@princeton.edu
|
| 8 |
+
|
| 9 |
+
Ishaan Javali
|
| 10 |
+
|
| 11 |
+
Princeton University
|
| 12 |
+
|
| 13 |
+
ijavali@princeton.edu
|
| 14 |
+
|
| 15 |
+
Michał Bortkiewicz
|
| 16 |
+
|
| 17 |
+
Warsaw University of Technology
|
| 18 |
+
|
| 19 |
+
michal.bortkiewicz.dokt@pw.edu.pl
|
| 20 |
+
|
| 21 |
+
Tomasz Trzciński
|
| 22 |
+
|
| 23 |
+
Warsaw University of Technology,
|
| 24 |
+
|
| 25 |
+
Tooploox, IDEAS Research Institute
|
| 26 |
+
|
| 27 |
+
Benjamin Eysenbach
|
| 28 |
+
|
| 29 |
+
Princeton University
|
| 30 |
+
|
| 31 |
+
eysenbach@princeton.edu
|
| 32 |
+
|
| 33 |
+
# Abstract
|
| 34 |
+
|
| 35 |
+
Scaling up self-supervised learning has driven breakthroughs in language and vision, yet comparable progress has remained elusive in reinforcement learning (RL). In this paper, we study building blocks for self-supervised RL that unlock substantial improvements in scalability, with network depth serving as a critical factor. Whereas most RL papers in recent years have relied on shallow architectures (around 2 - 5 layers), we demonstrate that increasing the depth up to 1024 layers can significantly boost performance. Our experiments are conducted in an unsupervised goal-conditioned setting, where no demonstrations or rewards are provided, so an agent must explore (from scratch) and learn how to maximize the likelihood of reaching commanded goals. Evaluated on simulated locomotion and manipulation tasks, our approach increases performance on the self-supervised contrastive RL algorithm by $2 \times -50 \times$ , outperforming other goal-conditioned baselines. Increasing the model depth not only increases success rates but also qualitatively changes the behaviors learned. The project webpage and code can be found here: https://wang-kevin3290.github.io/scaling-crl/.
|
| 36 |
+
|
| 37 |
+
# 1 Introduction
|
| 38 |
+
|
| 39 |
+
While scaling model size has been an effective recipe in many areas of machine learning, its role and impact in reinforcement learning (RL) remain unclear. The typical model size for state-based RL tasks is between 2 and 5 layers (Raffin et al., 2021; Huang et al., 2022). In contrast, it is not uncommon to use very deep networks in other domains; Llama 3 (Dubey et al., 2024) and Stable Diffusion 3 (Esser et al., 2024) have hundreds of layers. In fields such as vision (Radford et al., 2021; Zhai et al., 2021; Dehghani et al., 2023) and language (Srivastava et al., 2023), models often only acquire the ability to solve certain tasks once they are larger than a critical scale. In the RL setting, many researchers have searched for similar emergent phenomena (Srivastava et al., 2023), but these papers typically report only small marginal benefits, and usually only on tasks where small models already achieve some degree of success (Nauman et al., 2024b; Lee et al., 2024; Farebrother et al., 2024). A key open question in RL today is whether it is possible to achieve similar jumps in performance by scaling RL networks.
|
| 40 |
+
|
| 41 |
+

|
| 42 |
+
|
| 43 |
+

|
| 44 |
+
|
| 45 |
+

|
| 46 |
+
|
| 47 |
+

|
| 48 |
+
|
| 49 |
+

|
| 50 |
+
|
| 51 |
+

|
| 52 |
+
Figure 1: Scaling network depth yields performance gains across a suite of locomotion, navigation, and manipulation tasks, ranging from doubling performance to $50 \times$ improvements on Humanoid-based tasks. Notably, rather than scaling smoothly, performance often jumps at specific critical depths (e.g., 8 layers on Ant Big Maze, 64 on Humanoid U-Maze), which correspond to the emergence of qualitatively distinct policies (see Section 4).
|
| 53 |
+
|
| 54 |
+

|
| 55 |
+
|
| 56 |
+

|
| 57 |
+
|
| 58 |
+

|
| 59 |
+
|
| 60 |
+

|
| 61 |
+
|
| 62 |
+

|
| 63 |
+
|
| 64 |
+
At first glance, it makes sense why training very large RL networks should be difficult: the RL problem provides very few bits of feedback (e.g., only a sparse reward after a long sequence of observations), so the ratio of feedback to parameters is very small. The conventional wisdom (LeCun, 2016), reflected in many recent models (Radford, 2018; Chen et al., 2020; Goyal et al., 2019), has been that large AI systems must be trained primarily in a self-supervised fashion and that RL should only be used to finetune these models. Indeed, many of the recent breakthroughs in other fields have been primarily achieved with self-supervised methods, whether in computer vision (Caron et al., 2021; Radford et al., 2021; Liu et al., 2024), NLP (Srivastava et al., 2023), or multimodal learning (Zong et al., 2024). Thus, if we hope to scale reinforcement learning methods, self-supervision will likely be a key ingredient.
|
| 65 |
+
|
| 66 |
+
In this paper, we will study building blocks for scaling reinforcement learning. Our first step is to rethink the conventional wisdom above: "reinforcement learning" and "self-supervised learning" are not diametric learning rules, but rather can be married together into self-supervised RL systems that explore and learn policies without reference to a reward function or demonstrations (Eysenbach et al., 2021, 2022; Lee et al., 2022). In this work, we use one of the simplest self-supervised RL algorithms, contrastive RL (CRL) (Eysenbach et al., 2022). The second step is to recognize the importance of increasing data availability. We will do this by building on recent GPU-accelerated RL frameworks (Makoviychuk et al., 2021; Rutherford et al., 2023; Rudin et al., 2022; Bortkiewicz et al., 2024). The third step is to increase network depth, using networks that are up to $100 \times$ deeper than those typically found in prior work. Stabilizing the training of such networks will require incorporating architectural techniques from prior work, including residual connections (He et al., 2015), layer normalization (Ba et al., 2016), and Swish activation (Ramachandran et al., 2018). Our experiments will also study the relative importance of batch size and network width.
|
| 67 |
+
|
| 68 |
+
The primary contribution of this work is to show that a method that integrates these building blocks into a single RL approach exhibits strong scalability:
|
| 69 |
+
|
| 70 |
+
- Empirical Scalability: We observe a significant performance increase, more than $20 \times$ in half of the environments and outperforming other standard goal-conditioned baselines. These performance gains correspond to qualitatively distinct policies that emerge as the scale increases.
|
| 71 |
+
- Scaling Depth in Network Architecture: While many prior RL works have primarily focused on increasing network width, they often report limited or even negative returns
|
| 72 |
+
|
| 73 |
+
when expanding depth (Lee et al., 2024; Nauman et al., 2024b). In contrast, our approach unlocks the ability to scale along the axis of depth, yielding performance improvements that surpass those from scaling width alone (see Sec. 4).
|
| 74 |
+
|
| 75 |
+
- Empirical Analysis: We conduct an extensive analysis of the key components in our scaling approach, uncovering critical factors and offering new insights.
|
| 76 |
+
|
| 77 |
+
We anticipate that future research may build on this foundation by uncovering additional building blocks.
|
| 78 |
+
|
| 79 |
+
# 2 Related Work
|
| 80 |
+
|
| 81 |
+
Natural Language Processing (NLP) and Computer Vision (CV) have recently converged in adopting similar architectures (i.e. transformers) and shared learning paradigms (i.e self-supervised learning), which together have enabled transformative capabilities of large-scale models (Vaswani et al., 2017; Srivastava et al., 2023; Zhai et al., 2021; Dehghani et al., 2023; Wei et al., 2022). In contrast, achieving similar advancements in reinforcement learning (RL) remains challenging. Several studies have explored the obstacles to scaling large RL models, including parameter underutilization (Obando-Ceron et al., 2024), plasticity and capacity loss (Lyle et al., 2024, 2022), data sparsity (Andrychowicz et al., 2017; LeCun, 2016), and training instabilities (Ota et al., 2021; Henderson et al., 2018; Van Hasselt et al., 2018; Nauman et al., 2024a). As a result, current efforts to scale RL models are largely restricted to specific problem domains, such as imitation learning (Tuyls et al., 2024), multi-agent games (Neumann and Gros, 2022), language-guided RL (Driess et al., 2023; Ahn et al., 2022), and discrete action spaces (Obando-Ceron et al., 2024; Schwarzer et al., 2023).
|
| 82 |
+
|
| 83 |
+
Recent approaches suggest several promising directions, including new architectural paradigms (Obando-Ceron et al., 2024), distributed training approaches (Ota et al., 2021; Espeholt et al., 2018), distributional RL (Kumar et al., 2023), and distillation (Team et al., 2023). Compared to these approaches, our method makes a simple extension to an existing self-supervised RL algorithm. The most recent works in this vein include Lee et al. (2024) and Nauman et al. (2024b), which leverage residual connections to facilitate the training of wider networks. These efforts primarily focus on network width, noting limited gains from additional depth, thus both works use architectures with only four MLP layers. In our method, we find that scaling width indeed improves performance (Section 4.4); however, our approach also enables scaling along depth, proving to be more powerful than width alone.
|
| 84 |
+
|
| 85 |
+
One notable effort to train deeper networks is described by Farebrother et al. (2024), who cast value-based RL into a classification problem by discretizing the TD objective into a categorical cross-entropy loss. This approach draws on the conjecture that classification-based methods can be more robust and stable and thus may exhibit better scaling properties than their regressive counterparts (Torgo and Gama, 1996; Farebrother et al., 2024). The CRL algorithm that we use effectively uses a cross-entropy loss as well (Eysenbach et al., 2022). Its InfoNCE objective is a generalization of the cross-entropy loss, thereby performing RL tasks by effectively classifying whether current states and actions belong to the same or different trajectory that leads toward a goal state. In this vein, our work serves as a second piece of evidence that classification, much like cross-entropy's role in the scaling success in NLP, could be a potential building block in RL.
|
| 86 |
+
|
| 87 |
+
# 3 Preliminaries
|
| 88 |
+
|
| 89 |
+
This section introduces notation and definitions for goal-conditioned RL and contrastive RL. Our focus is on online RL, where a replay buffer stores the most recent trajectories, and the critic is trained in a self-supervised manner.
|
| 90 |
+
|
| 91 |
+
Goal-Conditioned Reinforcement Learning We define a goal-conditioned MDP as tuple $\mathcal{M}_g = (\mathcal{S},\mathcal{A},p_0,p,p_g,r_g,\gamma)$ , where the agent interacts with the environment to reach arbitrary goals (Kaelbling, 1993; Andrychowicz et al., 2017; Blier et al., 2021). At every time step $t$ , the agent observes state $s_t\in S$ and performs a corresponding action $a_{t}\in \mathcal{A}$ . The agent starts interaction in states sampled from $p_0(s_0)$ , and the interaction dynamics are defined by the transition probability distribution $p(s_{t + 1}\mid s_t,a_t)$ . Goals $g\in \mathcal{G}$ are defined in a goal space $\mathcal{G}$ , which is related to $\mathcal{S}$ via a mapping $f:\mathcal{S}\to \mathcal{G}$ . For example, $\mathcal{G}$ may correspond to a subset of state dimensions. The prior distribution
|
| 92 |
+
|
| 93 |
+
over goals is defined by $p_{g}(g)$ . The reward function is defined as the probability density of reaching the goal in the next time step $r_{g}(s_{t},a_{t})\triangleq (1 - \gamma)p(s_{t + 1} = g\mid s_{t},a_{t})$ , with discount factor $\gamma$ .
|
| 94 |
+
|
| 95 |
+
In this setting, the goal-conditioned policy $\pi(a \mid s, g)$ receives both the current observation of the environment as well as a goal. We define the discounted state visitation distribution as $p_{\gamma}^{\pi(\cdot \mid \cdot, g)}(s) \triangleq (1 - \gamma) \sum_{t=0}^{\infty} \gamma^{t} p_{t}^{\pi(\cdot \mid \cdot, g)}(s)$ , where $p_{t}^{\pi}(s)$ is the probability that policy $\pi$ visits $s$ after exactly $t$ steps, when conditioned with $g$ . This last expression is precisely the $Q$ -function of the policy $\pi(\cdot \mid \cdot, g)$ for the reward $r_{g} \colon Q_{g}^{\pi}(s, a) \triangleq p_{\gamma}^{\pi(\cdot \mid \cdot, g)}(g \mid s, a)$ . The objective is to maximize the expected reward:
|
| 96 |
+
|
| 97 |
+
$$
|
| 98 |
+
\max _ {\pi} \mathbb {E} _ {p _ {0} (s _ {0}), p _ {g} (g), \pi (\cdot | \cdot , g)} \left[ \sum_ {t = 0} ^ {\infty} \gamma^ {t} r _ {g} \left(s _ {t}, a _ {t}\right) \right]. \tag {1}
|
| 99 |
+
$$
|
| 100 |
+
|
| 101 |
+
Contrastive Reinforcement Learning. Our experiments will use the contrastive RL algorithm (Eysenbach et al., 2022) to solve goal-conditioned problems. Contrastive RL is an actor-critic method; we will use $f_{\phi, \psi}(s, a, g)$ to denote the critic and $\pi_{\theta}(a \mid s, g)$ to denote the policy. The critic is parametrized with two neural networks that return state, action pair embedding $\phi(s, a)$ and goal embedding $\psi(g)$ . The critic's output is defined as the $l^2$ -norm between these embeddings: $f_{\phi, \psi}(s, a, g) = \| \phi(s, a) - \psi(g) \|_2$ . The critic is trained with the InfoNCE objective (Sohn, 2016) as in previous works (Eysenbach et al., 2022, 2021; Zheng et al., 2023, 2024; Myers et al., 2024; Bortkiewicz et al., 2024). Training is conducted on batches $\mathcal{B}$ , where $s_i, a_i, g_i$ represent the state, action, and goal (future state) sampled from the same trajectory, while $g_j$ represents a goal sampled from a different, random trajectory. The objective function is defined as:
|
| 102 |
+
|
| 103 |
+
$$
|
| 104 |
+
\min _ {\phi , \psi} \mathbb {E} _ {\mathcal {B}} \left[ - \sum_ {i = 1} ^ {| \mathcal {B} |} \log \left(\frac {e ^ {f _ {\phi , \psi} \left(s _ {i} , a _ {i} , g _ {i}\right)}}{\sum_ {j = 1} ^ {K} e ^ {f _ {\phi , \psi} \left(s _ {i} , a _ {i} , g _ {j}\right)}}\right) \right].
|
| 105 |
+
$$
|
| 106 |
+
|
| 107 |
+
The policy $\pi_{\theta}(a\mid s,g)$ is trained to maximize the critic:
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\max_{\pi_{\theta}}\mathbb{E}_{\substack{p_{0}(s_{0}),p(s_{t + 1}|s_{t},a_{t}),\\ p_{g}(g),\pi_{\theta}(a|s,g)}}\left[f_{\phi ,\psi}(s,a,g)\right].
|
| 111 |
+
$$
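To make these two objectives concrete, here is a minimal JAX sketch (not the authors' released code) of the InfoNCE critic loss and the corresponding actor loss. The batch layout, the embedding dimension, and the use of the negative $\ell_2$ distance between $\phi(s, a)$ and $\psi(g)$ as the logit are illustrative assumptions.

```python
import jax
import jax.numpy as jnp

def infonce_critic_loss(sa_embed: jnp.ndarray, g_embed: jnp.ndarray) -> jnp.ndarray:
    """InfoNCE loss for the contrastive critic.

    sa_embed: [B, D] embeddings phi(s_i, a_i).
    g_embed:  [B, D] embeddings psi(g_j); row i is a future state drawn from the same
              trajectory as (s_i, a_i), so the positive pairs sit on the diagonal.
    """
    # Critic value f(s_i, a_i, g_j); this sketch uses the negative L2 distance so
    # that larger critic values correspond to closer embeddings.
    pairwise_dist = jnp.linalg.norm(sa_embed[:, None, :] - g_embed[None, :, :], axis=-1)
    logits = -pairwise_dist                                    # [B, B]
    # For each (s_i, a_i), classify which goal in the batch came from its own trajectory.
    log_probs = jax.nn.log_softmax(logits, axis=-1)
    return -jnp.mean(jnp.diagonal(log_probs))

def actor_loss(critic_values: jnp.ndarray) -> jnp.ndarray:
    """The policy maximizes the critic at its sampled actions, i.e. minimizes -f(s, a, g)."""
    return -jnp.mean(critic_values)

# Smoke test with random embeddings (batch size 256, embedding size 64).
k1, k2 = jax.random.split(jax.random.PRNGKey(0))
loss = infonce_critic_loss(jax.random.normal(k1, (256, 64)),
                           jax.random.normal(k2, (256, 64)))
```

In practice the two losses are differentiated with respect to the critic encoders and the policy parameters, respectively; the sketch only spells out the batch-level classification structure of the objective.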
|
| 112 |
+
|
| 113 |
+
Residual Connections We incorporate residual connections (He et al., 2015) into our architecture, following their successful use in RL (Farebrother et al., 2024; Lee et al., 2024; Nauman et al., 2024b). A residual block transforms a given representation $\mathbf{h}_i$ by adding a learned residual function $F_{i}(\mathbf{h}_{i})$ to the original representation. Mathematically, this is expressed as:
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
\mathbf {h} _ {i + 1} = \mathbf {h} _ {i} + F _ {i} \left(\mathbf {h} _ {i}\right)
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
where $\mathbf{h}_{i + 1}$ is the output representation, $\mathbf{h}_i$ is the input representation, and $F_{i}(\mathbf{h}_{i})$ is a transformation learned through the network (e.g., using one or more layers). The addition ensures that the network learns modifications to the input rather than entirely new transformations, helping to preserve useful features from earlier layers. Residual con
|
| 120 |
+
|
| 121 |
+

|
| 122 |
+
Figure 2: Architecture. Our approach integrates residual connections into both the actor and critic networks of the Contrastive RL algorithm. The depth of this residual architecture is defined as the total number of Dense layers across the residual blocks, which, with our residual block size of 4, equates to $4N$ .
|
| 123 |
+
|
| 124 |
+
nections improve gradient propagation by introducing shortcut paths (He et al., 2016; Veit et al., 2016), enabling more effective training of deep models.
|
| 125 |
+
|
| 126 |
+
# 4 Experiments
|
| 127 |
+
|
| 128 |
+
# 4.1 Experimental Setup
|
| 129 |
+
|
| 130 |
+
**Environments.** All RL experiments use the JaxGCRL codebase (Bortkiewicz et al., 2024), which facilitates fast online GCRL experiments based on Brax (Freeman et al., 2021) and MJX (Todorov
|
| 131 |
+
|
| 132 |
+
et al., 2012) environments. The specific environments used are a range of locomotion, navigation, and robotic manipulation tasks; for details, see Appendix B. We use a sparse reward setting, with $r = 1$ only when the agent is in the proximity of the goal. For evaluation, we measure the number of time steps (out of 1000) that the agent is near the goal. When reporting an algorithm's performance as a single number, we compute the average score over the last five epochs of training.
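As a small illustration of this evaluation protocol, the sketch below counts the near-goal time steps for one episode; the goal radius and which coordinates constitute the agent's position are environment-specific assumptions here, not values taken from the paper.

```python
import jax.numpy as jnp

def time_near_goal(positions: jnp.ndarray, goal: jnp.ndarray, radius: float) -> jnp.ndarray:
    """Count the time steps of a 1000-step episode spent within `radius` of the goal.

    positions: [T, k] agent positions over the episode (T = 1000).
    goal:      [k]    goal position in the same coordinates.
    """
    dists = jnp.linalg.norm(positions - goal, axis=-1)
    return jnp.sum(dists < radius)
```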
|
| 133 |
+
|
| 134 |
+
Architectural Components We employ residual connections from the ResNet architecture (He et al., 2015), with each residual block consisting of four repeated units of a Dense layer, a Layer Normalization (Ba et al., 2016) layer, and Swish activation (Ramachandran et al., 2018). We apply the residual connections immediately following the final activation of the residual block, as shown in Figure 2. In this paper, we define the depth of the network as the total number of Dense layers across all residual blocks in the architecture. In all experiments, the depth refers to the configuration of the actor network and both critic encoder networks, which are scaled jointly, except for the ablation experiment in Section 4.4.
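A minimal Flax sketch of this block structure follows (module names and the input/output projections are my own additions, not taken from the released code). A stack of N blocks, each containing four Dense layers, gives the depth-4N networks described above; the extra input and output projections in the sketch only match the hidden width and are not counted in that depth.

```python
import flax.linen as nn
import jax
import jax.numpy as jnp

class ResidualBlock(nn.Module):
    """One residual block: 4 x (Dense -> LayerNorm -> Swish), with the skip
    connection added after the final activation, i.e. h_{i+1} = h_i + F(h_i)."""
    width: int
    layers_per_block: int = 4

    @nn.compact
    def __call__(self, h):
        skip = h
        for _ in range(self.layers_per_block):
            h = nn.Dense(self.width)(h)
            h = nn.LayerNorm()(h)
            h = nn.swish(h)
        return skip + h

class ResidualMLP(nn.Module):
    """Residual trunk; depth = num_blocks * 4 Dense layers inside the blocks."""
    width: int = 256
    num_blocks: int = 16          # 16 blocks x 4 Dense layers = depth 64
    out_dim: int = 64             # e.g., the embedding size of a critic encoder

    @nn.compact
    def __call__(self, x):
        x = nn.Dense(self.width)(x)          # project the input to the hidden width
        for _ in range(self.num_blocks):
            x = ResidualBlock(self.width)(x)
        return nn.Dense(self.out_dim)(x)

# Example: initialize a depth-64 encoder on a dummy (observation, action) input.
params = ResidualMLP().init(jax.random.PRNGKey(0), jnp.zeros((1, 37)))
```

Because the hidden width stays fixed, each additional block only adds roughly 4 × width² weights, which is what makes depths of hundreds of Dense layers tractable.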
|
| 135 |
+
|
| 136 |
+
# 4.2 Scaling Depth in Contrastive RL
|
| 137 |
+
|
| 138 |
+
We start by studying how increasing network depth can increase performance. Both the JaxGCRL benchmark and relevant prior work (Lee et al., 2024; Nauman et al., 2024b; Zheng et al., 2024) use MLPs with a depth of 4, and as such we adopt it as our baseline. In contrast, we will study networks of depth 8, 16, 32, and 64. The results in Figure 1 demonstrate that deeper networks achieve significant performance improvements across a diverse range of locomotion, navigation, and manipulation tasks. Compared to the 4-layer models typical in prior work, deeper networks achieve $2 - 5 \times$ gains in robotic manipulation tasks, over $20 \times$ gains in long-horizon maze tasks such as Ant U4-Maze and Ant U5-Maze, and over $50 \times$ gains in humanoid-based tasks. The full table of performance increases up to depth 64 is provided in Table 1.
|
| 139 |
+
|
| 140 |
+
In Figure 12, we present results on the same 10 environments, compared against SAC, SAC+HER, TD3+HER, GCBC, and GCSL. Scaling CRL leads to substantial performance improvements, outperforming all other baselines in 8 out of 10 tasks. The only exception is SAC on the Humanoid Maze environments, where it exhibits greater sample efficiency early on; however, scaled CRL eventually reaches comparable performance. These results highlight that scaling the depth of the CRL algorithm enables state-of-the-art performance in goal-conditioned reinforcement learning.
|
| 141 |
+
|
| 142 |
+
# 4.3 Emergent Policies Through Depth
|
| 143 |
+
|
| 144 |
+
A closer examination of the results from the performance curves in Figure 1 reveals a notable pattern: instead of a gradual improvement in performance as depth increases, there are pronounced jumps that occur once a critical depth threshold is reached (also shown in Figure 5). The critical depths vary by environment, ranging from 8 layers (e.g. Ant Big Maze) to 64 layers in the Humanoid U-Maze task, with further jumps occurring even at depths of 1024 layers (see the Testing Limits section, Section 4.4).
|
| 145 |
+
|
| 146 |
+
Prompted by this observation, we visualized the learned policies at various depths and found qualitatively distinct skills and behaviors exhibited. This is particularly pronounced in the humanoid-based tasks, as illustrated in Figure 3. Networks with a depth of 4 exhibit rudimentary policies where the agent either falls or throws itself toward the target. Only at a critical depth
|
| 147 |
+
|
| 148 |
+
of 16 does the agent develop the ability to walk upright into the goal. In the Humanoid U-Maze environment, networks of depth 64 struggle to navigate around the intermediary wall, collapsing on the ground. Remarkably at a depth of 256, the agent learns unique behaviors on Humanoid U-Maze. These behaviors include folding forward into a leveraged position to propel itself over walls and
|
| 149 |
+
|
| 150 |
+

|
| 151 |
+
Figure 3: Increasing depth results in new capabilities: Row 1: A depth-4 agent collapses and throws itself toward the goal. Row 2: A depth-16 agent walks upright. Row 3: A depth-64 agent struggles and falls. Row 4: A depth-256 agent vaults the wall acrobatically.
|
| 152 |
+
|
| 153 |
+

|
| 154 |
+
Figure 5: Critical depth and residual connections. Incrementally increasing depth results in marginal performance gains (left). However, once a critical threshold is reached, performance improves dramatically (right) for networks with residual connections.
|
| 155 |
+
|
| 156 |
+

|
| 157 |
+
Figure 6: Actor vs. Critic. In Arm Push Easy, scaling the critic is more effective; in Ant Big Maze, the actor matters more. For Humanoid, scaling both is necessary. These results suggest that actor and critic scaling can complement each other for CRL.
|
| 158 |
+
|
| 159 |
+
shifting into a seated posture over the intermediary obstacle to worm its way toward the goal (one of these policies is illustrated in the fourth row of Figure 3). To the best of our knowledge, this is the first goal-conditioned approach to document such behaviors on the humanoid environment.
|
| 160 |
+
|
| 161 |
+
# 4.4 What Matters for CRL Scaling
|
| 162 |
+
|
| 163 |
+
Width vs. Depth Past literature has shown that scaling network width can be effective (Lee et al., 2024; Nauman et al., 2024b). In Figure 4, we find that scaling width is also helpful in our experiments: wider networks consistently outperform narrower networks (depth held constant at 4). However, depth seems to be a more effective axis for scaling: simply doubling the depth to 8 (width held constant at 256) outperforms the widest networks in all three environments. The advantage of depth scaling is most pronounced in the Humanoid environment (observation dimension 268), followed by Ant Big Maze (dimension 29) and Arm Push Easy (dimension 17), suggesting that the comparative benefit may increase with higher observation dimensionality.
Note additionally that the parameter count scales quadratically with width but only linearly with depth. For comparison, a network with 4 MLP layers and 2048 hidden units has roughly $35\mathrm{M}$ parameters, while one with a depth of 32 and 256 hidden units has only around 2M. Therefore, when operating under a fixed FLOP compute budget or specific memory constraints, depth scaling may be a more computationally efficient approach to improving network performance.
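As a rough sanity check on these numbers, the snippet below counts the parameters of a plain MLP for both configurations. The input and representation dimensions are illustrative, and the paper's $\sim$35M figure presumably aggregates several networks while this counts a single MLP, so only the relative scaling matters here.

```python
def mlp_params(depth: int, width: int, in_dim: int = 256, out_dim: int = 64) -> int:
    """Rough parameter count of an MLP with `depth` hidden layers of size `width`."""
    total = in_dim * width + width                   # input projection
    total += (depth - 1) * (width * width + width)   # hidden layers: quadratic in width
    total += width * out_dim + out_dim               # output projection
    return total

# Widening grows parameters quadratically; deepening grows them only linearly.
print(f"depth=4,  width=2048: {mlp_params(4, 2048) / 1e6:.1f}M parameters")
print(f"depth=32, width=256:  {mlp_params(32, 256) / 1e6:.1f}M parameters")
```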
Scaling the Actor vs. Critic Networks To investigate the role of scaling in the actor and critic networks, Figure 6 presents the final performance for various combinations of actor and critic depths across three environments. Prior work (Nauman et al., 2024b; Lee et al., 2024) focuses on scaling the critic network, finding that scaling the actor degrades performance. In contrast, while we do find that scaling the critic is more impactful in two of the three environments (Humanoid, Arm Push Easy), our method benefits from scaling the actor network jointly, with one environment (Ant Big Maze) demonstrating actor scaling to be more impactful. Thus, our results suggest that scaling the actor and critic networks can play complementary roles in enhancing performance.
Deep Networks Unlock Batch Size Scaling Scaling batch size has been well-established in other areas of machine learning (Chen et al., 2022; Zhang et al., 2024). However, this approach has not translated as effectively to reinforcement learning (RL), and prior work has even reported negative impacts on value-based RL (Obando-Ceron et al., 2023). Indeed, in our experiments, simply increasing the batch size for the original CRL networks yields only marginal differences in performance (Figure 7, top left).

Figure 4: Scaling network width vs. depth. Here, we reflect findings from previous works (Lee et al., 2024; Nauman et al., 2024b) which suggest that increasing network width can enhance performance. However, in contrast to prior work, our method is able to scale depth, yielding more impactful performance gains. For instance, in the Humanoid environment, raising the width to 2048 (depth=4) fails to match the performance achieved by simply doubling the depth to 8 (width=256). The comparative advantage of scaling depth is more pronounced as the observational dimensionality increases.

Figure 7: Deeper networks unlock batch size scaling. We find that as depth increases from 4 to 64 in Humanoid, larger networks can effectively leverage batch size scaling to achieve further improvements.








At first glance, this might seem counterintuitive: since reinforcement learning typically involves fewer informational bits per piece of training data (LeCun, 2016), one might expect higher variance in batch loss or gradients, suggesting the need for larger batch sizes to compensate. At the same time, this possibility hinges on whether the model in question can actually make use of a bigger batch size—in domains of ML where scaling has been successful, larger batch sizes usually bring the most benefit when coupled with sufficiently large models (Zhang et al., 2024; Chen et al., 2022). One hypothesis is that the small models traditionally used in RL may obscure the underlying benefits of larger batch size.
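To make the variance intuition concrete, the toy computation below estimates how the spread of a mini-batch gradient estimate shrinks as the batch size grows; the quadratic loss here is synthetic, and actual RL losses are far noisier, which is exactly the point of the argument above.

```python
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(loc=2.0, scale=5.0, size=100_000)  # noisy per-sample "targets"
theta = 0.0                                          # current parameter estimate

def batch_gradient(batch):
    # Gradient of the mean squared error 0.5 * (theta - x)^2 over a mini-batch.
    return np.mean(theta - batch)

for batch_size in [64, 256, 1024, 4096]:
    grads = [batch_gradient(rng.choice(data, size=batch_size)) for _ in range(200)]
    print(f"batch={batch_size:5d}  grad std ~ {np.std(grads):.3f}")  # shrinks ~ 1/sqrt(B)
```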
To test this hypothesis, we study the effect of increasing the batch size for networks of varying depths. As shown in Figure 7, scaling the batch size becomes effective as network depth grows. This finding offers evidence that by scaling network capacity, we may simultaneously unlock the benefits of larger batch size, potentially making it an important component in the broader pursuit of scaling self-supervised RL.
Training Contrastive RL with 1000+ Layers We next study whether increasing depth beyond 64 layers further improves performance. We use the Humanoid maze tasks as these are both the most challenging environments in the benchmark and also seem to benefit from the deepest scaling. The results, shown in Figure 12, indicate that performance continues to substantially improve as network depth reaches 256 and 1024 layers in the Humanoid U-Maze environment. While we were unable to scale beyond 1024 layers due to computational constraints, we expect to see continued improvements with even greater depths, especially on the most challenging tasks.








Figure 8: We disentangle the effects of exploration and expressivity on depth scaling by training three networks in parallel: a "collector," plus one deep and one shallow learner that train only from the collector's shared replay buffer. In all three environments, when using a deep collector (i.e. good data coverage), the deep learner outperforms the shallow learner, indicating that expressivity is crucial when controlling for good exploration. With a shallow collector (poor exploration), even the deep learner cannot overcome the limitations of insufficient data coverage. As such, the benefits of depth scaling arise from a combination of improved exploration and increased expressivity working jointly.
# 4.5 Why Scaling Happens
Depth Enhances Contrastive Representations The long-horizon setting has been a long-standing challenge in RL, particularly in unsupervised goal-conditioned settings where there is no auxiliary reward feedback (Gupta et al., 2019). The family of U-Maze environments requires a global understanding of the maze layout for effective navigation. We consider a variant of the Ant U-Maze environment, the U4-Maze, in which the agent must initially move in the direction opposite the goal to loop around and ultimately reach it. As shown in Figure 9, we observe a qualitative difference in the behavior of the shallow network (depth 4) compared to the deep network (depth 64). The visualized Q-values computed from the critic encoder representations reveal that the depth 4 network seemingly relies on Euclidean distance to the goal as a proxy for the Q-value, even when a wall obstructs the direct path. In contrast, the depth 64 critic network learns richer representations, enabling it to effectively capture the topology of the maze, as visualized by the trail of high Q-values along the inner edge. These findings suggest that increasing network depth leads to richer learned representations, enabling deeper networks to better capture environment topology and achieve more comprehensive state-space coverage in a self-supervised manner.
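For illustration, here is a minimal sketch of how such Q-value maps can be read off a CRL critic with an L2 energy function; the linear encoders below are random stand-ins for the trained state-action encoder phi and goal encoder psi, and all dimensions are illustrative.

```python
import numpy as np

rng = np.random.default_rng(0)
obs_act_dim, goal_dim, repr_dim = 31, 2, 64
W_phi = rng.normal(size=(obs_act_dim, repr_dim))   # stand-in for the trained phi encoder
W_psi = rng.normal(size=(goal_dim, repr_dim))      # stand-in for the trained psi encoder

def q_values(states_actions, goal):
    """Critic value under an L2 energy: negative distance between representations."""
    sa_repr = states_actions @ W_phi               # (N, repr_dim)
    g_repr = goal @ W_psi                          # (repr_dim,)
    return -np.linalg.norm(sa_repr - g_repr, axis=-1)

# Sweeping states over a grid of maze (x, y) positions and plotting q_values(...)
# produces heatmaps like those in Figure 9.
grid = rng.normal(size=(100, obs_act_dim))
print(q_values(grid, np.array([4.0, 0.0])).shape)  # (100,)
```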

Figure 9: Deeper Q-functions are qualitatively different. In the U4-Maze, the start and goal positions are indicated by the $\odot$ and $\mathbf{G}$ symbols respectively, and the visualized Q-values are computed from the $L_{2}$ distance in the learned representation space, i.e., $Q(s,a,g) = -\| \phi (s,a) - \psi (g)\| _2$. The shallow depth 4 network (left) naively relies on Euclidean proximity, showing high Q-values near the start despite a maze wall. In contrast, the depth 64 network (right) clusters high Q-values at the goal, gradually tapering along the interior.
Depth Enhances Exploration and Expressivity in a Synergized Way Our earlier results suggested that deeper networks achieve greater state-action coverage. To better understand why scaling works, we sought to determine whether improved data alone explains the benefits of scaling, or whether it acts in conjunction with other factors. Thus, we designed an experiment in Figure 8 in which we train three networks in parallel: one network, the "collector," interacts with the environment and writes all experience to a shared replay buffer. Alongside it, two additional "learners", one deep and one shallow, train concurrently. Crucially, these two learners never collect their own data; they train only from the collector's buffer. This design holds the data distribution constant while varying the model's capacity, so any performance gap between the deep and shallow learners must come from expressivity rather than exploration. When the collector is deep (e.g., depth 32), the deep learner substantially outperforms the shallow one across all three environments, indicating that the expressivity of the deep networks is critical. On the other hand, we repeat the experiment with a shallow collector (e.g., depth 4), which explores less effectively and therefore populates the buffer with low-coverage experience. Here, both the deep and shallow learners struggle and achieve similarly poor performance, which indicates that the deep network's additional capacity does not overcome the limitations of insufficient data coverage. As such, scaling depth enhances exploration and expressivity in a synergized way: stronger learning capacity drives more extensive exploration, and strong data coverage is essential to fully realize the power of stronger learning capacity. Both aspects jointly contribute to improved performance.
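A self-contained toy version of this controlled comparison is sketched below. The `Agent` class is a trivial placeholder for a CRL agent and the environment is a made-up one-dimensional random walk, so only the data flow (one collector writing to a buffer that two passive learners of different depths share) mirrors the actual experiment.

```python
import random
from collections import deque

class Agent:
    """Placeholder for a CRL agent; only the shared-data training loop matters here."""
    def __init__(self, depth):
        self.depth, self.num_updates = depth, 0
    def act(self, state):
        return random.uniform(-1.0, 1.0)
    def update(self, batch):
        self.num_updates += 1          # a real agent would take a gradient step here

buffer = deque(maxlen=100_000)         # shared replay buffer
collector = Agent(depth=32)            # the only agent that interacts with the env
deep_learner, shallow_learner = Agent(depth=64), Agent(depth=4)

state = 0.0
for step in range(1_000):
    action = collector.act(state)
    next_state = state + 0.1 * action  # stand-in for an environment step
    buffer.append((state, action, next_state))
    state = next_state
    batch = random.sample(list(buffer), k=min(64, len(buffer)))
    for agent in (collector, deep_learner, shallow_learner):
        agent.update(batch)            # learners see exactly the collector's data
```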
Deep Networks Learn to Allocate Greater Representational Capacity to States Near the Goal In Figure 10 we take a successful trajectory in the Humanoid environment and visualize the embeddings of the state-action encoder along this trajectory for both deep and shallow networks. While the shallow network (Depth 4) tends to cluster near-goal states tightly together, the deep network produces more "spread out" representations. This distinction is important: in a self-supervised setting, we want our representations to separate states that matter—particularly future or goal-relevant states—from random ones. As such, we want to allocate more representational capacity to such critical regions. This suggests that deep networks may learn to allocate representational capacity more effectively to state regions that matter most for the downstream task.
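One simple way to quantify this "spread" is to compare the average pairwise distance between embeddings of near-goal states for the shallow and deep critics. A minimal sketch, with synthetic embeddings standing in for the real ones:

```python
import numpy as np

def mean_pairwise_distance(embeddings):
    """Average L2 distance between all pairs of embeddings of shape (N, d)."""
    diffs = embeddings[:, None, :] - embeddings[None, :, :]
    dists = np.linalg.norm(diffs, axis=-1)
    n = len(embeddings)
    return dists.sum() / (n * (n - 1))   # diagonal terms are zero, so this is the off-diagonal mean

rng = np.random.default_rng(0)
shallow_near_goal = 0.1 * rng.normal(size=(50, 64))  # tightly clustered (synthetic)
deep_near_goal = 1.0 * rng.normal(size=(50, 64))     # more spread out (synthetic)
print(mean_pairwise_distance(shallow_near_goal), mean_pairwise_distance(deep_near_goal))
```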

Successful Trajectory Path in Humanoid Env

Trajectory in Embedding Space (Depth 4)

Trajectory in Embedding Space (Depth 64)
Figure 10: We visualize state-action embeddings from shallow (depth 4) and deep (depth 64) networks along a successful trajectory in the Humanoid task. Near the goal, embeddings from the deep network expand across a curved surface, while those from the shallow network form a tight cluster. This suggests that deeper networks may devote greater representational capacity to regions of the state space that are more frequently visited and play a more critical role in successful task completion.

Deeper Networks Enable Partial Experience Stitching Another key challenge in reinforcement learning is learning policies that can generalize to tasks unseen during training. To evaluate this setting, we designed a modified version of the Ant U-Maze environment. As shown in Figure 11 (top right), the original JaxGCRL benchmark assesses the agent's performance on the three farthest goal positions located on the opposite side of the wall. However, instead of training on all possible subgoals (a superset of the evaluation state-goal pairs), we modified the setup to train on start-goal pairs that are at most 3 units apart, ensuring that none of the evaluation pairs ever appear in the training set. Figure 11 demonstrates that depth 4 networks show limited generalization, solving only the easiest goal (4 units away from the start). Depth 16 networks achieve moderate success, while depth 64 networks excel, sometimes solving the most challenging goal position. These results suggest that increasing network depth enables some degree of stitching, combining $\leq 3$-unit pairs to navigate the 6-unit span of the U-Maze.
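A minimal sketch of how such a restricted training distribution can be constructed; `sample_position()` is a hypothetical stand-in for sampling a reachable maze cell, and the 3-unit threshold matches the setup described above.

```python
import random

def sample_position():
    # Stand-in for sampling a reachable (x, y) cell in the Ant U-Maze.
    return (random.uniform(0, 6), random.uniform(0, 2))

def sample_training_pair(max_separation=3.0):
    """Rejection-sample start-goal pairs at most `max_separation` units apart, so the
    4-6 unit evaluation pairs never appear during training."""
    while True:
        start, goal = sample_position(), sample_position()
        dist = ((start[0] - goal[0]) ** 2 + (start[1] - goal[1]) ** 2) ** 0.5
        if dist <= max_separation:
            return start, goal

pairs = [sample_training_pair() for _ in range(5)]
print(pairs)
```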
The CRL Algorithm is Key In Appendix A, we show that scaled CRL outperforms other baseline goal-conditioned algorithms and advances the state of the art for goal-conditioned RL. We observe that for temporal difference methods (SAC, SAC+HER, TD3+HER), the performance saturates for networks of depth 4, and deeper networks yield either zero or negative performance gains. This is in line with previous research showing that these methods benefit mainly from width (Lee et al., 2024; Nauman et al., 2024b). These results suggest that the self-supervised CRL algorithm is critical.





Figure 11: Deeper networks exhibit improved generalization. (Top left) We modify the training setup of the Ant U-Maze environment such that start-goal pairs are separated by $\leq 3$ units. This design guarantees that no evaluation pairs (Top right) were encountered during training, testing the ability to perform combinatorial generalization via stitching. (Bottom) Generalization ability improves as network depth grows from 4 to 16 to 64 layers.
We also experiment with scaling more self-supervised algorithms, namely Goal-Conditioned Behavioral Cloning (GCBC) and Goal-Conditioned Supervised Learning (GCSL). While these methods yield zero success in certain environments, they show some utility in arm manipulation tasks. Interestingly, even a very simple self-supervised algorithm like GCBC benefits from increased depth. This points to a promising direction for future work: further investigating other self-supervised methods to uncover potentially different or complementary recipes for scaling self-supervised RL.
Finally, recent work has augmented goal-conditioned RL with quasimetric architectures, leveraging the fact that temporal distances satisfy a triangle inequality-based invariance. In Appendix A, we also investigate whether the depth scaling effect persists when applied to these quasimetric networks.
# 4.6 Does Depth Scaling Improve Offline Contrastive RL?
In preliminary experiments, we evaluated depth scaling in the offline goal-conditioned setting using OGBench (Park et al., 2024). We found little evidence that increasing the network depth of CRL improves performance in this offline setting. To further investigate this, we conducted ablations: (1) scaling critic depth while holding the actor at 4 or 8 layers, and (2) applying cold initialization to the final layers of the critic encoders (Zheng et al., 2024). In all cases, baseline depth 4 networks often had the highest success. A key direction for future work is to see if our method can be adapted to enable scaling in the offline setting.
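For reference, a sketch of the cold-initialization ablation as we understand it from Zheng et al. (2024): the final-layer weights of the critic encoders are re-initialized with very small values so that the initial representations start near zero. The 1e-2 scale and the list-of-matrices layout below are illustrative assumptions, not the exact values or structures from that work.

```python
import numpy as np

def cold_init_final_layer(weights, scale=1e-2, seed=0):
    """Shrink the final layer's weight matrix at initialization (illustrative scale)."""
    rng = np.random.default_rng(seed)
    weights[-1] = scale * rng.uniform(-1.0, 1.0, size=weights[-1].shape)
    return weights

# `layers` stands in for the per-layer weight matrices of one critic encoder.
layers = [np.ones((64, 64)) for _ in range(4)]
layers = cold_init_final_layer(layers)
print(np.abs(layers[-1]).max())  # small, unlike the earlier layers
```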
# 5 Conclusion
Arguably, much of the success of vision and language models today is due to the emergent capabilities they exhibit from scale (Srivastava et al., 2023), leading to many systems reducing the RL problem to a vision or language problem.
A critical question for large AI models is: where does the data come from? Unlike supervised learning paradigms, RL methods inherently address this by jointly optimizing both the model and the data collection process through exploration. Ultimately, determining effective ways of building RL systems that demonstrate emergent capabilities may be important for transforming the field into one that trains its own large models. We believe that our work is a step towards these systems. By integrating key components for scaling up RL into a single approach, we show that model performance consistently improves as scale increases in complex tasks. In addition, deep models exhibit qualitatively better behaviors which might be interpreted as implicitly acquired skills necessary to reach the goal.
Limitations. The primary limitation of our results is that scaling network depth comes at the cost of compute. An important direction for future work is to study how distributed training might be used to leverage even more compute, and how techniques such as pruning and distillation might be used to decrease the computational costs.
Impact Statement This paper presents work whose goal is to advance the field of Machine Learning. There are many potential societal consequences of our work, none of which we feel must be specifically highlighted here.
Acknowledgments. We gratefully acknowledge Galen Collier, Director of Researcher Engagement, for support through the NSF grant, as well as the staff at Princeton Research Computing for their invaluable assistance. We also thank Colin Lu for his discussions and contributions to this work. This research was also partially supported by the National Science Centre, Poland (grant no. 2023/51/D/ST6/01609), and the Warsaw University of Technology through the Excellence Initiative: Research University (IDUB) program. Finally, we would also like to thank Jens Tuyls and Harshit Sikchi for providing helpful comments and feedback on the manuscript.

Figure 12: Testing the limits of scale. We extend the results from Figure 1 by scaling networks even further on the challenging Humanoid maze environments. We observe continued performance improvements with network depths of 256 and 1024 layers on Humanoid U-Maze. Note that for the 1024-layer networks, we observed the actor loss exploding at the onset of training, so we maintained the actor depth at 512 while using 1024-layer networks only for the two critic encoders.

# References
Ahn, M., Brohan, A., Brown, N., Chebotar, Y., Cortes, O., David, B., Finn, C., Gopalakrishnan, K., Hausman, K., Herzog, A., Ho, D., Hsu, J., Ibarz, J., Ichter, B., Irpan, A., Jang, E., Ruano, R. J., Jeffrey, K., Jesmonth, S., Joshi, N., Julian, R. C., Kalashnikov, D., Kuang, Y., Lee, K.-H., Levine, S., Lu, Y., Luu, L., Parada, C., Pastor, P., Quiambao, J., Rao, K., Rettinghouse, J., Reyes, D., Sermanet, P., Sievers, N., Tan, C., Toshev, A., Vanhoucke, V., Xia, F., Xiao, T., Xu, P., Xu, S., and Yan, M. (2022). Do as i can, not as i say: Grounding language in robotic affordances. Conference on Robot Learning.
Andrychowicz, M., Wolski, F., Ray, A., Schneider, J., Fong, R., Welinder, P., McGrew, B., Tobin, J., Pieter Abbeel, O., and Zaremba, W. (2017). Hindsight Experience Replay. In Neural Information Processing Systems, volume 30.
Ba, J. L., Kiros, J. R., and Hinton, G. E. (2016). Layer normalization. arXiv preprint arXiv: 1607.06450.
Blier, L., Tallec, C., and Ollivier, Y. (2021). Learning Successor States and Goal-Dependent Values: A Mathematical Viewpoint.
Bortkiewicz, M., Pałucki, W., Myers, V., Dziarmaga, T., Arczewski, T., Kuciński, L., and Eysenbach, B. (2024). Accelerating goal-conditioned rl algorithms and research. arXiv preprint arXiv:2408.11052.
Caron, M., Touvron, H., Misra, I., Jégou, H., Mairal, J., Bojanowski, P., and Joulin, A. (2021). Emerging properties in self-supervised vision transformers. arXiv preprint arXiv: 2104.14294.
Chang, B., Meng, L., Haber, E., Tung, F., and Begert, D. (2018). Multi-level residual networks from dynamical systems view. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net.
Chen, C., Zhang, J., Xu, Y., Chen, L., Duan, J., Chen, Y., Tran, S. D., Zeng, B., and Chilimbi, T. (2022). Why do we need large batch sizes in contrastive learning? A gradient-bias perspective. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems.
Chen, T., Kornblith, S., Swersky, K., Norouzi, M., and Hinton, G. E. (2020). Big self-supervised models are strong semi-supervised learners. Advances in neural information processing systems, 33:22243-22255.
Dehghani, M., Djolonga, J., Mustafa, B., Padlewski, P., Heek, J., Gilmer, J., Steiner, A., Caron, M., Geirhos, R., Alabdulmohsin, I. M., Jenatton, R., Beyer, L., Tschannen, M., Arnab, A., Wang, X., Riquelme, C., Minderer, M., Puigcerver, J., Evci, U., Kumar, M., van Steenkiste, S., Elsayed, G. F., Mahendran, A., Yu, F., Oliver, A., Huot, F., Bastings, J., Collier, M., Gritsenko, A., Birodkar, V., Vasconcelos, C., Tay, Y., Mensink, T., Kolesnikov, A., Pavetić, F., Tran, D., Kipf, T., Lučić, M., Zhai, X., Keysers, D., Harmsen, J., and Houlsby, N. (2023). Scaling vision transformers to 22 billion parameters. International Conference on Machine Learning.
Driess, D., Xia, F., Sajjadi, M. S. M., Lynch, C., Chowdhery, A., Ichter, B., Wahid, A., Tompson, J., Vuong, Q., Yu, T., Huang, W., Chebotar, Y., Sermanet, P., Duckworth, D., Levine, S., Vanhoucke, V., Hausman, K., Toussaint, M., Greff, K., Zeng, A., Mordatch, I., and Florence, P. R. (2023). Palm-e: An embodied multimodal language model. International Conference on Machine Learning.
Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., et al. (2024). The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
Espeholt, L., Soyer, H., Munos, R., Simonyan, K., Mnih, V., Ward, T., Doron, Y., Firoiu, V., Harley, T., Dunning, I., et al. (2018). Impala: Scalable distributed deep-rl with importance weighted actor-learner architectures. In International conference on machine learning, pages 1407-1416. PMLR.
Esser, P., Kulal, S., Blattmann, A., Entezari, R., Müller, J., Saini, H., Levi, Y., Lorenz, D., Sauer, A., Boesel, F., et al. (2024). Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*.
Eysenbach, B., Salakhutdinov, R., and Levine, S. (2021). C-Learning: Learning to Achieve Goals via Recursive Classification. In International Conference on Learning Representations. arXiv.
Eysenbach, B., Zhang, T., Levine, S., and Salakhutdinov, R. R. (2022). Contrastive learning as goal-conditioned reinforcement learning. Advances in Neural Information Processing Systems, 35:35603-35620.
Farebrother, J., Orbay, J., Vuong, Q., Taiga, A. A., Chebotar, Y., Xiao, T., Irpan, A., Levine, S., Castro, P. S., Faust, A., Kumar, A., and Agarwal, R. (2024). Stop Regressing: Training Value Functions via Classification for Scalable Deep RL.
Freeman, C. D., Frey, E., Raichuk, A., Girgin, S., Mordatch, I., and Bachem, O. (2021). Brax - a Differentiable Physics Engine for Large Scale Rigid Body Simulation. In NeurIPS Datasets and Benchmarks. arXiv.
Goyal, P., Mahajan, D., Gupta, A., and Misra, I. (2019). Scaling and benchmarking self-supervised visual representation learning. In Proceedings of the IEEE/cvf International Conference on computer vision, pages 6391-6400.
Gupta, A., Kumar, V., Lynch, C., Levine, S., and Hausman, K. (2019). Relay policy learning: Solving long-horizon tasks via imitation and reinforcement learning. Conference on Robot Learning.
He, K., Zhang, X., Ren, S., and Sun, J. (2015). Deep residual learning for image recognition. Computer Vision and Pattern Recognition.
He, K., Zhang, X., Ren, S., and Sun, J. (2016). Identity Mappings in Deep Residual Networks, pages 630-645. Springer International Publishing.
Henderson, P., Islam, R., Bachman, P., Pineau, J., Precup, D., and Meger, D. (2018). Deep reinforcement learning that matters. In Proceedings of the AAAI conference on artificial intelligence, volume 32.
Huang, S., Dossa, R. F. J., Ye, C., Braga, J., Chakraborty, D., Mehta, K., and Araujo, J. G. (2022). Cleanrl: High-quality single-file implementations of deep reinforcement learning algorithms. Journal of Machine Learning Research, 23(274):1-18.
Kaelbling, L. P. (1993). Learning to achieve goals. In *IJCAI*, volume 2, pages 1094–8. CiteSeer.
Kumar, A., Agarwal, R., Geng, X., Tucker, G., and Levine, S. (2023). Offline q-learning on diverse multi-task data both scales and generalizes. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net.
LeCun, Y. (2016). Predictive learning. Invited talk at the 30th Conference on Neural Information Processing Systems (NIPS). Barcelona, Spain.
Lee, H., Hwang, D., Kim, D., Kim, H., Tai, J. J., Subramanian, K., Wurman, P. R., Choo, J., Stone, P., and Seno, T. (2024). SimBa: Simplicity Bias for Scaling Up Parameters in Deep Reinforcement Learning.
Lee, K.-H., Nachum, O., Yang, M., Lee, L., Freeman, D., Xu, W., Guadarrama, S., Fischer, I., Jang, E., Michalewski, H., and Mordatch, I. (2022). Multi-Game Decision Transformers.
Liu, B., Feng, Y., Liu, Q., and Stone, P. (2023). Metric Residual Networks for Sample Efficient Goal-Conditioned Reinforcement Learning.
Liu, H., Li, C., Wu, Q., and Lee, Y. J. (2024). Visual instruction tuning. Advances in neural information processing systems, 36.
Lyle, C., Rowland, M., and Dabney, W. (2022). Understanding and preventing capacity loss in reinforcement learning. arXiv preprint arXiv:2204.09560.
Lyle, C., Zheng, Z., Khetarpal, K., van Hasselt, H., Pascanu, R., Martens, J., and Dabney, W. (2024). Disentangling the causes of plasticity loss in neural networks. arXiv preprint arXiv:2402.18762.
Makoviychuk, V., Wawrzyniak, L., Guo, Y., Lu, M., Storey, K., Macklin, M., Hoeller, D., Rudin, N., Allshire, A., Handa, A., et al. (2021). Isaac gym: High performance GPU-based physics simulation for robot learning. arXiv preprint arXiv:2108.10470.
Myers, V., Zheng, C., Dragan, A., Levine, S., and Eysenbach, B. (2024). Learning temporal distances: Contrastive successor features can provide a metric structure for decision-making. International Conference on Machine Learning.
Nauman, M., Bortkiewicz, M., Milos, P., Trzcinski, T., Ostaszewski, M., and Cygan, M. (2024a). Overestimation, overfitting, and plasticity in actor-critic: the bitter lesson of reinforcement learning. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net.
Nauman, M., Ostaszewski, M., Jankowski, K., Miłos, P., and Cygan, M. (2024b). Bigger, Regularized, Optimistic: Scaling for compute and sample-efficient continuous control.
Neumann, O. and Gros, C. (2022). Scaling laws for a multi-agent reinforcement learning model. arXiv preprint arXiv:2210.00849.
Obando-Ceron, J., Bellemare, M. G., and Castro, P. S. (2023). Small batch deep reinforcement learning. Neural Information Processing Systems. Published at NeurIPS 2023.
Obando-Ceron, J., Sokar, G., Willi, T., Lyle, C., Farebrother, J., Foerster, J. N., Dziugaite, G., Precup, D., and Castro, P. S. (2024). Mixtures of experts unlock parameter scaling for deep rl. International Conference on Machine Learning.
Ota, K., Jha, D. K., and Kanezaki, A. (2021). Training larger networks for deep reinforcement learning. arXiv preprint arXiv:2102.07920.
Park, S., Frans, K., Eysenbach, B., and Levine, S. (2024). Ogbench: Benchmarking offline goal-conditioned rl. arXiv preprint arXiv: 2410.20092.
Radford, A. (2018). Improving language understanding by generative pre-training.
Radford, A., Kim, J. W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., Krueger, G., and Sutskever, I. (2021). Learning transferable visual models from natural language supervision. International Conference on Machine Learning.
Raffin, A., Hill, A., Gleave, A., Kanervisto, A., Ernestus, M., and Dormann, N. (2021). Stable-baselines3: Reliable reinforcement learning implementations. Journal of Machine Learning Research, 22(268):1-8.
Ramachandran, P., Zoph, B., and Le, Q. V. (2018). Searching for activation functions. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Workshop Track Proceedings. OpenReview.net.
Rudin, N., Hoeller, D., Reist, P., and Hutter, M. (2022). Learning to walk in minutes using massively parallel deep reinforcement learning. In Conference on Robot Learning, pages 91-100. PMLR.
Rutherford, A., Ellis, B., Gallici, M., Cook, J., Lupu, A., Ingvarsson, G., Willi, T., Khan, A., de Witt, C. S., Souly, A., et al. (2023). Jaxmarl: Multi-agent rl environments and algorithms in jax. arXiv preprint arXiv:2311.10090.
Schwarzer, M., Obando-Ceron, J. S., Courville, A. C., Bellemare, M. G., Agarwal, R., and Castro, P. S. (2023). Bigger, better, faster: Human-level atari with human-level efficiency. In Krause, A., Brunskill, E., Cho, K., Engelhardt, B., Sabato, S., and Scarlett, J., editors, International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pages 30365-30380. PMLR.
Sohn, K. (2016). Improved Deep Metric Learning With Multi-Class N-Pair Loss Objective. In Neural Information Processing Systems, volume 29. Curran Associates, Inc.
Srivastava, A., Rastogi, A., Rao, A., et al. (2023). Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. Trans. Mach. Learn. Res.
Team, A. A., Bauer, J., Baumli, K., Baveja, S., Behbahani, F., Bhoopchand, A., Bradley-Schmieg, N., Chang, M., Clay, N., Collister, A., et al. (2023). Human-timescale adaptation in an open-ended task space. arXiv preprint arXiv:2301.07608.
Todorov, E., Erez, T., and Tassa, Y. (2012). Mujoco: A Physics Engine for Model-Based Control. In IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 5026-5033. IEEE.
Torgo, L. and Gama, J. (1996). Regression by classification. In Advances in Artificial Intelligence: 13th Brazilian Symposium on Artificial Intelligence, SBIA'96 Curitiba, Brazil, October 23-25, 1996 Proceedings 13, pages 51-60. Springer.
Tuyls, J., Madeka, D., Torkkola, K., Foster, D., Narasimhan, K., and Kakade, S. (2024). Scaling Laws for Imitation Learning in Single-Agent Games.
Van Hasselt, H., Doron, Y., Strub, F., Hessel, M., Sonnerat, N., and Modayil, J. (2018). Deep reinforcement learning and the deadly triad. arXiv preprint arXiv:1812.02648.
Vaswani, A., Shazeer, N. M., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. (2017). Attention is all you need. In Neural Information Processing Systems.
Veit, A., Wilber, M., and Belongie, S. (2016). Residual networks behave like ensembles of relatively shallow networks. arXiv preprint arXiv: 1605.06431.
Wang, T., Torralba, A., Isola, P., and Zhang, A. (2023a). Optimal goal-reaching reinforcement learning via quasimetric learning.
Wang, T., Torralba, A., Isola, P., and Zhang, A. (2023b). Optimal goal-reaching reinforcement learning via quasimetric learning. In Krause, A., Brunskill, E., Cho, K., Engelhardt, B., Sabato, S., and Scarlett, J., editors, International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pages 36411-36430. PMLR.
Wei, J., Tay, Y., Bommasani, R., Raffel, C., Zoph, B., Borgeaud, S., Yogatama, D., Bosma, M., Zhou, D., Metzler, D., Chi, E. H., Hashimoto, T., Vinyals, O., Liang, P., Dean, J., and Fedus, W. (2022). Emergent abilities of large language models. Trans. Mach. Learn. Res.
Zhai, X., Kolesnikov, A., Houlsby, N., and Beyer, L. (2021). Scaling vision transformers. Computer Vision and Pattern Recognition.
Zhang, H., Morwani, D., Vyas, N., Wu, J., Zou, D., Ghai, U., Foster, D., and Kakade, S. (2024). How does critical batch size scale in pre-training? arXiv preprint arXiv: 2410.21676.
Zheng, C., Eysenbach, B., Walke, H., Yin, P., Fang, K., Salakhutdinov, R., and Levine, S. (2024). Stabilizing Contrastive RL: Techniques for Offline Goal Reaching. In International Conference on Learning Representations. arXiv.
Zheng, C., Salakhutdinov, R., and Eysenbach, B. (2023). Contrastive Difference Predictive Coding. In Twelfth International Conference on Learning Representations. arXiv.
Zong, Y., Aodha, O. M., and Hospedales, T. (2024). Self-supervised multimodal learning: A survey.
# A Additional Experiments
# A.1 Scaled CRL Outperforms All Other Baselines on 8 out of 10 Environments





















Figure 12: Scaled CRL (Ours) outperforms baselines CRL (original), SAC, SAC+HER, TD3+HER, GCSL, and GCBC in 8 out of 10 environments.
In Figure 1, we demonstrated that increasing the depth of the CRL algorithm leads to significant performance improvements over the original CRL (see also Table 1). Here, we show that these gains translate to state-of-the-art results in online goal-conditioned RL, with Scaled CRL outperforming both standard TD-based methods such as SAC, SAC+HER, and TD3+HER, and self-supervised imitation-based approaches like GCBC and GCSL.
# A.2 The CRL Algorithm is Key: Depth Scaling is Not Effective on Other Baselines
Next, we investigate whether increasing network depth in the baseline algorithms yields similar performance improvements as observed in CRL. We find that SAC, SAC+HER, and TD3+HER do not benefit from depths beyond four layers, which is consistent with prior findings (Lee et al., 2024; Nauman et al., 2024b). Additionally, GCSL and GCBC fail to achieve any meaningful performance on the Humanoid and Ant Big Maze tasks. Interestingly, we do observe one exception, as GCBC exhibits improved performance with increased depth in the Arm Push Easy environment.
Table 1: Increasing network depth (depth $D = 4 \rightarrow {64}$ ) increases performance on CRL (Figure 1). Scaling depth exhibits the greatest benefits on tasks with the largest observation dimension (Dim).
<table><tr><td>Task</td><td>Dim</td><td>D=4</td><td>D=64</td><td>Imprv.</td></tr><tr><td>Arm Binpick Hard</td><td rowspan="3">17</td><td>38 ±4</td><td>219 ±15</td><td>5.7×</td></tr><tr><td>Arm Push Easy</td><td>308 ±33</td><td>762 ±30</td><td>2.5×</td></tr><tr><td>Arm Push Hard</td><td>171 ±11</td><td>410 ±13</td><td>2.4×</td></tr><tr><td>Ant U4-Maze</td><td rowspan="4">29</td><td>11.4 ±4.1</td><td>286 ±36</td><td>25×</td></tr><tr><td>Ant U5-Maze</td><td>0.97 ±0.7</td><td>61 ±18</td><td>63×</td></tr><tr><td>Ant Big Maze</td><td>61 ±20</td><td>441 ±25</td><td>7.3×</td></tr><tr><td>Ant Hardest Maze</td><td>215 ±8</td><td>387 ±21</td><td>1.8×</td></tr><tr><td>Humanoid</td><td rowspan="3">268</td><td>12.6 ±1.3</td><td>649 ±19</td><td>52×</td></tr><tr><td>Humanoid U-Maze</td><td>3.2 ±1.2</td><td>159 ±33</td><td>50×</td></tr><tr><td>Humanoid Big Maze</td><td>0.06 ±0.04</td><td>59 ±21</td><td>1051×</td></tr></table>































Figure 13: Depth scaling yields limited gains for SAC, SAC+HER, TD3+HER, GCSL, and GCBC.



# A.3 Additional Scaling Experiments: Offline GCBC, BC, and QRL
We further investigate several additional scaling experiments. As shown in Figure 14, our approach successfully scales with depth in the offline GCBC setting on the antmaze-medium-stitch task from OGBench. We find that the combination of layer normalization, residual connections, and Swish activations is critical, suggesting that our architectural choices may be applied to unlock depth scaling in other algorithms and settings. We also attempt to scale depth for behavioral cloning and the QRL (Wang et al., 2023a) algorithm—in both of these cases, however, we observe negative results.

Figure 14: Our approach successfully scales depth in offline GCBC on antmaze-medium-stitch (OGBench). In contrast, scaling depth for BC (antmaze-giant-navigate, expert SAC data) and for both online (FetchPush) and offline QRL (pointmaze-giant-stitch, OGBench) yields negative results.



# A.4 Can Depth Scaling also be Effective for Quasimetric Architectures?
Prior work (Wang et al., 2023b; Liu et al., 2023) has found that temporal distances satisfy an important invariance property, suggesting the use of quasimetric architectures when learning temporal distances. Our next experiment tests whether changing the architecture affects the scaling properties of self-supervised RL. Specifically, we use the CMD-1 algorithm (Myers et al., 2024), which employs a backward NCE loss with MRN representations. The results indicate that scaling benefits are not limited to a single neural network parametrization. However, MRN's poor performance on the Ant U5-Maze task suggests further innovation is needed for consistent scaling with quasimetric models.
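For context, a common quasimetric parametrization in this line of work, in the spirit of MRN, splits the representation into a symmetric and an asymmetric component. The sketch below is illustrative rather than the exact CMD-1/MRN implementation, and the split point and dimensions are assumptions.

```python
import numpy as np

def quasimetric_distance(x_repr, y_repr, split=32):
    """Distance satisfying the triangle inequality but not symmetry: a symmetric
    L2 term plus an asymmetric max-ReLU term (MRN-style, illustrative)."""
    sym_x, asym_x = x_repr[..., :split], x_repr[..., split:]
    sym_y, asym_y = y_repr[..., :split], y_repr[..., split:]
    symmetric = np.linalg.norm(sym_x - sym_y, axis=-1)
    asymmetric = np.max(np.maximum(asym_x - asym_y, 0.0), axis=-1)
    return symmetric + asymmetric

x, y = np.random.randn(64), np.random.randn(64)
print(quasimetric_distance(x, y), quasimetric_distance(y, x))  # generally unequal
```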

Figure 15: Performance of depth scaling on CRL augmented with quasimetric architectures (CMD-1).
# A.5 Additional Architectural Ablations: Layer Norm and Swish Activation
We conduct ablation experiments to validate the architectural choices of layer norm and Swish activation. Figure 16 shows that removing layer normalization performs significantly worse. Additionally, replacing Swish with ReLU significantly hampers scalability. These results, along with Figure 5, show that all of our architectural components—residual connections, layer norm, and swish activations—are jointly essential to unlocking the full performance of depth scaling.
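For concreteness, here is a minimal Flax sketch of the kind of residual block these ablations vary: Dense layers, layer normalization, Swish activations, and an identity skip connection. The dimensions and exact layer ordering are illustrative rather than a verbatim copy of our implementation.

```python
import jax
import jax.numpy as jnp
import flax.linen as nn

class ResidualBlock(nn.Module):
    width: int = 256

    @nn.compact
    def __call__(self, x):
        residual = x
        h = nn.LayerNorm()(x)           # ablating this hurts depth scaling (Figure 16, left)
        h = nn.Dense(self.width)(h)
        h = nn.swish(h)                 # swapping in ReLU also scales worse (Figure 16, right)
        h = nn.Dense(self.width)(h)
        return residual + h             # skip connection enables the critical-depth jumps

block = ResidualBlock()
params = block.init(jax.random.PRNGKey(0), jnp.zeros((1, 256)))
out = block.apply(params, jnp.ones((1, 256)))
print(out.shape)  # (1, 256)
```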

Figure 16: (Left) Layer Norm is essential for scaling depth. (Right) Scaling with ReLU activations leads to worse performance compared to Swish activations.
# A.6 Can We Integrate Novel Architectural Innovations from the Emerging RL Scaling Literature?
Recently, Simba-v2 proposed a new architecture for scalable RL. Its key innovation is the replacement of layer normalization with hyperspherical normalization, which projects network weights onto the unit-norm hypersphere after each gradient update. As shown in Table 2, the same depth-scaling trends hold when adding hyperspherical normalization to our architecture, and it further improves the sample efficiency of depth scaling. This demonstrates that our method can naturally incorporate new architectural innovations emerging in the RL scaling literature.
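A sketch of the idea, assuming the weights are re-projected after every optimizer step; Simba-v2's exact scheme may differ in details such as which axes are normalized.

```python
import numpy as np

def project_to_hypersphere(weight, axis=0, eps=1e-8):
    """Rescale weight vectors to unit L2 norm along `axis` after a gradient update."""
    norms = np.linalg.norm(weight, axis=axis, keepdims=True)
    return weight / (norms + eps)

# Applied after each optimizer step:
w = np.random.randn(256, 256)
w = project_to_hypersphere(w)          # columns now lie on the unit hypersphere
print(np.linalg.norm(w, axis=0)[:3])   # ~[1. 1. 1.]
```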
Table 2: Integrating hyperspherical normalization in our architecture enhances the sample efficiency of depth scaling.
<table><tr><td colspan="4">Steps to reach ≥200 success</td><td colspan="4">Steps to reach ≥400 success</td><td colspan="4">Steps to reach ≥600 success</td></tr><tr><td>Depth</td><td>4</td><td>16</td><td>32</td><td>Depth</td><td>4</td><td>16</td><td>32</td><td>Depth</td><td>4</td><td>16</td><td>32</td></tr><tr><td>With</td><td>-</td><td>50</td><td>42</td><td>With</td><td>-</td><td>62</td><td>48</td><td>With</td><td>-</td><td>77</td><td>67</td></tr><tr><td>Without</td><td>-</td><td>64</td><td>54</td><td>Without</td><td>-</td><td>75</td><td>64</td><td>Without</td><td>-</td><td>-</td><td>77</td></tr></table>
# A.7 Residual Norms in Deep Networks
Prior work has noted decreasing residual activation norms in deeper layers (Chang et al., 2018). We investigate whether this pattern also holds in our setting. For the critic, the trend is generally evident, especially in very deep architectures (e.g., depth 256). The effect is not as pronounced in the actor.
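The measurement itself is straightforward: record the residual-branch output at each block during a forward pass and take its L2 norm. A minimal sketch, with synthetic activations standing in for the recorded ones:

```python
import numpy as np

def residual_norms(residual_outputs):
    """Average L2 norm of each block's residual-branch output over a batch."""
    return [float(np.mean(np.linalg.norm(r, axis=-1))) for r in residual_outputs]

rng = np.random.default_rng(0)
# Synthetic stand-ins for the recorded residual outputs of a depth-32 critic.
recorded = [rng.normal(scale=1.0 / (1 + i / 8), size=(128, 256)) for i in range(32)]
norms = residual_norms(recorded)
print(norms[0], norms[-1])  # later blocks tend to have smaller residual norms
```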

Average Residual Magnitudes (L2 Norm)



Figure 17: L2 norms of residual activations in networks with depths of 32, 64, 128, and 256.
# A.8 Scaling Depth for Offline Goal-conditioned RL

Figure 18: To evaluate the scalability of our method in the offline setting, we scaled model depth on OGBench (Park et al., 2024). In two out of three environments, performance drastically declined as depth scaled from 4 to 64, while a slight improvement was seen on antmaze-medium-stitch-v0. Successfully adapting our method to scale offline GCRL is an important direction for future work.
# B Experimental Details
# B.1 Environment Setup and Hyperparameters

Figure 19: The scaling results of this paper are demonstrated on the JaxGCRL benchmark, showing that they replicate across a diverse range of locomotion, navigation, and manipulation tasks. These tasks are set in the online goal-conditioned setting where there are no auxiliary rewards or demonstrations. Figure taken from (Bortkiewicz et al., 2024).
Our experiments use the JaxGCRL suite of GPU-accelerated environments, visualized in Figure 19, and a contrastive RL algorithm with hyperparameters reported in Table 7. In particular, we use 10 environments, namely: ant_big_maze, ant_hardest_maze, arm_binpick_hard, arm_push_easy, arm_push_hard, humanoid, humanoid_big_maze, humanoid_u_maze, ant_u4_maze, and ant_u5_maze.
# B.2 Python Environment Differences
In all plots presented in the paper, we used MJX 3.2.6 and Brax 0.10.1 to ensure a fair and consistent comparison. During development, we noticed discrepancies in physics behavior between the environment versions we employed (the CleanRL version of JaxGCRL) and the version recommended in a more recent commit of JaxGCRL (Bortkiewicz et al., 2024). Upon examination, the performance differences (shown in Figure 20) stem from a difference in versions in the MJX and Brax packages. Nonetheless, in both sets of MJX and Brax versions, performance scales monotonically with depth.

Figure 20: Scaling behavior for humanoid in two different Python environments: MJX=3.2.3, Brax=0.10.5 and MJX=3.2.6, Brax=0.10.1 (ours) version of JaxGCRL. Scaling depth improves the performance significantly for both versions. In the environment we used, training requires fewer environment steps to reach a marginally better performance than in the other Python environment.
# B.3 Wall-clock Time of Our Approach
We report the wall-clock time of our approach in Table 3. The table shows results for depths of 4, 8, 16, 32, and 64 across all ten environments, and for the Humanoid U-Maze environment, scaling up to 1024 layers. Overall, wall-clock time increases approximately linearly with depth beyond a certain point.
Table 3: Wall-clock time (in hours) for Depth 4, 8, 16, 32, and 64 across all 10 environments.
<table><tr><td>Environment</td><td>Depth 4</td><td>Depth 8</td><td>Depth 16</td><td>Depth 32</td><td>Depth 64</td></tr><tr><td>Humanoid</td><td>1.48 ± 0.00</td><td>2.13 ± 0.01</td><td>3.40 ± 0.01</td><td>5.92 ± 0.01</td><td>10.99 ± 0.01</td></tr><tr><td>Ant Big Maze</td><td>2.12 ± 0.00</td><td>2.77 ± 0.00</td><td>4.04 ± 0.01</td><td>6.57 ± 0.02</td><td>11.66 ± 0.03</td></tr><tr><td>Ant U4-Maze</td><td>1.98 ± 0.27</td><td>2.54 ± 0.01</td><td>3.81 ± 0.01</td><td>6.35 ± 0.01</td><td>11.43 ± 0.03</td></tr><tr><td>Ant U5-Maze</td><td>9.46 ± 1.75</td><td>10.99 ± 0.02</td><td>16.09 ± 0.01</td><td>31.49 ± 0.34</td><td>46.40 ± 0.12</td></tr><tr><td>Ant Hardest Maze</td><td>5.11 ± 0.00</td><td>6.39 ± 0.00</td><td>8.94 ± 0.01</td><td>13.97 ± 0.01</td><td>23.96 ± 0.06</td></tr><tr><td>Arm Push Easy</td><td>9.97 ± 1.03</td><td>11.02 ± 1.29</td><td>12.20 ± 1.43</td><td>14.94 ± 1.96</td><td>19.52 ± 1.97</td></tr><tr><td>Arm Push Hard</td><td>9.74 ± 1.05</td><td>10.55 ± 1.20</td><td>11.98 ± 1.49</td><td>14.40 ± 1.64</td><td>18.53 ± 0.06</td></tr><tr><td>Arm Binpick Hard</td><td>18.41 ± 2.16</td><td>17.48 ± 1.88</td><td>19.47 ± 0.05</td><td>21.91 ± 1.93</td><td>29.64 ± 6.10</td></tr><tr><td>Humanoid U-Maze</td><td>8.72 ± 0.01</td><td>11.29 ± 0.01</td><td>16.36 ± 0.03</td><td>26.48 ± 0.05</td><td>46.74 ± 0.04</td></tr><tr><td>Humanoid Big Maze</td><td>12.45 ± 0.02</td><td>15.02 ± 0.01</td><td>20.34 ± 0.01</td><td>30.61 ± 0.05</td><td>50.33 ± 0.05</td></tr></table>
Table 4: Total wall-clock time (in hours) for training from Depth 4 up to Depth 1024 in the Humanoid U-Maze environment.
<table><tr><td>Depth</td><td>Time (h)</td></tr><tr><td>4</td><td>3.23 ± 0.001</td></tr><tr><td>8</td><td>4.19 ± 0.003</td></tr><tr><td>16</td><td>6.07 ± 0.003</td></tr><tr><td>32</td><td>9.83 ± 0.006</td></tr><tr><td>64</td><td>17.33 ± 0.003</td></tr><tr><td>128</td><td>32.67 ± 0.124</td></tr><tr><td>256</td><td>73.83 ± 2.364</td></tr><tr><td>512</td><td>120.88 ± 2.177</td></tr><tr><td>1024</td><td>134.15 ± 0.081</td></tr></table>
# B.4 Wall-clock Time: Comparison to Baselines
Since the baselines use standard sized networks, naturally our scaled approach incurs higher raw wall-clock time per environment step (Table 5). However, a more practical metric is the time required to reach a given performance level. As shown in Table 6, our approach outperforms the strongest baseline, SAC, in 7 of 10 environments while requiring less wall-clock time.
Table 5: Wall-clock training time comparison of our method vs. baselines across all 10 environments.
<table><tr><td>Environment</td><td>Scaled CRL</td><td>SAC</td><td>SAC+HER</td><td>TD3</td><td>GCSL</td><td>GCBC</td></tr><tr><td>Humanoid</td><td>11.0 ± 0.0</td><td>0.5 ± 0.0</td><td>0.6 ± 0.0</td><td>0.8 ± 0.0</td><td>0.4 ± 0.0</td><td>0.6 ± 0.0</td></tr><tr><td>Ant Big Maze</td><td>11.7 ± 0.0</td><td>1.6 ± 0.0</td><td>1.6 ± 0.0</td><td>1.7 ± 0.0</td><td>1.5 ± 0.3</td><td>1.4 ± 0.1</td></tr><tr><td>Ant U4-Maze</td><td>11.4 ± 0.0</td><td>1.2 ± 0.0</td><td>1.3 ± 0.0</td><td>1.3 ± 0.0</td><td>0.7 ± 0.0</td><td>1.1 ± 0.1</td></tr><tr><td>Ant U5-Maze</td><td>46.4 ± 0.1</td><td>5.7 ± 0.0</td><td>6.1 ± 0.0</td><td>6.2 ± 0.0</td><td>2.8 ± 0.1</td><td>5.6 ± 0.5</td></tr><tr><td>Ant Hardest Maze</td><td>24.0 ± 0.0</td><td>4.3 ± 0.0</td><td>4.5 ± 0.0</td><td>5.0 ± 0.0</td><td>2.1 ± 0.6</td><td>4.4 ± 0.5</td></tr><tr><td>Arm Push Easy</td><td>19.5 ± 0.6</td><td>8.3 ± 0.0</td><td>8.5 ± 0.0</td><td>8.4 ± 0.0</td><td>6.4 ± 0.1</td><td>8.3 ± 0.3</td></tr><tr><td>Arm Push Hard</td><td>18.5 ± 0.0</td><td>8.5 ± 0.0</td><td>8.6 ± 0.0</td><td>8.3 ± 0.1</td><td>5.2 ± 0.3</td><td>7.4 ± 0.5</td></tr><tr><td>Arm Binpick Hard</td><td>29.6 ± 1.3</td><td>20.7 ± 0.1</td><td>20.7 ± 0.0</td><td>18.4 ± 0.3</td><td>8.0 ± 0.9</td><td>16.2 ± 0.4</td></tr><tr><td>Humanoid U-Maze</td><td>46.7 ± 0.0</td><td>3.0 ± 0.0</td><td>3.5 ± 0.0</td><td>5.4 ± 0.0</td><td>3.1 ± 0.1</td><td>7.2 ± 0.8</td></tr><tr><td>Humanoid Big Maze</td><td>50.3 ± 0.0</td><td>8.6 ± 0.0</td><td>9.3 ± 0.0</td><td>7.5 ± 1.1</td><td>5.1 ± 0.0</td><td>11.4 ± 1.9</td></tr></table>
Table 6: Wall-clock time (in hours) for our approach to surpass SAC's final performance. As shown, our approach surpasses SAC performance in less wall-clock time in 7 out of 10 environments. The N/A* entries are because in those environments, scaled CRL doesn't outperform SAC.
<table><tr><td>Environment</td><td>SAC</td><td>Scaled CRL (Depth 64)</td></tr><tr><td>Humanoid</td><td>0.46</td><td>6.37</td></tr><tr><td>Ant Big Maze</td><td>1.55</td><td>0.00</td></tr><tr><td>Ant U4-Maze</td><td>1.16</td><td>0.00</td></tr><tr><td>Ant U5-Maze</td><td>5.73</td><td>0.00</td></tr><tr><td>Ant Hardest Maze</td><td>4.33</td><td>0.45</td></tr><tr><td>Arm Push Easy</td><td>8.32</td><td>1.91</td></tr><tr><td>Arm Push Hard</td><td>8.50</td><td>6.65</td></tr><tr><td>Arm Binpick Hard</td><td>20.70</td><td>4.43</td></tr><tr><td>Humanoid U-Maze</td><td>3.04</td><td>N/A*</td></tr><tr><td>Humanoid Big Maze</td><td>8.55</td><td>N/A*</td></tr></table>
Table 7: Hyperparameters
<table><tr><td>Hyperparameter</td><td>Value</td></tr><tr><td>num_timesteps</td><td>100M-400M (varying across tasks)</td></tr><tr><td>update-to-data (UTD) ratio</td><td>1:40</td></tr><tr><td>max_replay_size</td><td>10,000</td></tr><tr><td>min_replay_size</td><td>1,000</td></tr><tr><td>episode_length</td><td>1,000</td></tr><tr><td>discounting</td><td>0.99</td></tr><tr><td>num_envs</td><td>512</td></tr><tr><td>batch_size</td><td>512</td></tr><tr><td>policy_lr</td><td>3e-4</td></tr><tr><td>critic_lr</td><td>3e-4</td></tr><tr><td>contrastive_loss_function</td><td>InfoNCE</td></tr><tr><td>energy_function</td><td>L2</td></tr><tr><td>logsumexp_penalty</td><td>0.1</td></tr><tr><td>Network depth</td><td>depends on the experiment</td></tr><tr><td>Network width</td><td>depends on the experiment</td></tr><tr><td>representation dimension</td><td>64</td></tr></table>
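To connect the loss-related entries in the table, here is a hedged sketch of an InfoNCE-style contrastive critic loss with an L2 energy function and a logsumexp penalty, where `sa_repr` stands for $\phi(s,a)$ and `goal_repr` for $\psi(g)$; the exact form used in CRL implementations can differ slightly, and the coefficient follows the table above.

```python
import jax
import jax.numpy as jnp
from jax.nn import log_softmax
from jax.scipy.special import logsumexp

def contrastive_critic_loss(sa_repr, goal_repr, logsumexp_penalty=0.1):
    """InfoNCE over a batch: positives on the diagonal, L2 energy as the logit."""
    # logits[i, j] = -|| phi(s_i, a_i) - psi(g_j) ||_2
    diffs = sa_repr[:, None, :] - goal_repr[None, :, :]
    logits = -jnp.linalg.norm(diffs, axis=-1)                  # (B, B)
    nce = -jnp.mean(jnp.diag(log_softmax(logits, axis=1)))     # align the i-th pair
    penalty = logsumexp_penalty * jnp.mean(logsumexp(logits, axis=1) ** 2)
    return nce + penalty

# Example with random 64-dimensional representations for a batch of 512 pairs.
key1, key2 = jax.random.split(jax.random.PRNGKey(0))
sa = jax.random.normal(key1, (512, 64))
g = jax.random.normal(key2, (512, 64))
print(contrastive_critic_loss(sa, g))
```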
# NeurIPS Paper Checklist
# 1. Claims
Question: Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope?
Answer: [Yes]
Justification: The abstract contains 3 main claims: (1) Depth scaled to 1024 layers; (2) Performance increases 2-50x on CRL and outperforms other goal-conditioned baselines; (3) These performance gains lead to qualitatively new learned behaviors. Each of these claims is clearly substantiated in the main text in Section 4.
# 2. Limitations
Question: Does the paper discuss the limitations of the work performed by the authors?
Answer: [Yes]
Justification: We included a Limitations section that describes the main limitation of our paper, which is the latency of deep networks. We also note at multiple points in the paper where our research can be extended by future work.
# 3. Theory assumptions and proofs
Question: For each theoretical result, does the paper provide the full set of assumptions and a complete (and correct) proof?
Answer: [NA]
Justification: This is an empirical paper. As such, it contains no theoretical results that require assumptions or proofs.
# 4. Experimental result reproducibility
Question: Does the paper fully disclose all the information needed to reproduce the main experimental results of the paper to the extent that it affects the main claims and/or conclusions of the paper (regardless of whether the code and data are provided or not)?
Answer: [Yes]
Justification: Yes, documentation for reproducing the experiments is included alongside the anonymous code.
# 5. Open access to data and code
|
| 597 |
+
|
| 598 |
+
Question: Does the paper provide open access to the data and code, with sufficient instructions to faithfully reproduce the main experimental results, as described in supplemental material?
|
| 599 |
+
|
| 600 |
+
Answer: [Yes]
|
| 601 |
+
|
| 602 |
+
Justification: See the link to the anonymized code in the Abstract.
|
| 603 |
+
|
| 604 |
+
Guidelines:
|
| 605 |
+
|
| 606 |
+
- The answer NA means that the paper does not include experiments requiring code.
|
| 607 |
+
- Please see the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details.
|
| 608 |
+
- While we encourage the release of code and data, we understand that this might not be possible, so "No" is an acceptable answer. Papers cannot be rejected simply for not including code, unless this is central to the contribution (e.g., for a new open-source benchmark).
|
| 609 |
+
- The instructions should contain the exact command and environment needed to run to reproduce the results. See the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details.
|
| 610 |
+
- The authors should provide instructions on data access and preparation, including how to access the raw data, preprocessed data, intermediate data, and generated data, etc.
|
| 611 |
+
- The authors should provide scripts to reproduce all experimental results for the new proposed method and baselines. If only a subset of experiments are reproducible, they should state which ones are omitted from the script and why.
|
| 612 |
+
- At submission time, to preserve anonymity, the authors should release anonymized versions (if applicable).
|
| 613 |
+
- Providing as much information as possible in supplemental material (appended to the paper) is recommended, but including URLs to data and code is permitted.
|
| 614 |
+
|
| 615 |
+
# 6. Experimental setting/details
|
| 616 |
+
|
| 617 |
+
Question: Does the paper specify all the training and test details (e.g., data splits, hyperparameters, how they were chosen, type of optimizer, etc.) necessary to understand the results?
|
| 618 |
+
|
| 619 |
+
Answer: [Yes]
|
| 620 |
+
|
| 621 |
+
Justification: See the Experiments section and the Appendix on Experimental Details.
|
| 622 |
+
|
| 623 |
+
Guidelines:
|
| 624 |
+
|
| 625 |
+
- The answer NA means that the paper does not include experiments.
|
| 626 |
+
- The experimental setting should be presented in the core of the paper to a level of detail that is necessary to appreciate the results and make sense of them.
|
| 627 |
+
- The full details can be provided either with the code, in appendix, or as supplemental material.
|
| 628 |
+
|
| 629 |
+
# 7. Experiment statistical significance
|
| 630 |
+
|
| 631 |
+
Question: Does the paper report error bars suitably and correctly defined or other appropriate information about the statistical significance of the experiments?
|
| 632 |
+
|
| 633 |
+
Answer: [Yes]
|
| 634 |
+
|
| 635 |
+
Justification: Error bars in figures depict one standard error across random seeds. We used 5 seeds in Figure 1. For other figures in the main text, we could only run 3 seeds because of computational constraints.
|
| 636 |
+
|
| 637 |
+
Guidelines:
|
| 638 |
+
|
| 639 |
+
- The answer NA means that the paper does not include experiments.
|
| 640 |
+
- The authors should answer "Yes" if the results are accompanied by error bars, confidence intervals, or statistical significance tests, at least for the experiments that support the main claims of the paper.
|
| 641 |
+
|
| 642 |
+
- The factors of variability that the error bars are capturing should be clearly stated (for example, train/test split, initialization, random drawing of some parameter, or overall run with given experimental conditions).
|
| 643 |
+
- The method for calculating the error bars should be explained (closed form formula, call to a library function, bootstrap, etc.)
|
| 644 |
+
- The assumptions made should be given (e.g., Normally distributed errors).
|
| 645 |
+
- It should be clear whether the error bar is the standard deviation or the standard error of the mean.
|
| 646 |
+
- It is OK to report 1-sigma error bars, but one should state it. The authors should preferably report a 2-sigma error bar than state that they have a $96\%$ CI, if the hypothesis of Normality of errors is not verified.
|
| 647 |
+
- For asymmetric distributions, the authors should be careful not to show in tables or figures symmetric error bars that would yield results that are out of range (e.g. negative error rates).
|
| 648 |
+
- If error bars are reported in tables or plots, the authors should explain in the text how they were calculated and reference the corresponding figures or tables in the text.
|
| 649 |
+
|
| 650 |
+
# 8. Experiments compute resources
|
| 651 |
+
|
| 652 |
+
Question: For each experiment, does the paper provide sufficient information on the computer resources (type of compute workers, memory, time of execution) needed to reproduce the experiments?
|
| 653 |
+
|
| 654 |
+
Answer: [Yes]
|
| 655 |
+
|
| 656 |
+
Justification: Compute resources are detailed in the appendix.
|
| 657 |
+
|
| 658 |
+
Guidelines:
|
| 659 |
+
|
| 660 |
+
- The answer NA means that the paper does not include experiments.
|
| 661 |
+
- The paper should indicate the type of compute workers CPU or GPU, internal cluster, or cloud provider, including relevant memory and storage.
|
| 662 |
+
- The paper should provide the amount of compute required for each of the individual experimental runs as well as estimate the total compute.
|
| 663 |
+
- The paper should disclose whether the full research project required more compute than the experiments reported in the paper (e.g., preliminary or failed experiments that didn't make it into the paper).
|
| 664 |
+
|
| 665 |
+
# 9. Code of ethics
|
| 666 |
+
|
| 667 |
+
Question: Does the research conducted in the paper conform, in every respect, with the NeurIPS Code of Ethics https://neurips.cc/public/EthicsGuidelines?
|
| 668 |
+
|
| 669 |
+
Answer: [Yes]
|
| 670 |
+
|
| 671 |
+
Justification: No known violations of the Code of Ethics.
|
| 672 |
+
|
| 673 |
+
Guidelines:
|
| 674 |
+
|
| 675 |
+
- The answer NA means that the authors have not reviewed the NeurIPS Code of Ethics.
|
| 676 |
+
- If the authors answer No, they should explain the special circumstances that require a deviation from the Code of Ethics.
|
| 677 |
+
- The authors should make sure to preserve anonymity (e.g., if there is a special consideration due to laws or regulations in their jurisdiction).
|
| 678 |
+
|
| 679 |
+
# 10. Broader impacts
|
| 680 |
+
|
| 681 |
+
Question: Does the paper discuss both potential positive societal impacts and negative societal impacts of the work performed?
|
| 682 |
+
|
| 683 |
+
Answer: [Yes]
|
| 684 |
+
|
| 685 |
+
Justification: The Conclusion notes that there are no immediate societal impacts of the work.
|
| 686 |
+
|
| 687 |
+
Guidelines:
|
| 688 |
+
|
| 689 |
+
- The answer NA means that there is no societal impact of the work performed.
|
| 690 |
+
|
| 691 |
+
- If the authors answer NA or No, they should explain why their work has no societal impact or why the paper does not address societal impact.
|
| 692 |
+
- Examples of negative societal impacts include potential malicious or unintended uses (e.g., disinformation, generating fake profiles, surveillance), fairness considerations (e.g., deployment of technologies that could make decisions that unfairly impact specific groups), privacy considerations, and security considerations.
|
| 693 |
+
- The conference expects that many papers will be foundational research and not tied to particular applications, let alone deployments. However, if there is a direct path to any negative applications, the authors should point it out. For example, it is legitimate to point out that an improvement in the quality of generative models could be used to generate deepfakes for disinformation. On the other hand, it is not needed to point out that a generic algorithm for optimizing neural networks could enable people to train models that generate Deepfakes faster.
|
| 694 |
+
- The authors should consider possible harms that could arise when the technology is being used as intended and functioning correctly, harms that could arise when the technology is being used as intended but gives incorrect results, and harms following from (intentional or unintentional) misuse of the technology.
|
| 695 |
+
- If there are negative societal impacts, the authors could also discuss possible mitigation strategies (e.g., gated release of models, providing defenses in addition to attacks, mechanisms for monitoring misuse, mechanisms to monitor how a system learns from feedback over time, improving the efficiency and accessibility of ML).
|
| 696 |
+
|
| 697 |
+
# 11. Safeguards
|
| 698 |
+
|
| 699 |
+
Question: Does the paper describe safeguards that have been put in place for responsible release of data or models that have a high risk for misuse (e.g., pretrained language models, image generators, or scraped datasets)?
|
| 700 |
+
|
| 701 |
+
Answer: [NA]
|
| 702 |
+
|
| 703 |
+
Justification: No immediate impact on high-risk applications.
|
| 704 |
+
|
| 705 |
+
Guidelines:
|
| 706 |
+
|
| 707 |
+
- The answer NA means that the paper poses no such risks.
|
| 708 |
+
- Released models that have a high risk for misuse or dual-use should be released with necessary safeguards to allow for controlled use of the model, for example by requiring that users adhere to usage guidelines or restrictions to access the model or implementing safety filters.
|
| 709 |
+
- Datasets that have been scraped from the Internet could pose safety risks. The authors should describe how they avoided releasing unsafe images.
|
| 710 |
+
- We recognize that providing effective safeguards is challenging, and many papers do not require this, but we encourage authors to take this into account and make a best faith effort.
|
| 711 |
+
|
| 712 |
+
# 12. Licenses for existing assets
|
| 713 |
+
|
| 714 |
+
Question: Are the creators or original owners of assets (e.g., code, data, models), used in the paper, properly credited and are the license and terms of use explicitly mentioned and properly respected?
|
| 715 |
+
|
| 716 |
+
Answer: [NA]
|
| 717 |
+
|
| 718 |
+
Justification: Benchmarks used are appropriately cited in the main text.
|
| 719 |
+
|
| 720 |
+
Guidelines:
|
| 721 |
+
|
| 722 |
+
- The answer NA means that the paper does not use existing assets.
|
| 723 |
+
- The authors should cite the original paper that produced the code package or dataset.
|
| 724 |
+
- The authors should state which version of the asset is used and, if possible, include a URI.
|
| 725 |
+
- The name of the license (e.g., CC-BY 4.0) should be included for each asset.
|
| 726 |
+
- For scraped data from a particular source (e.g., website), the copyright and terms of service of that source should be provided.
|
| 727 |
+
|
| 728 |
+
- If assets are released, the license, copyright information, and terms of use in the package should be provided. For popular datasets, paperswithcode.com/datasets has curated licenses for some datasets. Their licensing guide can help determine the license of a dataset.
|
| 729 |
+
- For existing datasets that are re-packaged, both the original license and the license of the derived asset (if it has changed) should be provided.
|
| 730 |
+
- If this information is not available online, the authors are encouraged to reach out to the asset's creators.
|
| 731 |
+
|
| 732 |
+
# 13. New assets
|
| 733 |
+
|
| 734 |
+
Question: Are new assets introduced in the paper well documented and is the documentation provided alongside the assets?
|
| 735 |
+
|
| 736 |
+
Answer: [NA]
|
| 737 |
+
|
| 738 |
+
Justification: Datasets and benchmarks used are all from prior work and appropriately cited.

Guidelines:
|
| 739 |
+
|
| 740 |
+
- The answer NA means that the paper does not release new assets.
|
| 741 |
+
- Researchers should communicate the details of the dataset/code/model as part of their submissions via structured templates. This includes details about training, license, limitations, etc.
|
| 742 |
+
- The paper should discuss whether and how consent was obtained from people whose asset is used.
|
| 743 |
+
- At submission time, remember to anonymize your assets (if applicable). You can either create an anonymized URL or include an anonymized zip file.
|
| 744 |
+
|
| 745 |
+
# 14. Crowdsourcing and research with human subjects
|
| 746 |
+
|
| 747 |
+
Question: For crowdsourcing experiments and research with human subjects, does the paper include the full text of instructions given to participants and screenshots, if applicable, as well as details about compensation (if any)?
|
| 748 |
+
|
| 749 |
+
Answer: [NA]
|
| 750 |
+
|
| 751 |
+
Justification: No crowdsourcing experiments.
|
| 752 |
+
|
| 753 |
+
Guidelines:
|
| 754 |
+
|
| 755 |
+
- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects.
|
| 756 |
+
- Including this information in the supplemental material is fine, but if the main contribution of the paper involves human subjects, then as much detail as possible should be included in the main paper.
|
| 757 |
+
- According to the NeurIPS Code of Ethics, workers involved in data collection, curation, or other labor should be paid at least the minimum wage in the country of the data collector.
|
| 758 |
+
|
| 759 |
+
# 15. Institutional review board (IRB) approvals or equivalent for research with human subjects
|
| 760 |
+
|
| 761 |
+
Question: Does the paper describe potential risks incurred by study participants, whether such risks were disclosed to the subjects, and whether Institutional Review Board (IRB) approvals (or an equivalent approval/review based on the requirements of your country or institution) were obtained?
|
| 762 |
+
|
| 763 |
+
Answer: [NA]
|
| 764 |
+
|
| 765 |
+
Justification: No human subject experiments were conducted.
|
| 766 |
+
|
| 767 |
+
Guidelines:
|
| 768 |
+
|
| 769 |
+
- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects.
|
| 770 |
+
- Depending on the country in which research is conducted, IRB approval (or equivalent) may be required for any human subjects research. If you obtained IRB approval, you should clearly state this in the paper.
|
| 771 |
+
|
| 772 |
+
- We recognize that the procedures for this may vary significantly between institutions and locations, and we expect authors to adhere to the NeurIPS Code of Ethics and the guidelines for their institution.
|
| 773 |
+
- For initial submissions, do not include any information that would break anonymity (if applicable), such as the institution conducting the review.
|
| 774 |
+
|
| 775 |
+
# 16. Declaration of LLM usage
|
| 776 |
+
|
| 777 |
+
Question: Does the paper describe the usage of LLMs if it is an important, original, or non-standard component of the core methods in this research? Note that if the LLM is used only for writing, editing, or formatting purposes and does not impact the core methodology, scientific rigorousness, or originality of the research, declaration is not required.
|
| 778 |
+
|
| 779 |
+
Answer: [NA]
|
| 780 |
+
|
| 781 |
+
Justification: LLMs were not used in writing the paper; they were only used for occasional code debugging.
|
| 782 |
+
|
| 783 |
+
Guidelines:
|
| 784 |
+
|
| 785 |
+
- The answer NA means that the core method development in this research does not involve LLMs as any important, original, or non-standard components.
|
| 786 |
+
- Please refer to our LLM policy (https://neurips.cc/Conferences/2025/LLM) for what should or should not be described.
|
1000layernetworksforselfsupervisedrlscalingdepthcanenablenewgoalreachingcapabilities/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:11560b8e346de5e75cafcf0ba8a747310810a60284eb53d15801676a72c00e54
|
| 3 |
+
size 1458132
|
1000layernetworksforselfsupervisedrlscalingdepthcanenablenewgoalreachingcapabilities/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:85c7250365a716f25faf728a570aeb05d42026e1cecefb5716386eb0d8734bdf
|
| 3 |
+
size 816747
|
3basilanalgorithmicframeworkforsparsepluslowrankcompressionofllms/2af2a6c4-d824-4f9d-a26b-4132df89fa21_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b49cb730f91939a8ffbf0798b32833383b076c1ec585b6b82ee590f562d930da
|
| 3 |
+
size 233477
|
3basilanalgorithmicframeworkforsparsepluslowrankcompressionofllms/2af2a6c4-d824-4f9d-a26b-4132df89fa21_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:dbbe0d75f4a0c417e2f78e2eeb21d3a697dd8032b3b01547e5ba76d546166efc
|
| 3 |
+
size 280002
|
3basilanalgorithmicframeworkforsparsepluslowrankcompressionofllms/2af2a6c4-d824-4f9d-a26b-4132df89fa21_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:672cecfe38467589f1b88c856b9c3d57d11d2ec341c0a28273db134d756549ae
|
| 3 |
+
size 3221024
|
3basilanalgorithmicframeworkforsparsepluslowrankcompressionofllms/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
3basilanalgorithmicframeworkforsparsepluslowrankcompressionofllms/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2d16b07da2f42dfbeeef2c4c54c69e8ebbac637f87a30aca0e3c7ae3f0e1c4ca
|
| 3 |
+
size 2202544
|
3basilanalgorithmicframeworkforsparsepluslowrankcompressionofllms/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7c218beb2b11d2005b1b386d198dbe164f4668d97b291f2a0a9bf33f6d79f40c
|
| 3 |
+
size 1021916
|
3dagentatrimodalmultiagentresponsiveframeworkforcomprehensive3dobjectannotation/b7ddcf35-7b87-4e57-a1da-f034ad401c0a_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2fa5b1cef48a46db88169183d2389e8d16fe74d9420fa220e0a567aefdc4de61
|
| 3 |
+
size 187183
|
3dagentatrimodalmultiagentresponsiveframeworkforcomprehensive3dobjectannotation/b7ddcf35-7b87-4e57-a1da-f034ad401c0a_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5129725699300765800e9f945c5754c38cf4d74fe39bfedc7e255a2887ab6a4b
|
| 3 |
+
size 231480
|
3dagentatrimodalmultiagentresponsiveframeworkforcomprehensive3dobjectannotation/b7ddcf35-7b87-4e57-a1da-f034ad401c0a_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ed01c2a9d57383723102b819e0f9fafa91d36b3bd9e7409600a450eae54f477c
|
| 3 |
+
size 6877630
|
3dagentatrimodalmultiagentresponsiveframeworkforcomprehensive3dobjectannotation/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
3dagentatrimodalmultiagentresponsiveframeworkforcomprehensive3dobjectannotation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e52ee751e18077caab8d9476f535e4dbacf0b543c9630c0fa3f2c9f98e84753b
|
| 3 |
+
size 1189225
|
3dagentatrimodalmultiagentresponsiveframeworkforcomprehensive3dobjectannotation/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cf0abe0662873ae56bdb3747c7ba8bc49ef22b451cc7155461dcac495e23eb81
|
| 3 |
+
size 916717
|
3dequivariantvisuomotorpolicylearningviasphericalprojection/20f2c39d-2687-48ce-8af6-bc79fc25f92b_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c279e62b70c3b799236b2ebf392ee64cc268466fef9cc36bc772a4bc9203c617
|
| 3 |
+
size 192474
|
3dequivariantvisuomotorpolicylearningviasphericalprojection/20f2c39d-2687-48ce-8af6-bc79fc25f92b_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:103175d9ca927c6984af2439d9bbd1372a547de15a411a89121580ca86c6d98f
|
| 3 |
+
size 247750
|
3dequivariantvisuomotorpolicylearningviasphericalprojection/20f2c39d-2687-48ce-8af6-bc79fc25f92b_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fd7ca908b1fd64466657c1f87878e8d5b148f034bf0c76675cc8f4c6b9df824e
|
| 3 |
+
size 12577097
|
3dequivariantvisuomotorpolicylearningviasphericalprojection/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
3dequivariantvisuomotorpolicylearningviasphericalprojection/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0771d8c00841f66d2692ff38164a388d62b5d528e1d31ece97c1588b8e87fa84
|
| 3 |
+
size 1136145
|
3dequivariantvisuomotorpolicylearningviasphericalprojection/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0d07b97db6f54fff6e074e5df15c779fd8c7ab89047b2e93c1014341a5b05234
|
| 3 |
+
size 1077550
|
3dgaussianflatshybrid2d3dphotometricscenereconstruction/bfec7c41-0681-41c8-8491-f468a3e77d73_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7b0286c5aaf1a6516249b7dac357597494b4a714644fcb4dbe7b1a2777a381db
|
| 3 |
+
size 132205
|
3dgaussianflatshybrid2d3dphotometricscenereconstruction/bfec7c41-0681-41c8-8491-f468a3e77d73_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:133b7ad374dc6ae11ae11415500aa567c648e0c0eea38c65b4d6fdc4ac000438
|
| 3 |
+
size 169572
|
3dgaussianflatshybrid2d3dphotometricscenereconstruction/bfec7c41-0681-41c8-8491-f468a3e77d73_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a2cb282626c4d59d52976d54fdd60c96beca38aa72dcfb412a392c295e1b99b4
|
| 3 |
+
size 40052412
|
3dgaussianflatshybrid2d3dphotometricscenereconstruction/full.md
ADDED
|
@@ -0,0 +1,619 @@
| 1 |
+
# 3D Gaussian Flats: Hybrid 2D/3D Photometric Scene Reconstruction
|
| 2 |
+
|
| 3 |
+
# Maria Taktasheva
|
| 4 |
+
|
| 5 |
+
Simon Fraser University
|
| 6 |
+
maria_taktasheva@sfu.ca
|
| 7 |
+
|
| 8 |
+
# Lily Goli*
|
| 9 |
+
|
| 10 |
+
University of Toronto
|
| 11 |
+
|
| 12 |
+
# Alessandro Fiorini
|
| 13 |
+
|
| 14 |
+
University of Bologna
|
| 15 |
+
|
| 16 |
+
# Zhen Li
|
| 17 |
+
|
| 18 |
+
Simon Fraser University
|
| 19 |
+
|
| 20 |
+
# Daniel Rebain
|
| 21 |
+
|
| 22 |
+
University of British Columbia
|
| 23 |
+
|
| 24 |
+
# Andrea Tagliasacchi*
|
| 25 |
+
|
| 26 |
+
Simon Fraser University
|
| 27 |
+
|
| 28 |
+
University of Toronto
|
| 29 |
+
|
| 30 |
+

|
| 31 |
+
Faux render of 3D Gaussian Flats
|
| 32 |
+
Figure 1: Teaser - We introduce 3D Gaussian Flats, a hybrid representation of 2D Gaussians on semantically distinct planar surfaces and 3D Gaussians elsewhere (left). Our method achieves photorealistic quality on par with fully 3D approaches, while improving geometry over surface reconstruction methods (right), e.g., no visible hole in the middle of the 'garden' scene from Mip-NeRF360 [1].
|
| 33 |
+
|
| 34 |
+

|
| 35 |
+
Ground Truth
|
| 36 |
+
|
| 37 |
+

|
| 38 |
+
Ours
|
| 39 |
+
|
| 40 |
+

|
| 41 |
+
Ours
|
| 42 |
+
|
| 43 |
+

|
| 44 |
+
3DGS-MCMC
|
| 45 |
+
|
| 46 |
+

|
| 47 |
+
2DGS
|
| 48 |
+
3DGS-MCMC
|
| 50 |
+
|
| 51 |
+

|
| 52 |
+
2DGS
|
| 53 |
+
|
| 54 |
+
# Abstract
|
| 55 |
+
|
| 56 |
+
Recent advances in radiance fields and novel view synthesis enable creation of realistic digital twins from photographs. However, current methods struggle with flat, texture-less surfaces, creating uneven and semi-transparent reconstructions, due to an ill-conditioned photometric reconstruction objective. Surface reconstruction methods solve this issue but sacrifice visual quality. We propose a novel hybrid 2D/3D representation that jointly optimizes constrained planar (2D) Gaussians for modeling flat surfaces and freeform (3D) Gaussians for the rest of the scene. Our end-to-end approach dynamically detects and refines planar regions, improving both visual fidelity and geometric accuracy. It achieves state-of-the-art depth estimation on ScanNet++ and ScanNetv2, and excels at mesh extraction without overfitting to a specific camera model, showing its effectiveness in producing high-quality reconstruction of indoor scenes.
|
| 57 |
+
|
| 58 |
+
# 1 Introduction
|
| 59 |
+
|
| 60 |
+
Recent advances in radiance fields and novel view synthesis have enabled the creation of realistic digital twins from collections of real-world photographs [2, 3]. These techniques allow for high-fidelity 3D reconstructions that capture intricate details of real-world scenes, making them invaluable for applications in virtual reality, gaming, cultural heritage preservation, and scientific visualization.
|
| 61 |
+
|
| 62 |
+
However, when optimizing for novel view synthesis on flat and texture-less surfaces (e.g. walls, ceilings, and tables that are prevalent in indoor scenes), current methods struggle to produce a faithful 3D reconstruction, as the problem is photometrically under-constrained [4]. Specifically, modern novel view synthesis frameworks like [5, 6], which are optimized via volume rendering, model flat surfaces with low densities, resulting in non-opaque representations of solid surfaces; see the surface of the table in Figure 1 as an example. Conversely, surface reconstruction methods that assume solid, flat surfaces avoid this limitation [7]. However, they compromise visual quality in favor of a more parsimonious 3D reconstruction; see Figure 1. Our core research question is whether these seemingly conflicting objectives could be achieved simultaneously.
|
| 63 |
+
|
| 64 |
+
Some approaches have attempted to answer this question by first creating a full 3D representation and then, post-training, detecting planar surfaces to enable 3D planar reconstruction [8, 9]. However, these methods do not leverage planar assumptions during the optimization of the scene representation itself, limiting their effectiveness. Others enforce planar assumptions during training through various regularizer losses [10]. However, these losses can be hard to tune, as they are only suitable for the portion of the scene that is solid and flat, hindering the reconstruction whenever these assumptions are violated.
|
| 65 |
+
|
| 66 |
+
In contrast to these methods, we propose to look at the problem in an end-to-end fashion, conjoining the process of photometric reconstruction with that of planar surface reconstruction. To achieve this, we introduce a hybrid 2D/3D representation, where flat surfaces are modeled with 2D Gaussian splats [7] that are confined to 2D planes, while the rest of the scene is modeled with a classical, and more expressive, 3DGS model [6]. By jointly optimizing planar (2D) and freeform (3D) Gaussians, our approach enables better fitting of the final representation to planar surfaces within the scene. During photometric optimization, our method dynamically detects planar regions and adaptively grows their extent, resulting in a reconstruction that retains high visual quality (as measured by PSNR) compared to a classical 3DGS scene, while simultaneously achieving superior geometric accuracy (as measured by depth error).
|
| 67 |
+
|
| 68 |
+
Our evaluations demonstrate that this hybrid representation achieves state-of-the-art depth estimation results on challenging indoor datasets, including the new ScanNet++ dataset, which was designed for dense reconstruction tasks using NeRF-based approaches, and the legacy ScanNetv2 dataset with sparser camera views. Our method delivers crisp reconstructed surfaces, while maintaining competitive visual quality compared to fully 3D representations. Beyond novel view synthesis, our approach has applications in mesh extraction for planar surfaces, producing high-quality meshes and accurate mesh segmentation results across diverse capture setups (DSLR and iPhone captures), without the overfitting issues that negatively affect previous methods trained on specific camera models.
|
| 69 |
+
|
| 70 |
+
# 2 Related Work
|
| 71 |
+
|
| 72 |
+
Modern neural scene reconstruction methods aim to generate high-quality 3D representations from 2D images for applications like novel view synthesis [5, 6]. Despite significant progress, volumetric approaches struggle to accurately reconstruct planar surfaces [11], while surface reconstruction methods fail to recover volumetric effects [12]. Finding an approach that accurately reconstructs planar geometry without compromising the quality of the surrounding scene geometry and appearance is a key challenge.
|
| 73 |
+
|
| 74 |
+
Representations for differentiable rendering Neural Radiance Field (NeRF) [5] pioneered scene reconstruction with a 3D neural representation optimized through differentiable volumetric rendering. 3D Gaussian Splatting (3DGS) [6] overcame NeRF's slow training and rendering speed by representing scenes as efficiently rasterizable 3D Gaussians, dramatically accelerating rendering while maintaining quality. The impressive speed-quality balance of 3DGS quickly established it as a standard approach,
|
| 75 |
+
|
| 76 |
+
with recent advancements such as 3DGS-MCMC [13] further enhancing its accessibility by eliminating the dependency on SfM initialization. Despite these innovations, volumetric representations still struggle with clean geometry reconstruction in flat and textureless surfaces common in indoor environments, hindering applications like mesh extraction. Our method addresses these challenges through a hybrid 2D/3D Gaussian representation that achieves superior geometric reconstruction while preserving rendering quality.
|
| 77 |
+
|
| 78 |
+
Surface representations and planar constraints While NeRF [5] and 3DGS [6] employ fully volumetric representations, alternative approaches such as [11, 14] model scenes as solid surfaces. This philosophy inspired SuGaR [15] to use a regularization term that encourages the Gaussians to align with the surface of the scene, and later 2DGS [7], which uses 2D Gaussian primitives to reconstruct surfaces, outperforming prior surface reconstruction methods [11, 14, 15]. Recent work [16] uses 2D Gaussians as in 2DGS, with multi-view depth and normal regularization to improve surface quality, while RaDe-GS [17] enables depth and normal rasterization for 3D Gaussians to support similar regularization. Other works introduced more explicit primitives, including planes [18, 19], geometry modeled through learnable opacity maps [20], and a soup of planes for dynamic reconstruction [21]. While these methods excel at representing flat surfaces with clean geometry, they typically sacrifice rendering quality and struggle to model phenomena that are better explained by volumetric effects rather than surfaces. Some methods enforce planar constraints only as regularization losses, such as Guo et al. [22], which uses Manhattan world assumptions on semantically segmented regions, and Chen et al. [23], which enforces plane normal consistency in textureless regions. Although helpful, regularizers can be difficult to tune. Our approach instead explicitly detects and optimizes planes within scene reconstruction, avoiding such issues.
|
| 79 |
+
|
| 80 |
+
3D plane detection and reconstruction Another research direction detects planar surfaces in an initial 3D reconstruction and fits planes only to detected regions, extending single-image plane detection [24, 25] to multi-view settings. PlanarNeRF [26] adds a plane-predicting MLP branch to NeRF, supervised via ground truth labels or plane detection consistency across frames, but prevents plane MLP gradients from affecting the geometry prediction branch. PlanarRecon [8] reconstructs a sparse feature volume, which is decoded into plane features and clustered. AirPlanes [9] and NeuralPlane [27] build 3D-consistent plane embeddings per 3D point, emphasizing semantic priors for accurate detection. While we also use semantic knowledge, our method jointly detects and optimizes planes alongside scene reconstruction, allowing geometry to benefit from planar constraints. Further, unlike these methods, our approach yields full scene reconstructions suitable for novel view synthesis, vs. a coarse surface reconstruction.
|
| 81 |
+
|
| 82 |
+
Hybrid representations Recent hybrid 2D-3D approaches have explored planar surface representation. Kim and Lim [28] integrate meshes into 3DGS for indoor scenes, using SAM [29] to detect planar surfaces and represent them with meshes while employing 3D Gaussians for other objects. Zanjani et al. [30] combine SAM segmentation with normal estimation to lift 2D plane descriptors to 3D, clustering the planar Gaussians using a tree structure. In contrast, our method offers a simpler solution by representing the scene with a mixture of 2D and 3D Gaussians. This design remains fully compatible with the 3DGS rendering pipeline, eliminating the need for complex hybrid mesh handling, or hierarchical tree structures.
|
| 83 |
+
|
| 84 |
+
# 3 Method
|
| 85 |
+
|
| 86 |
+
Given $N$ posed images $\{I_c\}$ and $M$ planar surfaces $\{P_p\}$ , each specified by binary image masks $\{\mathcal{M}_{p,c}\}$ , we aim to build a hybrid novel view synthesis model that combines a classical 3DGS representation with a 2D piecewise planar representation of the scene. Our goal is to reconstruct the scene so that the planar surfaces are accurately recovered and compactly represented by 2D Gaussian primitives, while the rest of the scene is modeled with 3D Gaussians, with the key objective of avoiding the artifacts that typically appear when 3D primitives are used to model planar surfaces; see Figure 1.
|
| 87 |
+
|
| 88 |
+

|
| 89 |
+
Figure 2: Overview - Training of our model is split into two parts: warm-up, in which 3D Gaussians are trained as in [6] using a photometric loss; and planar training, in which 3D Gaussians and planar Gaussians are trained along with the parameters of the planes to which planar Gaussians are locked. Planar training is performed in alternating phases, with Gaussian parameters frozen while plane parameters are optimized, and vice versa. Legend: learnable (warm up), learnable (Gaussian phase), learnable (plane phase).
|
| 90 |
+
|
| 91 |
+
# 3.1 Hybrid representation
|
| 92 |
+
|
| 93 |
+
Our representation consists of $M$ planes $\mathcal{P} = \{P_p\}$ , each characterized by its 3D origin and normal $(\mathbf{o}_p, \mathbf{n}_p)$ . The geometry of each such plane $P_p$ is represented through a set of 2D Gaussians $\mathcal{G} = \{\mathbf{g}_k\}_{k=1}^{K_p}$ such that,
|
| 94 |
+
|
| 95 |
+
$$
|
| 96 |
+
\mathbf{g}_{k} = \mathcal{N}\left(\mu_{k}, \Sigma_{k}\right), \quad \mu_{k} \in P_{p}, \quad \Sigma_{k} \in \mathbb{R}^{2 \times 2}. \tag{1}
|
| 97 |
+
$$
|
| 98 |
+
|
| 99 |
+
Here, $\mu_{k}$ is the center of the $k$ -th Gaussian on the plane $P_{p}$ , and $\Sigma_{k}$ is the 2D covariance matrix, parametrized with a 2D in-plane rotation $\mathbf{R}_{k}$ and a 2D diagonal scale matrix $\mathbf{S}_{k}$ . The plane-to-world transformation matrix is defined as $\mathbf{T}_{\mathrm{pw}} = \mathrm{hom}(\mathbf{R},\mathbf{o})$ , where $\mathbf{R}$ is any rotation matrix that satisfies $\hat{z} = \mathbf{Rn}$ with $\hat{z}$ being the unit vector along the z-axis in the world frame. Thus, the degrees of freedom of planar Gaussians can be mapped to world coordinates through the rigid transformation:
|
| 100 |
+
|
| 101 |
+
$$
|
| 102 |
+
\bar {\mu} _ {k} = \mathbf {T} _ {\mathrm {p w}} [ \mu_ {k}; 0; 1 ], \quad \bar {\Sigma} _ {k} = \mathbf {T} _ {\mathrm {p w}} \operatorname {d i a g} (\Sigma_ {k}, 1, 1) \mathbf {T} _ {\mathrm {p w}} ^ {\top} \tag {2}
|
| 103 |
+
$$
|
| 104 |
+
|
| 105 |
+
yielding a standard 3D Gaussian primitive representation suitable for rendering. The remaining scene geometry is represented by unconstrained 3D Gaussians $\bar{\mathcal{G}} = \{\bar{\mathbf{g}}_k\}_{k=1}^{\bar{K}}$ :
|
| 106 |
+
|
| 107 |
+
$$
|
| 108 |
+
\bar{\mathbf{g}}_{k} = \mathcal{N}(\bar{\mu}_{k}, \bar{\Sigma}_{k}), \quad \bar{\mu}_{k} \in \mathbb{R}^{3}, \quad \bar{\Sigma}_{k} \in \mathbb{R}^{3 \times 3} \tag{3}
|
| 109 |
+
$$
|
| 110 |
+
|
| 111 |
+
All Gaussians have view-dependent colors $\mathbf{c}$ represented as Spherical Harmonics, and opacity $\alpha$ as in vanilla 3DGS. To reconstruct the scene with our hybrid representation, we need to optimize the degrees of freedom of the planes $\mathcal{P}$ , the 2D planar Gaussians $\mathcal{G}$ , and the 3D freeform Gaussians $\bar{\mathcal{G}}$ . We begin our optimization with a warm-up stage using only 3D Gaussians (for N=3500 iterations). After that, we start planar reconstruction, where in each round of optimization we: (i) dynamically initialize plane parameters by robustly fitting planes to the current representation (section 3.2); (ii) alternate between optimizing plane and Gaussian parameters (section 3.3); (iii) densify our representation through a (slightly modified) MCMC densification, due to the challenges of optimizing compact-support functions (section 3.4).
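To make the lifting of Eq. (2) concrete, the following is a minimal NumPy sketch (an illustration under our own conventions, not the released implementation): it builds a plane-to-world transform from $(\mathbf{o}_p, \mathbf{n}_p)$ and maps a 2D planar Gaussian to a world-frame mean and covariance, assuming the extent along the plane normal is kept at zero, as in 2DGS; all function names are illustrative.

```python
import numpy as np

def plane_to_world(o_p, n_p):
    """Build the plane-to-world rigid transform T_pw = hom(R, o_p), where R aligns
    the plane's local z-axis with the normal n_p (cf. Eq. 2)."""
    n = n_p / np.linalg.norm(n_p)
    # pick an in-plane tangent direction that is not parallel to the normal
    a = np.array([1.0, 0.0, 0.0]) if abs(n[0]) < 0.9 else np.array([0.0, 1.0, 0.0])
    t1 = a - np.dot(a, n) * n
    t1 /= np.linalg.norm(t1)
    t2 = np.cross(n, t1)
    T = np.eye(4)
    T[:3, :3] = np.stack([t1, t2, n], axis=1)  # columns: plane x, plane y, normal
    T[:3, 3] = o_p
    return T

def lift_planar_gaussian(mu_2d, sigma_2d, T_pw):
    """Map a planar Gaussian (mu_2d in R^2, sigma_2d in R^{2x2}) to a world-frame
    3D mean and covariance; the extent along the normal is kept at zero (2DGS-style)."""
    mu_world = (T_pw @ np.array([mu_2d[0], mu_2d[1], 0.0, 1.0]))[:3]
    sigma_3d = np.zeros((3, 3))
    sigma_3d[:2, :2] = sigma_2d
    R = T_pw[:3, :3]
    return mu_world, R @ sigma_3d @ R.T
```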
|
| 112 |
+
|
| 113 |
+
# 3.2 Plane initialization
|
| 114 |
+
|
| 115 |
+
For compactness of notation, let us drop our indices and consider the binary mask $\mathcal{M} \gets \mathcal{M}_{c,p}$ for the $p$ -th planar surface in the $c$ -th view, and denote with $\pi$ the function that projects a 3D point to the coordinate frame of the $c$ -th image. We start by selecting all the Gaussians (i) whose mean projects into the mask, (ii) that are sufficiently opaque, and (iii) that lie within a shell around the expected ray termination of the $c$ -th image:
|
| 116 |
+
|
| 117 |
+
$$
|
| 118 |
+
\tilde {\mathcal {G}} = \left\{\bar {\mathbf {g}} _ {k} \mid \pi (\bar {\mu} _ {k}) \in \mathcal {M}, \alpha_ {k} > \kappa , | D (\pi (\bar {\mu} _ {k})) - d _ {k} | < \delta \right\}, \tag {4}
|
| 119 |
+
$$
|
| 120 |
+
|
| 121 |
+
where the thresholds $\kappa = 0.1$ and $\delta = 0.05$ are hyper-parameters that control this selection process, $D$ is the expected ray termination map (i.e. depth map), and $d_{k}$ is the depth of the
|
| 122 |
+
|
| 123 |
+

|
| 124 |
+
Figure 3: Planar Relocation - A freeform Gaussian (teal) gets relocated to the plane to become a planar Gaussian (brown), when both its distance to the plane $(d_{\perp})$ and along $(d_{\parallel})$ the plane are small.
|
| 125 |
+
|
| 126 |
+
Gaussians. We then extract a candidate plane $P$ by RANSAC optimization on a point cloud sampled from the Gaussians:
|
| 127 |
+
|
| 128 |
+
$$
|
| 129 |
+
P, \mathcal {I} = \operatorname {R A N S A C} \left(\left\{x \sim \bar {\mathbf {g}} \mid \bar {\mathbf {g}} \in \tilde {\mathcal {G}} \right\}, \epsilon\right) \tag {5}
|
| 130 |
+
$$
|
| 131 |
+
|
| 132 |
+
where we accept $P$ as a viable plane candidate only when the mean inlier residual is lower than $\epsilon$ . The set $\mathcal{I}$ contains the indices of the Gaussians in $\tilde{\mathcal{G}}$ that are inliers of the RANSAC process. We further discard planes that are too small, i.e., whose inlier set $\mathcal{I}$ contains fewer than 100 Gaussians. Once a plane corresponding to $\mathcal{M}$ has been accepted, all the semantic masks for that plane $p$ are excluded from subsequent RANSAC runs. The plane initialization process is repeated for the remaining masks after each completed round of plane and Gaussian optimization, as described in Section 3.3.
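For reference, a minimal NumPy RANSAC sketch of the plane fitting and acceptance step in Eq. (5) is shown below; the threshold values and the function name are illustrative placeholders rather than the authors' settings.

```python
import numpy as np

def fit_candidate_plane(points, eps=0.02, iters=1000, min_inliers=100, seed=0):
    """Minimal RANSAC plane fit over points sampled from the candidate Gaussians
    (Eq. 5). Returns (origin, normal, inlier_indices), or None if the candidate is
    rejected by the mean-inlier-residual or minimum-size criteria."""
    rng = np.random.default_rng(seed)
    best_inliers, best = np.array([], dtype=int), None
    for _ in range(iters):
        i0, i1, i2 = rng.choice(len(points), size=3, replace=False)
        p0 = points[i0]
        n = np.cross(points[i1] - p0, points[i2] - p0)
        if np.linalg.norm(n) < 1e-9:
            continue  # degenerate (near-collinear) minimal sample
        n = n / np.linalg.norm(n)
        dist = np.abs((points - p0) @ n)
        inliers = np.flatnonzero(dist < eps)
        if len(inliers) > len(best_inliers):
            best_inliers, best = inliers, (p0, n)
    if best is None or len(best_inliers) < min_inliers:
        return None  # too few inliers: discard the plane, as in Sec. 3.2
    o, n = best
    if np.abs((points[best_inliers] - o) @ n).mean() > eps:
        return None  # mean inlier residual above epsilon: reject the candidate
    return o, n, best_inliers
```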
|
| 133 |
+
|
| 134 |
+
Snapping We then remove the discovered inliers from the set of 3D Gaussians $\bar{\mathcal{G}}\gets \bar{\mathcal{G}}\setminus \mathcal{I}$ , and add them to our set of 2D Gaussians $\mathcal{G}\gets \mathcal{G}\cup \mathcal{I}$ . During the latter operation, we clip the 3D Gaussians to 2D (making them planar) by transforming them to local plane coordinates and setting the third component of their means and scales to zero. Further, only the rotation about the $z$ -axis in local plane coordinates is preserved.
|
| 135 |
+
|
| 136 |
+
Active set update If the accepted plane $P_i$ has an angular distance below a threshold to an already existing plane, and its origin $\mathbf{o}_i$ also has a small Euclidean distance to the closest Gaussian center on that plane, we merge the two planes. Otherwise, the plane is added as a new plane to the active set of planes $\mathcal{P}$ . When merging, we assign the new plane's Gaussians to the previously found one. This allows our optimization to merge planar areas that have only been partially observed in any view.
|
| 137 |
+
|
| 138 |
+
# 3.3 Optimization
|
| 139 |
+
|
| 140 |
+
We optimize our representation by block-coordinate descent, starting each round of optimization by optimizing only the plane parameters for a fixed number of 10 iterations, and then freezing these and optimizing the Gaussian parameters (both 2D and 3D) for another 100 iterations. This alternation in optimization is critical to avoid instability; see the ablation in Figure 7. In the first optimization block, within each iteration, the parameters of the $p$ -th plane within the $c$ -th image are optimized by the loss:
|
| 141 |
+
|
| 142 |
+
$$
|
| 143 |
+
\underset{\mathbf{o}_{p}, \mathbf{n}_{p}}{\arg\min} \; \underbrace{\left\| I_{c} - \tilde{I}_{c} \right\|_{1}}_{\mathcal{L}_{\text{photo}}} + \lambda_{\text{mask}} \underbrace{\left\| \mathcal{M}_{c,p} - \tilde{\mathcal{M}}_{c,p} \right\|_{1}}_{\mathcal{L}_{\text{mask}}}, \tag{6}
|
| 144 |
+
$$
|
| 145 |
+
|
| 146 |
+
where $\tilde{\mathcal{M}}$ is the predicted plane mask, obtained by rendering the mixture of Gaussians with binarized colors (white for planar, black for 3D), alpha-blended using the original Gaussian opacities during rasterization. In the second optimization block, we optimize all Gaussian parameters jointly:
|
| 147 |
+
|
| 148 |
+
$$
|
| 149 |
+
\underset{\mathcal{G}, \bar{\mathcal{G}}}{\arg\min} \; \mathcal{L}_{\text{photo}} + \lambda_{\text{mask}} \mathcal{L}_{\text{mask}} + \lambda_{\text{TV}} \mathcal{L}_{\text{TV}} + \lambda_{\text{scale}} \mathcal{L}_{\text{scale}} + \lambda_{\text{opacity}} \mathcal{L}_{\text{opacity}}, \tag{7}
|
| 150 |
+
$$
|
| 151 |
+
|
| 152 |
+
where $\mathcal{L}_{\mathrm{TV}}$ is the total depth variation regularization from Niemeyer et al. [10], $\mathcal{L}_{\mathrm{scale}}$ is the scale regularizer, and $\mathcal{L}_{\mathrm{opacity}}$ is the opacity regularizer from Kheradmand et al. [13], which drives the size of Gaussians that are unconstrained by the photometric loss toward zero. Note that planar Gaussians move rigidly during plane optimization (6), and move locally within the plane during Gaussian optimization (7), as only their 2D in-plane parameters are optimized.
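For intuition, a generic total-variation-style depth regularizer could look like the sketch below; the actual $\mathcal{L}_{\mathrm{TV}}$ follows Niemeyer et al. [10], so this simplified form is only an assumption for illustration.

```python
import numpy as np

def depth_total_variation(depth):
    """A generic total-variation-style regularizer on a rendered depth map:
    mean absolute difference between horizontally and vertically adjacent pixels."""
    dx = np.abs(depth[:, 1:] - depth[:, :-1]).mean()
    dy = np.abs(depth[1:, :] - depth[:-1, :]).mean()
    return dx + dy
```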
|
| 153 |
+
|
| 154 |
+
# 3.4 Planar relocation
|
| 155 |
+
|
| 156 |
+
We follow 3DGS-MCMC [13] in our training dynamics. For densification of planes, we rely on relocating low-opacity Gaussians to locations of dense high-opacity Gaussians, as this allows
|
| 157 |
+
|
| 158 |
+

|
| 159 |
+
Figure 4: Novel View Synthesis - Quantitative and qualitative results show significant improvement in predicted depth compared to previous work, while maintaining comparable rendering quality to the full 3D representations.
|
| 160 |
+
|
| 161 |
+
transferring between 3D and 2D/planar Gaussians. However, the number of Gaussians on planes, especially when the plane has weak texture, is usually low, leading to a slow densification rate for planar Gaussians. To address this issue, whenever a freeform Gaussian projects into the current mask $\pi (\bar{\mu}_k)\in \mathcal{M}$ and is sufficiently close to the current reconstruction, we stochastically relocate it to the plane. To measure distance, we identify the 2D Gaussian with the smallest Euclidean distance to $\bar{\mu}_k$ , and measure its distance in the direction of the plane normal, $d_{\perp}$ , and the one along the plane, $d_{||}$ ; see Figure 3. We relocate the Gaussian if both distances are sufficiently small, as expressed by the following Bernoulli distribution:
|
| 162 |
+
|
| 163 |
+
$$
|
| 164 |
+
p \sim \mathcal {B} (\beta), \beta = \left[ 1 - \Phi \left(\frac {d _ {\perp}}{\sigma_ {\perp}}\right) \right] \cdot \left[ 1 - \Phi \left(\frac {d _ {\parallel}}{\sigma_ {\parallel}}\right) \right], \tag {8}
|
| 165 |
+
$$
|
| 166 |
+
|
| 167 |
+
where $\Phi$ is the cumulative distribution function of a standard Gaussian, and $\sigma_{\perp}$ and $\sigma_{\parallel}$ are hyper-parameters that control the stochastic relocation.
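A small self-contained sketch of the relocation test in Eq. (8) follows, with illustrative values for the hyper-parameters $\sigma_{\perp}$ and $\sigma_{\parallel}$ (the paper's actual settings may differ).

```python
import numpy as np
from scipy.stats import norm

def relocation_probability(d_perp, d_par, sigma_perp=0.05, sigma_par=0.2):
    """Bernoulli parameter beta from Eq. (8): large only when the freeform Gaussian
    is close to the plane both along the normal and within the plane."""
    return (1.0 - norm.cdf(d_perp / sigma_perp)) * (1.0 - norm.cdf(d_par / sigma_par))

def maybe_relocate(d_perp, d_par, rng, **kwargs):
    """Sample the relocation decision p ~ B(beta)."""
    return rng.random() < relocation_probability(d_perp, d_par, **kwargs)

# example: a Gaussian 1 cm off the plane and 5 cm from its nearest planar neighbour
rng = np.random.default_rng(0)
print(relocation_probability(0.01, 0.05), maybe_relocate(0.01, 0.05, rng))
```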
|
| 168 |
+
|
| 169 |
+
# 4 Results
|
| 170 |
+
|
| 171 |
+
We validate our proposed method for scene reconstruction through the novel view synthesis task on common indoor scene datasets, assessing both rendered image and depth quality metrics (section 4.1). We then show an application of our method to mesh extraction for planar surfaces (section 4.2). Finally, we validate our design choices through an ablation study on different aspects of the method (section 4.3). We provide our implementation details in the supplementary material.
|
| 172 |
+
|
| 173 |
+

|
| 174 |
+
Figure 5: Novel View Synthesis on ScanNetv2 - Our method outperforms baselines in image and depth quality on ScanNetv2 despite sparse camera views.
|
| 175 |
+
|
| 176 |
+
<table><tr><td>Metric</td><td>3DGS-MCMC</td><td>2DGS</td><td>Ours</td></tr><tr><td>RMSE↓</td><td>0.46</td><td>0.60</td><td>0.40</td></tr><tr><td>MAE↓</td><td>0.37</td><td>0.44</td><td>0.31</td></tr><tr><td>AbsRel↓</td><td>0.19</td><td>0.23</td><td>0.16</td></tr><tr><td>δ < 1.25 ↑</td><td>0.61</td><td>0.63</td><td>0.70</td></tr><tr><td>δ < 1.25² ↑</td><td>0.87</td><td>0.77</td><td>0.90</td></tr><tr><td>δ < 1.25³ ↑</td><td>0.95</td><td>0.83</td><td>0.97</td></tr><tr><td>PSNR↑</td><td>20.18</td><td>21.44</td><td>21.75</td></tr><tr><td>LPIPS↓</td><td>0.29</td><td>0.30</td><td>0.27</td></tr><tr><td>SSIM↑</td><td>0.83</td><td>0.85</td><td>0.86</td></tr><tr><td># primitives</td><td>500K</td><td>809K</td><td>500K</td></tr><tr><td>(% planar)</td><td></td><td></td><td>(17.6%)</td></tr></table>
|
| 177 |
+
|
| 178 |
+
# 4.1 Novel View Synthesis - Figures 4 and 5
|
| 179 |
+
|
| 180 |
+
We evaluate our hybrid representation's novel view synthesis on common indoor scene reconstruction benchmarks and provide comparisons with both state-of-the-art fully 3D representations and 2D surface reconstruction approaches. We show a significant improvement in the reconstructed surface geometry while maintaining high visual quality.
|
| 181 |
+
|
| 182 |
+
Datasets We perform evaluations on the common indoor scene benchmarks ScanNet++ [31] and ScanNetv2 [32], as they primarily feature indoor scenes with flat textureless surfaces suitable for the task at hand. ScanNet++ provides dense scenes with SfM camera poses and sparse point clouds, designed primarily for 3D reconstruction approaches that follow the NeRF [5] paradigm. Conversely, the legacy version of ScanNet, i.e. ScanNetv2, offers sparser views without SfM information. Our method works with or without initial sparse point clouds, enabling reconstruction initialized with a sparse SfM point cloud on ScanNet++ and experiments with randomly initialized point clouds on ScanNetv2. For ScanNet++, we use 11 training scenes with ground truth meshes for depth derivation, utilizing iPhone video streams, sampling every 10th frame for training at $2 \times$ downsampling and every 8th for testing. We chose scenes that are diverse in their content and contain various planar surfaces. For ScanNetv2, we evaluate on 5 scenes with sufficient overlapping views of planar surfaces, following the data preparation scheme of [27]. The 2D plane masks were generated using PlaneRecNet [25] and propagated through the image sequence with the SAMv2 video processor [29].
|
| 183 |
+
|
| 184 |
+
Baselines We compare against SOTA reconstruction methods, both fully 3D representations and 2D surface reconstruction methods. For 3D representations, we compare with vanilla 3DGS [6] and 3DGS-MCMC [13], as the latter is more robust to random initialization and has higher rendering quality. Among photometric surface reconstruction methods, we compare to 2DGS [7] as a widely used state-of-the-art method, as well as to PGSR [16] and RaDe-GS [17], which more recently report improved depth quality. All methods are trained for 30K iterations.
|
| 185 |
+
|
| 186 |
+
Metrics We use the common image quality metrics PSNR, SSIM and LPIPS for evaluating the rendered RGB. Further, we choose depth as a strong indicator of the quality of the reconstructed surface geometry. We provide depth quality metrics by computing the rendered depth as the expected ray termination at each pixel. We report RMSE, MAE, and the absolute error relative to the ground truth depth (AbsRel). Additionally, we provide depth accuracy percentages at different error thresholds, similar to [33]. The metrics are computed only on the defined portion of the ground-truth depths. We further report the total number of primitives in our model and the percentage that are planar (and thus can be represented more compactly).
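As a reference for how these depth metrics can be computed from rendered and ground-truth depth maps, here is a minimal sketch; treating pixels with positive ground-truth depth as the defined portion is our assumption.

```python
import numpy as np

def depth_metrics(pred, gt):
    """Depth metrics over pixels where ground truth is defined (here assumed gt > 0):
    RMSE, MAE, AbsRel, and threshold accuracies delta < 1.25^k for k = 1, 2, 3."""
    valid = gt > 0
    p, g = pred[valid], gt[valid]
    ratio = np.maximum(p / g, g / p)
    return {
        "RMSE": float(np.sqrt(np.mean((p - g) ** 2))),
        "MAE": float(np.mean(np.abs(p - g))),
        "AbsRel": float(np.mean(np.abs(p - g) / g)),
        **{f"delta<1.25^{k}": float(np.mean(ratio < 1.25 ** k)) for k in (1, 2, 3)},
    }
```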
|
| 187 |
+
|
| 188 |
+
Analysis Quantitative and qualitative results across both datasets show significant improvement in depth accuracy compared to all baselines. Notably, our method achieves comparable image quality to SOTA 3D representations on dense ScanNet++ scenes while surpassing them in depth quality, evidenced by sharper geometry reconstruction in qualitative examples. The slight PSNR difference with 3D methods reflects a trade-off: our constrained geometry enforces correct structure, while unconstrained methods can inflate PSNR by fitting view-dependent effects with incorrect geometry.
|
| 189 |
+
|
| 190 |
+

|
| 191 |
+
Figure 6: Mesh Extraction - Our method shows consistent results across iPhone and DSLR captures, while baselines typically overfit to one camera type. Qualitatively, our approach extracts complete meshes for most target planes with fewer inaccurate plane detections (shown in gray) compared to baselines. Target planes are shown with distinct colors on the ground truth.
|
| 192 |
+
|
| 193 |
+
In the sparser ScanNetv2 scenes, our approach delivers superior performance in both depth and image quality, leveraging the planar prior of indoor environments to overcome the geometric ambiguity that challenges pure 3D methods in sparse captures. Our method also substantially outperforms 2DGS in both image fidelity and depth accuracy metrics.
|
| 194 |
+
|
| 195 |
+
# 4.2 Mesh Extraction - Figure 6
|
| 196 |
+
|
| 197 |
+
Our method enables mesh extraction from the reconstructed 3D planar surfaces. For each plane, we un-project all 2D segmentation masks to 3D by computing ray-plane intersections, yielding a point cloud. This point cloud is downsampled using fixed-size voxels and rasterized onto plane coordinates to create an occupancy grid. We then use Marching Squares for contour extraction (we omit small contours with fewer than 100 points), followed by ear-clipping triangulation to produce the final mesh. We evaluate the quality of the retrieved meshes for the planar surfaces and compare our method to planar reconstruction methods.
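The first step of this pipeline, lifting the 2D mask pixels onto the fitted plane, is a standard ray-plane intersection; a minimal NumPy sketch (function and argument names are ours, not from the released code) could look as follows.

```python
import numpy as np

def unproject_mask_to_plane(mask, K, cam_to_world, plane_normal, plane_d):
    """Lift 2D mask pixels onto a 3D plane via ray-plane intersection.

    The plane is n.x + d = 0 in world coordinates; K is the 3x3 intrinsic matrix,
    cam_to_world a 4x4 camera-to-world transform, mask a boolean HxW array.
    """
    v, u = np.nonzero(mask)                                  # pixel rows/columns inside the mask
    pix = np.stack([u + 0.5, v + 0.5, np.ones_like(u, dtype=np.float64)], axis=0)
    dirs = cam_to_world[:3, :3] @ (np.linalg.inv(K) @ pix)   # ray directions in world frame
    origin = cam_to_world[:3, 3]
    denom = plane_normal @ dirs
    keep = np.abs(denom) > 1e-8                              # drop rays parallel to the plane
    dirs, denom = dirs[:, keep], denom[keep]
    # solve n.(o + s*dir) + d = 0 for the ray parameter s
    s = -(plane_normal @ origin + plane_d) / denom
    pts = origin[None, :] + s[:, None] * dirs.T
    return pts[s > 0]                                        # keep intersections in front of the camera
```

The resulting points would then be voxel-downsampled and rasterized into plane coordinates before contour extraction, as described above.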
|
| 198 |
+
|
| 199 |
+
Datasets We use ScanNet++ to extract planar surface meshes. We show results both on the iPhone subset of this dataset and on the DSLR subset, demonstrating that our method can handle different camera models, whereas previous methods usually overfit to one modality. For ground truth, we follow the approach of Watson et al. [9] to obtain a ground-truth planar mesh. We then only consider the subset of planes in the ground-truth mesh for which we have annotated segmentation masks in each scene. We provide details on selecting these planes in Appendix E.
|
| 200 |
+
|
| 201 |
+
Baselines We compare against the previous planar reconstruction methods AirPlanes [9] and PlanarRecon [8], which provide an extracted planar mesh as output. We follow the same evaluation setting as in the original papers on the iPhone subset of the dataset. For DSLR images, we crop the images to the FoV specified by each baseline to match their training distribution.
|
| 202 |
+
|
| 203 |
+

|
| 204 |
+
Figure 7: Ablation on design choices - Loss components and optimization strategy are critical, with simultaneous plane-Gaussian optimization causing significant drops. 2D Gaussian snapping greatly improves depth accuracy compared to regularization alternatives. Similarly, Gaussian relocation is essential.
|
| 205 |
+
|
| 206 |
+
Metrics We report mesh accuracy metrics including accuracy, precision, recall, completeness and Chamfer distance as defined in Ye et al. [27]. We also provide mesh segmentation metrics that evaluate how well detected plane segments match ground truth segments following [9].
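For reference, a simplified point-to-point version of these mesh metrics can be written as follows; this sketch uses our own naming and a nearest-neighbour formulation, whereas the paper follows the exact definitions of Ye et al. [27]. Note that Chamfer conventions vary (sum vs. mean of the two directed distances); the sum is used here.

```python
import numpy as np
from scipy.spatial import cKDTree

def mesh_point_metrics(pred_pts, gt_pts, tau=0.05):
    """Accuracy/completeness (mean distances), precision/recall/F1 at threshold tau,
    and Chamfer distance between two point sets sampled from the meshes."""
    d_pred_to_gt = cKDTree(gt_pts).query(pred_pts)[0]   # accuracy-side distances
    d_gt_to_pred = cKDTree(pred_pts).query(gt_pts)[0]   # completeness-side distances
    accuracy = d_pred_to_gt.mean()
    completeness = d_gt_to_pred.mean()
    precision = (d_pred_to_gt < tau).mean()
    recall = (d_gt_to_pred < tau).mean()
    f1 = 2 * precision * recall / max(precision + recall, 1e-8)
    chamfer = accuracy + completeness
    return dict(accuracy=accuracy, completeness=completeness,
                precision=precision, recall=recall, f1=f1, chamfer=chamfer)
```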
|
| 207 |
+
|
| 208 |
+
Analysis Our method outperforms the baselines on the DSLR subset of the dataset. Unlike previous methods, which are trained on a specific modality (i.e., phone cameras) and struggle to transfer to different camera models (i.e., DSLR cameras), our approach maintains consistent mesh quality because mesh extraction on test scenes is performed zero-shot through photometric reconstruction. Additionally, our method outperforms PlanarRecon on iPhone data, while being competitive with AirPlanes. Qualitative results reveal that both PlanarRecon and AirPlanes extract extraneous planes with numerous small random fragments, resulting in unsightly and impractical meshes. In contrast, our method produces clean planar surfaces, yielding a more coherent and usable reconstruction.
|
| 209 |
+
|
| 210 |
+
# 4.3 Ablation - Figure 7
|
| 211 |
+
|
| 212 |
+
We ablate our design choices and additionally test our method's robustness to random point cloud initialization (Table 1).
|
| 213 |
+
|
| 214 |
+
Loss design We ablate the effect of $\mathcal{L}_{mask}$ and $\mathcal{L}_{TV}$ . Although removing these losses reduces the image quality by some margin, it affects depth quality more significantly. Qualitative rendering shows that $\mathcal{L}_{mask}$ contributes significantly to detecting and growing 2D Gaussians.
|
| 215 |
+
|
| 216 |
+
**Optimization design** Our method is based on optimizing Gaussians and plane parameters together in an alternating fashion. We show that fixing plane parameters with no optimization degrades our results both quantitatively and qualitatively. Simultaneous joint optimization of Gaussians and planes also affects the results negatively. In Figure 7, note how the floor plane gets stuck above the ground level, as revealed by its intersection with the bin.
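A rough picture of the alternating scheme, written as block-coordinate descent over two parameter groups, is sketched below. This is our own PyTorch-style simplification, not the released training loop: the parameter shapes, learning rates, alternation schedule, and placeholder loss are all assumptions made for illustration only.

```python
import torch

# Hypothetical parameter groups; in practice these would be the Gaussian attributes
# (means, scales, rotations, opacities, SH) and the per-plane (origin, normal) parameters.
gaussian_params = [torch.randn(1000, 3, requires_grad=True)]
plane_params = [torch.randn(8, 6, requires_grad=True)]

opt_gauss = torch.optim.Adam(gaussian_params, lr=1.6e-4)
opt_plane = torch.optim.Adam(plane_params, lr=1e-3)

def render_loss():
    # placeholder for the photometric + mask + TV objective
    return (gaussian_params[0].sum() + plane_params[0].sum()) ** 2

for step in range(30_000):
    optimize_planes = (step % 2 == 1)       # alternate which block is updated (schedule is illustrative)
    loss = render_loss()
    loss.backward()
    if optimize_planes:
        opt_plane.step()                    # update plane parameters only
    else:
        opt_gauss.step()                    # update Gaussian parameters only
    opt_gauss.zero_grad(set_to_none=True)
    opt_plane.zero_grad(set_to_none=True)
```

Joint optimization would instead step both optimizers at every iteration, which is the variant shown to perform worse in the ablation.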
|
| 217 |
+
|
| 218 |
+
2D Gaussian design Using hybrid 2D/3D Gaussians is one of the main components of our design. We therefore ablate the necessity of 2D Gaussians by disabling snapping as described in Section 3.2, which causes a significant drop in depth accuracy that is also evident in the qualitative results. As an alternative to snapping, we can regularize the smallest scale component of planar Gaussians; however, we find this approach difficult to tune and its results suboptimal. Finally, we ablate our densification process with relocation of Gaussians to planes. Without relocation, planes are not fully detected, and the planar Gaussians that make up a plane retain low opacity. Furthermore, some Gaussians remain close to a plane without being detected as belonging to it.
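As a loose geometric illustration of what snapping does (our own simplification of the procedure defined in Section 3.2, ignoring rotation alignment and opacity handling), a Gaussian assigned to a plane has its mean projected onto the plane and its thinnest axis collapsed:

```python
import numpy as np

def snap_gaussian_to_plane(mean, plane_normal, plane_d, scales):
    """Project a Gaussian mean onto the plane n.x + d = 0 and flatten it.

    Returns the snapped mean and scales where the smallest axis is collapsed,
    turning the primitive into an effectively 2D (planar) Gaussian. Rotation
    alignment to the plane normal is omitted in this sketch.
    """
    n = plane_normal / np.linalg.norm(plane_normal)
    signed_dist = n @ mean + plane_d
    snapped_mean = mean - signed_dist * n          # orthogonal projection onto the plane
    snapped_scales = scales.copy()
    snapped_scales[np.argmin(scales)] = 0.0        # collapse the thinnest axis
    return snapped_mean, snapped_scales
```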
|
| 219 |
+
|
| 220 |
+
# 5 Conclusions
|
| 221 |
+
|
| 222 |
+
We introduce 3D Gaussian Flats, a hybrid 2D/3D Gaussian representation that accurately models planar surfaces without sacrificing rendering quality. Our method jointly optimizes 2D Gaussians constrained to planar surfaces alongside free-form Gaussians for the remaining scene. By leveraging semantic segmentation masks, we predict both a full 3D representation and semantically distinct planes for planar mesh extraction in indoor scenes. Our approach achieves state-of-the-art depth estimation on indoor scene benchmarks while maintaining high image quality. Additionally, our planar mesh extraction method generalizes across different camera models, overcoming domain gap limitations that typically cause previous methods to fail.
|
| 223 |
+
|
| 224 |
+
**Limitations** The initial 3DGS reconstruction we rely on often produces too few Gaussians in flat, textureless areas, although this could potentially be addressed via more adaptive densification strategies. Further, the weak spherical-harmonics appearance model still leads to extra geometry being built to compensate for view-dependent effects, which a stronger appearance model would resolve. Additionally, we depend on 2D semantic masks from SAMv2 that may contain errors, but our method will naturally improve alongside advances in semantic segmentation. Finally, our RANSAC-based approach, while robust, introduces computational overhead that extends training time. We believe our hybrid representation opens exciting new avenues for research into more efficient approaches that balance geometric precision with visual fidelity.
|
| 225 |
+
|
| 226 |
+
# References
|
| 227 |
+
|
| 228 |
+
[1] Jonathan T. Barron, Ben Mildenhall, Dor Verbin, Pratul P. Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. CVPR, 2022. URL https://github.com/google-research/multinerf. 1
|
| 229 |
+
[2] A. Tewari, J. Thies, B. Mildenhall, P. Srinivasan, E. Tretschk, W. Yifan, C. Lassner, V. Sitzmann, R. Martin-Brualla, S. Lombardi, T. Simon, C. Theobalt, M. Nießner, J. T. Barron, G. Wetzstein, M. Zollhöfer, and V. Golyanik. Advances in neural rendering. Computer Graphics Forum, 2022. 2
|
| 230 |
+
[3] Guikun Chen and Wenguan Wang. A survey on 3d gaussian splatting. arXiv preprint arXiv:2401.03890, 2025. 2
|
| 231 |
+
[4] Lily Goli, Cody Reading, Silvia Sellán, Alec Jacobson, and Andrea Tagliasacchi. Bayes' Rays: Uncertainty quantification in neural radiance fields. CVPR, 2024. URL https://github.com/BayesRays/BayesRays. 2
|
| 232 |
+
[5] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. URL https://github.com/bmild/nerf. 2, 3, 7
|
| 233 |
+
[6] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkühler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph., 2023. URL https://github.com/graphdeco-inria/gaussian-splatting. 2, 3, 4, 6, 7
|
| 234 |
+
[7] Binbin Huang, Zehao Yu, Anpei Chen, Andreas Geiger, and Shenghua Gao. 2d gaussian splatting for geometrically accurate radiance fields. In SIGGRAPH, 2024. URL https://github.com/hbb1/2d-gaussian-splatting. 2, 3, 6, 7, 1
|
| 235 |
+
[8] Yiming Xie, Matheus Gadelha, Fengting Yang, Xiaowei Zhou, and Huaizu Jiang. Planarrecon: Real-time 3d plane detection and reconstruction from posed monocular videos. In CVPR, 2022. URL https://github.com/neu-vi/PlanarRecon. 2, 3, 8, 6
|
| 236 |
+
[9] Jamie Watson, Filippo Aleotti, Mohamed Sayed, Zawar Qureshi, Oisin Mac Aodha, Gabriel Brostow, Michael Firman, and Sara Vicente. Airplanes: Accurate plane estimation via 3d-consistent embeddings. In CVPR, 2024. URL https://github.com/nianticlabs/airplanes. 2, 3, 8, 9, 6
|
| 237 |
+
|
| 238 |
+
[10] Michael Niemeyer, Jonathan T. Barron, Ben Mildenhall, Mehdi S. M. Sajjadi, Andreas Geiger, and Noha Radwan. Regnerf: Regularizing neural radiance fields for view synthesis from sparse inputs. In CVPR, 2022. URL https://github.com/google-research/google-research/tree/master/regnerf. 2, 5, 7
|
| 239 |
+
[11] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. NeurIPS, 2021. URL https://github.com/Totoro97/NeuS. 2, 3
|
| 240 |
+
[12] Zian Wang, Tianchang Shen, Merlin Nimier-David, Nicholas Sharp, Jun Gao, Alexander Keller, Sanja Fidler, Thomas Müller, and Zan Gojcic. Adaptive shells for efficient neural radiance field rendering. ACM TOG., 2023. 2
|
| 241 |
+
[13] Shakiba Kheradmand, Daniel Rebain, Gopal Sharma, Weiwei Sun, Jeff Tseng, Hossam Isack, Abhishek Kar, Andrea Tagliasacchi, and Kwang Moo Yi. 3d gaussian splatting as markov chain monte carlo. In NeurIPS, 2024. URL https://github.com/ubc-vision/3dgs-mcmc. 3, 5, 6, 7, 1
|
| 242 |
+
[14] Zhaoshuo Li, Thomas Müller, Alex Evans, Russell H Taylor, Mathias Unberath, Ming-Yu Liu, and Chen-Hsuan Lin. Neuralangelo: High-fidelity neural surface reconstruction. In CVPR, 2023. URL https://github.com/NVlabs/neuralangelo. 3
|
| 243 |
+
[15] Antoine Guédon and Vincent Lepetit. Sugar: Surface-aligned gaussian splatting for efficient 3d mesh reconstruction and high-quality mesh rendering. In CVPR, 2024. URL https://github.com/Anttwo/SuGaR. 3
|
| 244 |
+
[16] Danpeng Chen, Hai Li, Weicai Ye, Yifan Wang, Weijian Xie, Shangjin Zhai, Nan Wang, Haomin Liu, Hujun Bao, and Guofeng Zhang. Pgsr: Planar-based gaussian splatting for efficient and high-fidelity surface reconstruction. IEEE Transactions on Visualization and Computer Graphics, 2024. URL https://github.com/zju3dv/PGSR. 3, 6, 7
|
| 245 |
+
[17] Baowen Zhang, Chuan Fang, Rakesh Shrestha, Yixun Liang, Xiaoxiao Long, and Ping Tan. Rade-gs: Rasterizing depth in gaussian splatting. arXiv preprint arXiv:2406.01467, 2024. URL https://github.com/BaowenZ/RaDe-GS. 3, 6, 7
|
| 246 |
+
[18] Zhi-Hao Lin, Wei-Chiu Ma, Hao-Yu Hsu, Yu-Chiang Frank Wang, and Shenlong Wang. Neurmips: Neural mixture of planar experts for view synthesis. In CVPR, 2022. URL https://github.com/chih-hao-lin/neurmips. 3
|
| 247 |
+
[19] Bin Tan, Rui Yu, Yujun Shen, and Nan Xue. Planarsplatting: Accurate planar surface reconstruction in 3 minutes. In CVPR, 2025. 3
|
| 248 |
+
[20] David Svitov, Pietro Morerio, Lourdes Agapito, and Alessio Del Bue. Billboard splatting (bb-splat): Learnable textured primitives for novel view synthesis. arXiv preprint arXiv:2411.08508, 2024. URL https://github.com/david-svitov/BBSplat. 3
|
| 249 |
+
[21] Yao-Chih Lee, Zhoutong Zhang, Kevin Blackburn-Matzen, Simon Niklaus, Jianming Zhang, Jia-Bin Huang, and Feng Liu. Fast view synthesis of casual videos with soup-of-planes. In ECCV, 2024. 3
|
| 250 |
+
[22] Haoyu Guo, Sida Peng, Haotong Lin, Qianqian Wang, Guofeng Zhang, Hujun Bao, and Xiaowei Zhou. Neural 3d scene reconstruction with the manhattan-world assumption. In CVPR, 2022. URL https://github.com/zju3dv/manhattan_sdf. 3
|
| 251 |
+
[23] Zheng Chen, Chen Wang, Yuan-Chen Guo, and Song-Hai Zhang. Structnerf: Neural radiance fields for indoor scenes with structural hints. IEEE TPAMI, 2023. 3
|
| 252 |
+
[24] Chen Liu, Kihwan Kim, Jinwei Gu, Yasutaka Furukawa, and Jan Kautz. Planercnn: 3d plane detection and reconstruction from a single image. In CVPR, 2019. URL https://github.com/NVlabs/planercnn. 3
|
| 253 |
+
[25] Yaxu Xie, Fangwen Shu, Jason Rambach, Alain Pagani, and Didier Stricker. Planerecnet: Multi-task learning with cross-task consistency for piece-wise plane detection and reconstruction from a single rgb image. In BMVC, 2021. URL https://github.com/EryiXie/PlaneRecNet. 3, 7, 4, 6
|
| 254 |
+
|
| 255 |
+
[26] Zheng Chen, Qingan Yan, Huangying Zhan, Changjiang Cai, Xiangyu Xu, Yuzhong Huang, Weihan Wang, Ziyue Feng, Lantao Liu, and Yi Xu. Planarnerf: Online learning of planar primitives with neural radiance fields. arXiv preprint arXiv:2401.00871, 2023. 3
|
| 256 |
+
[27] Hanqiao Ye, Yuzhou Liu, Yangdong Liu, and Shuhan Shen. Neuralplane: Structured 3d reconstruction in planar primitives with neural fields. In ICLR, 2025. URL https://github.com/3dv-casia/NeuralPlane. 3, 7, 9
|
| 257 |
+
[28] Jiyeop Kim and Jongwoo Lim. Integrating meshes and 3d gaussians for indoor scene reconstruction with sam mask guidance. arXiv preprint arXiv:2407.16173, 2024. 3
|
| 258 |
+
[29] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Rädle, Chloe Rolland, Laura Gustafson, Eric Mintun, Junting Pan, Kalyan Vasudev Alwala, Nicolas Carion, Chao-Yuan Wu, Ross Girshick, Piotr Dollár, and Christoph Feichtenhofer. Sam 2: Segment anything in images and videos. ICLR, 2025. URL https://github.com/facebookresearch/segment-anything. 3, 7, 4, 6
|
| 259 |
+
[30] Farhad G Zanjani, Hong Cai, Hanno Ackermann, Leila Mirvakhabova, and Fatih Porikli. Planar gaussian splatting. In WACV, 2025. 3
|
| 260 |
+
[31] Chandan Yeshwanth, Yueh-Cheng Liu, Matthias Nießner, and Angela Dai. Scannet++: A high-fidelity dataset of 3d indoor scenes. In ICCV, 2023. URL https://kaldir.vc.in.tum.de/scannetpp. Licensed under the ScanNet++ Terms of Use. 7, 17, 1, 3, 4
|
| 261 |
+
[32] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In CVPR, 2017. URL http://www.scan-net.org. Licensed under the MIT License. 7, 17
|
| 262 |
+
[33] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In CVPR, 2024. URL https://github.com/LiheYoung/Depth-Anything. 7
|
| 263 |
+
[34] Thomas Schöps, Johannes L. Schonberger, Silvano Galliani, Torsten Sattler, Konrad Schindler, Marc Pollefeys, and Andreas Geiger. A multi-view stereo benchmark with high-resolution images and multi-camera videos. In Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 1
|
| 264 |
+
[35] Zehao Yu, Torsten Sattler, and Andreas Geiger. Gaussian opacity fields: Efficient adaptive surface reconstruction in unbounded scenes. ACM Transactions on Graphics, 2024. 1
|
| 265 |
+
[36] Matias Turkulainen, Xuqian Ren, Iaroslav Melekhov, Otto Seiskari, Esa Rahtu, and Juho Kannala. Dn-splatter: Depth and normal priors for gaussian splatting and meshing. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), 2025. 1
|
| 266 |
+
|
| 267 |
+
# NeurIPS Paper Checklist
|
| 268 |
+
|
| 269 |
+
# 1. Claims
|
| 270 |
+
|
| 271 |
+
Question: Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope?
|
| 272 |
+
|
| 273 |
+
Answer: [Yes]
|
| 274 |
+
|
| 275 |
+
Justification: A discussion of current state of the literature can be found in section 2, proposed method is defined in section 3 and the discussion of results in section 4 support the claims made in the abstract and introduction.
|
| 276 |
+
|
| 277 |
+
Guidelines:
|
| 278 |
+
|
| 279 |
+
- The answer NA means that the abstract and introduction do not include the claims made in the paper.
|
| 280 |
+
- The abstract and/or introduction should clearly state the claims made, including the contributions made in the paper and important assumptions and limitations. A No or NA answer to this question will not be perceived well by the reviewers.
|
| 281 |
+
- The claims made should match theoretical and experimental results, and reflect how much the results can be expected to generalize to other settings.
|
| 282 |
+
- It is fine to include aspirational goals as motivation as long as it is clear that these goals are not attained by the paper.
|
| 283 |
+
|
| 284 |
+
# 2. Limitations
|
| 285 |
+
|
| 286 |
+
Question: Does the paper discuss the limitations of the work performed by the authors?
|
| 287 |
+
|
| 288 |
+
Answer: [Yes]
|
| 289 |
+
|
| 290 |
+
Justification: Limitations of the work are listed in section 5.
|
| 291 |
+
|
| 292 |
+
Guidelines:
|
| 293 |
+
|
| 294 |
+
- The answer NA means that the paper has no limitation while the answer No means that the paper has limitations, but those are not discussed in the paper.
|
| 295 |
+
- The authors are encouraged to create a separate "Limitations" section in their paper.
|
| 296 |
+
- The paper should point out any strong assumptions and how robust the results are to violations of these assumptions (e.g., independence assumptions, noiseless settings, model well-specification, asymptotic approximations only holding locally). The authors should reflect on how these assumptions might be violated in practice and what the implications would be.
|
| 297 |
+
- The authors should reflect on the scope of the claims made, e.g., if the approach was only tested on a few datasets or with a few runs. In general, empirical results often depend on implicit assumptions, which should be articulated.
|
| 298 |
+
- The authors should reflect on the factors that influence the performance of the approach. For example, a facial recognition algorithm may perform poorly when image resolution is low or images are taken in low lighting. Or a speech-to-text system might not be used reliably to provide closed captions for online lectures because it fails to handle technical jargon.
|
| 299 |
+
- The authors should discuss the computational efficiency of the proposed algorithms and how they scale with dataset size.
|
| 300 |
+
- If applicable, the authors should discuss possible limitations of their approach to address problems of privacy and fairness.
|
| 301 |
+
- While the authors might fear that complete honesty about limitations might be used by reviewers as grounds for rejection, a worse outcome might be that reviewers discover limitations that aren't acknowledged in the paper. The authors should use their best judgment and recognize that individual actions in favor of transparency play an important role in developing norms that preserve the integrity of the community. Reviewers will be specifically instructed to not penalize honesty concerning limitations.
|
| 302 |
+
|
| 303 |
+
# 3. Theory assumptions and proofs
|
| 304 |
+
|
| 305 |
+
Question: For each theoretical result, does the paper provide the full set of assumptions and a complete (and correct) proof?
|
| 306 |
+
|
| 307 |
+
# Answer: [NA]
|
| 308 |
+
|
| 309 |
+
Justification: The paper does not contain theoretical results.
|
| 310 |
+
|
| 311 |
+
# Guidelines:
|
| 312 |
+
|
| 313 |
+
- The answer NA means that the paper does not include theoretical results.
|
| 314 |
+
- All the theorems, formulas, and proofs in the paper should be numbered and cross-referenced.
|
| 315 |
+
- All assumptions should be clearly stated or referenced in the statement of any theorems.
|
| 316 |
+
- The proofs can either appear in the main paper or the supplemental material, but if they appear in the supplemental material, the authors are encouraged to provide a short proof sketch to provide intuition.
|
| 317 |
+
- Inversely, any informal proof provided in the core of the paper should be complemented by formal proofs provided in appendix or supplemental material.
|
| 318 |
+
- Theorems and Lemmas that the proof relies upon should be properly referenced.
|
| 319 |
+
|
| 320 |
+
# 4. Experimental result reproducibility
|
| 321 |
+
|
| 322 |
+
Question: Does the paper fully disclose all the information needed to reproduce the main experimental results of the paper to the extent that it affects the main claims and/or conclusions of the paper (regardless of whether the code and data are provided or not)?
|
| 323 |
+
|
| 324 |
+
# Answer: [Yes]
|
| 325 |
+
|
| 326 |
+
Justification: All experiments descriptions in the paper follow the same structure, discussing datasets, baselines, metrics and analysing the results with provided hyperparameter settings and data sampling methodology. In Appendix E, we additionally discuss data preparation procedures.
|
| 327 |
+
|
| 328 |
+
# Guidelines:
|
| 329 |
+
|
| 330 |
+
- The answer NA means that the paper does not include experiments.
|
| 331 |
+
- If the paper includes experiments, a No answer to this question will not be perceived well by the reviewers: Making the paper reproducible is important, regardless of whether the code and data are provided or not.
|
| 332 |
+
- If the contribution is a dataset and/or model, the authors should describe the steps taken to make their results reproducible or verifiable.
|
| 333 |
+
- Depending on the contribution, reproducibility can be accomplished in various ways. For example, if the contribution is a novel architecture, describing the architecture fully might suffice, or if the contribution is a specific model and empirical evaluation, it may be necessary to either make it possible for others to replicate the model with the same dataset, or provide access to the model. In general, releasing code and data is often one good way to accomplish this, but reproducibility can also be provided via detailed instructions for how to replicate the results, access to a hosted model (e.g., in the case of a large language model), releasing of a model checkpoint, or other means that are appropriate to the research performed.
|
| 334 |
+
- While NeurIPS does not require releasing code, the conference does require all submissions to provide some reasonable avenue for reproducibility, which may depend on the nature of the contribution. For example
|
| 335 |
+
(a) If the contribution is primarily a new algorithm, the paper should make it clear how to reproduce that algorithm.
|
| 336 |
+
(b) If the contribution is primarily a new model architecture, the paper should describe the architecture clearly and fully.
|
| 337 |
+
(c) If the contribution is a new model (e.g., a large language model), then there should either be a way to access this model for reproducing the results or a way to reproduce the model (e.g., with an open-source dataset or instructions for how to construct the dataset).
|
| 338 |
+
(d) We recognize that reproducibility may be tricky in some cases, in which case authors are welcome to describe the particular way they provide for reproducibility. In the case of closed-source models, it may be that access to the model is limited in some way (e.g., to registered users), but it should be possible for other researchers to have some path to reproducing or verifying the results.
|
| 339 |
+
|
| 340 |
+
# 5. Open access to data and code
|
| 341 |
+
|
| 342 |
+
Question: Does the paper provide open access to the data and code, with sufficient instructions to faithfully reproduce the main experimental results, as described in supplemental material?
|
| 343 |
+
|
| 344 |
+
Answer: [Yes]
|
| 345 |
+
|
| 346 |
+
Justification: The code is provided with the supplementary material, the used datasets are public.
|
| 347 |
+
|
| 348 |
+
Guidelines:
|
| 349 |
+
|
| 350 |
+
- The answer NA means that paper does not include experiments requiring code.
|
| 351 |
+
- Please see the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details.
|
| 352 |
+
- While we encourage the release of code and data, we understand that this might not be possible, so "No" is an acceptable answer. Papers cannot be rejected simply for not including code, unless this is central to the contribution (e.g., for a new open-source benchmark).
|
| 353 |
+
- The instructions should contain the exact command and environment needed to run to reproduce the results. See the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details.
|
| 354 |
+
- The authors should provide instructions on data access and preparation, including how to access the raw data, preprocessed data, intermediate data, and generated data, etc.
|
| 355 |
+
- The authors should provide scripts to reproduce all experimental results for the new proposed method and baselines. If only a subset of experiments are reproducible, they should state which ones are omitted from the script and why.
|
| 356 |
+
- At submission time, to preserve anonymity, the authors should release anonymized versions (if applicable).
|
| 357 |
+
- Providing as much information as possible in supplemental material (appended to the paper) is recommended, but including URLs to data and code is permitted.
|
| 358 |
+
|
| 359 |
+
# 6. Experimental setting/details
|
| 360 |
+
|
| 361 |
+
Question: Does the paper specify all the training and test details (e.g., data splits, hyperparameters, how they were chosen, type of optimizer, etc.) necessary to understand the results?
|
| 362 |
+
|
| 363 |
+
Answer: [Yes]
|
| 364 |
+
|
| 365 |
+
Justification: The hyperparameters and data splits are provided in section 4 and Appendix F. Hyperparameter choice justification is discussed.
|
| 366 |
+
|
| 367 |
+
Guidelines:
|
| 368 |
+
|
| 369 |
+
- The answer NA means that the paper does not include experiments.
|
| 370 |
+
- The experimental setting should be presented in the core of the paper to a level of detail that is necessary to appreciate the results and make sense of them.
|
| 371 |
+
- The full details can be provided either with the code, in appendix, or as supplemental material.
|
| 372 |
+
|
| 373 |
+
# 7. Experiment statistical significance
|
| 374 |
+
|
| 375 |
+
Question: Does the paper report error bars suitably and correctly defined or other appropriate information about the statistical significance of the experiments?
|
| 376 |
+
|
| 377 |
+
Answer: [No]
|
| 378 |
+
|
| 379 |
+
Justification: It is infeasible to report statistical significance for all compared methods given the amount of compute this would require relative to what is available. The observed variance in the conducted experiments was negligible.
|
| 380 |
+
|
| 381 |
+
Guidelines:
|
| 382 |
+
|
| 383 |
+
- The answer NA means that the paper does not include experiments.
|
| 384 |
+
- The authors should answer "Yes" if the results are accompanied by error bars, confidence intervals, or statistical significance tests, at least for the experiments that support the main claims of the paper.
|
| 385 |
+
|
| 386 |
+
- The factors of variability that the error bars are capturing should be clearly stated (for example, train/test split, initialization, random drawing of some parameter, or overall run with given experimental conditions).
|
| 387 |
+
- The method for calculating the error bars should be explained (closed form formula call to a library function, bootstrap, etc.)
|
| 388 |
+
- The assumptions made should be given (e.g., Normally distributed errors).
|
| 389 |
+
- It should be clear whether the error bar is the standard deviation or the standard error of the mean.
|
| 390 |
+
- It is OK to report 1-sigma error bars, but one should state it. The authors should preferably report a 2-sigma error bar than state that they have a $96\%$ CI, if the hypothesis of Normality of errors is not verified.
|
| 391 |
+
- For asymmetric distributions, the authors should be careful not to show in tables or figures symmetric error bars that would yield results that are out of range (e.g. negative error rates).
|
| 392 |
+
- If error bars are reported in tables or plots, The authors should explain in the text how they were calculated and reference the corresponding figures or tables in the text.
|
| 393 |
+
|
| 394 |
+
# 8. Experiments compute resources
|
| 395 |
+
|
| 396 |
+
Question: For each experiment, does the paper provide sufficient information on the computer resources (type of compute workers, memory, time of execution) needed to reproduce the experiments?
|
| 397 |
+
|
| 398 |
+
Answer: [Yes]
|
| 399 |
+
|
| 400 |
+
Justification: Information is provided in Appendix F.
|
| 401 |
+
|
| 402 |
+
Guidelines:
|
| 403 |
+
|
| 404 |
+
- The answer NA means that the paper does not include experiments.
|
| 405 |
+
- The paper should indicate the type of compute workers CPU or GPU, internal cluster or cloud provider, including relevant memory and storage.
|
| 406 |
+
- The paper should provide the amount of compute required for each of the individual experimental runs as well as estimate the total compute.
|
| 407 |
+
- The paper should disclose whether the full research project required more compute than the experiments reported in the paper (e.g., preliminary or failed experiments that didn't make it into the paper).
|
| 408 |
+
|
| 409 |
+
# 9. Code of ethics
|
| 410 |
+
|
| 411 |
+
Question: Does the research conducted in the paper conform, in every respect, with the NeurIPS Code of Ethics https://neurips.cc/public/EthicsGuidelines?
|
| 412 |
+
|
| 413 |
+
Answer: [Yes]
|
| 414 |
+
|
| 415 |
+
Justification: The authors reviewed and followed the NeurIPS Code of Ethics. In particular, used datasets are anonymized.
|
| 416 |
+
|
| 417 |
+
Guidelines:
|
| 418 |
+
|
| 419 |
+
- The answer NA means that the authors have not reviewed the NeurIPS Code of Ethics.
|
| 420 |
+
- If the authors answer No, they should explain the special circumstances that require a deviation from the Code of Ethics.
|
| 421 |
+
- The authors should make sure to preserve anonymity (e.g., if there is a special consideration due to laws or regulations in their jurisdiction).
|
| 422 |
+
|
| 423 |
+
# 10. Broader impacts
|
| 424 |
+
|
| 425 |
+
Question: Does the paper discuss both potential positive societal impacts and negative societal impacts of the work performed?
|
| 426 |
+
|
| 427 |
+
Answer: [NA]
|
| 428 |
+
|
| 429 |
+
Justification: The method improves the quality and efficiency in the tasks of Novel View Synthesis and planar mesh extraction – however, these improvements do not introduce conceptually new capabilities that would require revision of societal impact when compared to prior work.
|
| 430 |
+
|
| 431 |
+
# Guidelines:
|
| 432 |
+
|
| 433 |
+
- The answer NA means that there is no societal impact of the work performed.
|
| 434 |
+
- If the authors answer NA or No, they should explain why their work has no societal impact or why the paper does not address societal impact.
|
| 435 |
+
- Examples of negative societal impacts include potential malicious or unintended uses (e.g., disinformation, generating fake profiles, surveillance), fairness considerations (e.g., deployment of technologies that could make decisions that unfairly impact specific groups), privacy considerations, and security considerations.
|
| 436 |
+
- The conference expects that many papers will be foundational research and not tied to particular applications, let alone deployments. However, if there is a direct path to any negative applications, the authors should point it out. For example, it is legitimate to point out that an improvement in the quality of generative models could be used to generate deepfakes for disinformation. On the other hand, it is not needed to point out that a generic algorithm for optimizing neural networks could enable people to train models that generate Deepfakes faster.
|
| 437 |
+
- The authors should consider possible harms that could arise when the technology is being used as intended and functioning correctly, harms that could arise when the technology is being used as intended but gives incorrect results, and harms following from (intentional or unintentional) misuse of the technology.
|
| 438 |
+
- If there are negative societal impacts, the authors could also discuss possible mitigation strategies (e.g., gated release of models, providing defenses in addition to attacks, mechanisms for monitoring misuse, mechanisms to monitor how a system learns from feedback over time, improving the efficiency and accessibility of ML).
|
| 439 |
+
|
| 440 |
+
# 11. Safeguards
|
| 441 |
+
|
| 442 |
+
Question: Does the paper describe safeguards that have been put in place for responsible release of data or models that have a high risk for misuse (e.g., pretrained language models, image generators, or scraped datasets)?
|
| 443 |
+
|
| 444 |
+
Answer: [NA]
|
| 445 |
+
|
| 446 |
+
Justification: The paper poses no such risks.
|
| 447 |
+
|
| 448 |
+
# Guidelines:
|
| 449 |
+
|
| 450 |
+
- The answer NA means that the paper poses no such risks.
|
| 451 |
+
- Released models that have a high risk for misuse or dual-use should be released with necessary safeguards to allow for controlled use of the model, for example by requiring that users adhere to usage guidelines or restrictions to access the model or implementing safety filters.
|
| 452 |
+
- Datasets that have been scraped from the Internet could pose safety risks. The authors should describe how they avoided releasing unsafe images.
|
| 453 |
+
- We recognize that providing effective safeguards is challenging, and many papers do not require this, but we encourage authors to take this into account and make a best faith effort.
|
| 454 |
+
|
| 455 |
+
# 12. Licenses for existing assets
|
| 456 |
+
|
| 457 |
+
Question: Are the creators or original owners of assets (e.g., code, data, models), used in the paper, properly credited and are the license and terms of use explicitly mentioned and properly respected?
|
| 458 |
+
|
| 459 |
+
Answer: [Yes]
|
| 460 |
+
|
| 461 |
+
Justification: The datasets used are credited with their licenses ([31] and [32]); competing methods are cited next to their short names in every table, and those citations include code links crediting the respective licenses. Codebases used are credited in Appendix E.
|
| 462 |
+
|
| 463 |
+
# Guidelines:
|
| 464 |
+
|
| 465 |
+
- The answer NA means that the paper does not use existing assets.
|
| 466 |
+
- The authors should cite the original paper that produced the code package or dataset.
|
| 467 |
+
- The authors should state which version of the asset is used and, if possible, include a URL.
|
| 468 |
+
|
| 469 |
+
- The name of the license (e.g., CC-BY 4.0) should be included for each asset.
|
| 470 |
+
- For scraped data from a particular source (e.g., website), the copyright and terms of service of that source should be provided.
|
| 471 |
+
- If assets are released, the license, copyright information, and terms of use in the package should be provided. For popular datasets, paperswithcode.com/datasets has curated licenses for some datasets. Their licensing guide can help determine the license of a dataset.
|
| 472 |
+
- For existing datasets that are re-packaged, both the original license and the license of the derived asset (if it has changed) should be provided.
|
| 473 |
+
- If this information is not available online, the authors are encouraged to reach out to the asset's creators.
|
| 474 |
+
|
| 475 |
+
# 13. New assets
|
| 476 |
+
|
| 477 |
+
Question: Are new assets introduced in the paper well documented and is the documentation provided alongside the assets?
|
| 478 |
+
|
| 479 |
+
Answer: [Yes]
|
| 480 |
+
|
| 481 |
+
Justification: The code contains README file with all the information to reproduce the paper results.
|
| 482 |
+
|
| 483 |
+
Guidelines:
|
| 484 |
+
|
| 485 |
+
- The answer NA means that the paper does not release new assets.
|
| 486 |
+
- Researchers should communicate the details of the dataset/code/model as part of their submissions via structured templates. This includes details about training, license, limitations, etc.
|
| 487 |
+
- The paper should discuss whether and how consent was obtained from people whose asset is used.
|
| 488 |
+
- At submission time, remember to anonymize your assets (if applicable). You can either create an anonymized URL or include an anonymized zip file.
|
| 489 |
+
|
| 490 |
+
# 14. Crowdsourcing and research with human subjects
|
| 491 |
+
|
| 492 |
+
Question: For crowdsourcing experiments and research with human subjects, does the paper include the full text of instructions given to participants and screenshots, if applicable, as well as details about compensation (if any)?
|
| 493 |
+
|
| 494 |
+
Answer: [NA]
|
| 495 |
+
|
| 496 |
+
Justification: The paper does not involve crowdsourcing nor research with human subjects.
|
| 497 |
+
|
| 498 |
+
Guidelines:
|
| 499 |
+
|
| 500 |
+
- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects.
|
| 501 |
+
- Including this information in the supplemental material is fine, but if the main contribution of the paper involves human subjects, then as much detail as possible should be included in the main paper.
|
| 502 |
+
- According to the NeurIPS Code of Ethics, workers involved in data collection, curation, or other labor should be paid at least the minimum wage in the country of the data collector.
|
| 503 |
+
|
| 504 |
+
# 15. Institutional review board (IRB) approvals or equivalent for research with human subjects
|
| 505 |
+
|
| 506 |
+
Question: Does the paper describe potential risks incurred by study participants, whether such risks were disclosed to the subjects, and whether Institutional Review Board (IRB) approvals (or an equivalent approval/review based on the requirements of your country or institution) were obtained?
|
| 507 |
+
|
| 508 |
+
Answer: [NA]
|
| 509 |
+
|
| 510 |
+
Justification: The paper does not involve crowdsourcing nor research with human subjects.
|
| 511 |
+
|
| 512 |
+
Guidelines:
|
| 513 |
+
|
| 514 |
+
- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects.
|
| 515 |
+
|
| 516 |
+
- We recognize that the procedures for this may vary significantly between institutions and locations, and we expect authors to adhere to the NeurIPS Code of Ethics and the guidelines for their institution.
|
| 517 |
+
- For initial submissions, do not include any information that would break anonymity (if applicable), such as the institution conducting the review.
|
| 518 |
+
|
| 519 |
+
# 16. Declaration of LLM usage
|
| 520 |
+
|
| 521 |
+
Question: Does the paper describe the usage of LLMs if it is an important, original, or non-standard component of the core methods in this research? Note that if the LLM is used only for writing, editing, or formatting purposes and does not impact the core methodology, scientific rigorousness, or originality of the research, declaration is not required.
|
| 522 |
+
|
| 523 |
+
Answer: [NA]
|
| 524 |
+
|
| 525 |
+
Justification: LLM policy is reflected in OpenReview submission. The paper does not involve LLMs as any important, original, or non-standard components.
|
| 526 |
+
|
| 527 |
+
Guidelines:
|
| 528 |
+
|
| 529 |
+
- The answer NA means that the core method development in this research does not involve LLMs as any important, original, or non-standard components.
|
| 530 |
+
- Please refer to our LLM policy (https://neurips.cc/Conferences/2025/LLM) for what should or should not be described.
|
| 531 |
+
|
| 532 |
+
# A Full mesh extraction results - Figures 8 to 10
|
| 533 |
+
|
| 534 |
+
We evaluate our hybrid representation on the task of full mesh extraction using the method from [7]. This complements the planar-only mesh extraction experiments presented in Section 4.2: we concatenate the two meshes and compare against the common benchmarks from Section 4.1.
|
| 535 |
+
|
| 536 |
+
Datasets We evaluate on ScanNet++ [31], a common indoor scene benchmark, as well as on a subset of suitable indoor/outdoor scenes from ETH3D [34], which provides high-quality meshes and is more challenging due to its sparse image supervision.
|
| 537 |
+
|
| 538 |
+
Baselines For ScanNet++, we reuse the models trained on the iPhone data stream and evaluated on the NVS task in Section 4.1 to assess mesh reconstruction quality. On ETH3D, we additionally evaluate Gaussian Opacity Fields (GOF) [35], an extension of 2DGS for higher-quality mesh reconstruction, and DNSplatter [36], a method that supervises 3DGS with monocular depth.
|
| 539 |
+
|
| 540 |
+
To obtain the mesh, we use TSDF fusion with the median depth estimate for 3DGS, 2DGS, DNSplatter and ours, rather than the expected ray termination used in the default settings (i.e., average depth). For PGSR we use their proposed unbiased depth computation, and for Gaussian Opacity Fields we extract the mesh from the level set of the Gaussians; hence that mesh is not colored.
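A minimal Open3D-style sketch of this fusion step is given below; the voxel size, truncation distance, and depth truncation are illustrative values of our own choosing, not the settings used in the paper.

```python
import open3d as o3d  # assumed available; parameter values below are illustrative

def fuse_median_depths(frames, intrinsic, voxel=0.02, trunc=0.08):
    """TSDF-fuse per-view rendered (median) depth maps into a mesh.

    `frames` is an iterable of (rgb_uint8 HxWx3, depth_float32 HxW in meters,
    world_to_cam 4x4); `intrinsic` is an o3d.camera.PinholeCameraIntrinsic.
    """
    volume = o3d.pipelines.integration.ScalableTSDFVolume(
        voxel_length=voxel, sdf_trunc=trunc,
        color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8)
    for rgb, depth, world_to_cam in frames:
        rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
            o3d.geometry.Image(rgb), o3d.geometry.Image(depth),
            depth_scale=1.0, depth_trunc=6.0, convert_rgb_to_intensity=False)
        volume.integrate(rgbd, intrinsic, world_to_cam)
    return volume.extract_triangle_mesh()
```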
|
| 541 |
+
|
| 542 |
+
Metrics We use the same metrics as for the meshing task in the planar mesh experiments (Section 4.2) and compute the F1-score at a $5\mathrm{cm}$ threshold. For both datasets, we use every 8th image as a test image.
|
| 543 |
+
|
| 544 |
+
Analysis We provide full mesh renders along with the metrics on ScanNet++ in Figure 8. For ETH3D, in addition to the mesh renders in Figure 10, we provide rendered novel views from the test set in Figure 9. Note that the captured planar surfaces are unbiased and outline the scene structure well. Moreover, in the sparse-view setting on the ETH3D dataset we achieve a notable rendering quality improvement.
|
| 545 |
+
|
| 546 |
+
# B Additional ablations - Tables 1 and 2
|
| 547 |
+
|
| 548 |
+
Random initialization We analyze the effect of sparse point cloud initialization versus random initialization in our method on 11 DSLR scenes from ScanNet++ [31]. For random initialization we run 5000 iterations in our warmup stage, as opposed to the usual 3500. We show that our method maintains robustness to random initialization similar to 3DGS-MCMC [13], and despite a drop in the number of planar Gaussians, it achieves depth and image quality metrics comparable to our method initialized with the SfM sparse point cloud.
|
| 549 |
+
|
| 550 |
+
Table 1: Ablation on initialization - Our method is robust to random initialization and achieves comparable performance to when initialized with SfM point cloud.
|
| 551 |
+
|
| 552 |
+
<table><tr><td>Method</td><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>RMSE↓</td><td>MAE↓</td><td>AbsRel↓</td><td>#primitives</td><td>(%planar)</td></tr><tr><td>3DGS-MCMC (SfM)</td><td>23.38</td><td>0.87</td><td>0.24</td><td>0.41</td><td>0.24</td><td>0.26</td><td>1.13M</td><td></td></tr><tr><td>Ours (SfM)</td><td>23.42</td><td>0.87</td><td>0.24</td><td>0.20</td><td>0.13</td><td>0.12</td><td>1.13M</td><td>(31%)</td></tr><tr><td>Ours (Random)</td><td>23.30</td><td>0.86</td><td>0.25</td><td>0.21</td><td>0.14</td><td>0.13</td><td>1.13M</td><td>(21%)</td></tr></table>
|
| 553 |
+
|
| 554 |
+
Full metrics set for ablation on design choices We provide the full set of metrics for the ablation on design choices (described in Section 4.3) in Table 2.
|
| 555 |
+
|
| 556 |
+

|
| 557 |
+
Figure 8: Full Mesh Extraction Results on ScanNet++ - Our method achieves competitive performance for surface reconstruction while maintaining rendering quality. Checkered surfaces indicate different planes; planes usually lie behind the TSDF-extracted mesh, as they represent unbiased surfaces. Some of the meshes are shown from outside the indoor scene to highlight the planar alignment.
|
| 558 |
+
|
| 559 |
+
# C Additional video and 3D mesh results
|
| 560 |
+
|
| 561 |
+
We provide video renderings of RGB and depth for our method compared to baselines in https://theialab.github.io/3dgs-flats. Video results best capture the significant enhancement of our approach over baselines in depth estimation and accurately modeling scene geometry.
|
| 562 |
+
|
| 563 |
+

|
| 564 |
+
Figure 9: Rendering Results on ETH3D Scenes - Our method outperforms the baselines in terms of rendering quality on this set of sparse view outdoor/indoor scenes, and the planar representation is crucial for achieving good novel view synthesis in sparse scenarios.
|
| 565 |
+
|
| 566 |
+
<table><tr><td rowspan="2">Method</td><td colspan="3">Electro</td><td colspan="3">Terrace</td><td colspan="3">Delivery area</td></tr><tr><td>PSNR↑</td><td>LPIPS↓</td><td>SSIM↑</td><td>PSNR↑</td><td>LPIPS↓</td><td>SSIM↑</td><td>PSNR↑</td><td>LPIPS↓</td><td>SSIM↑</td></tr><tr><td>3DGS</td><td>16.45</td><td>0.38</td><td>0.72</td><td>20.77</td><td>0.27</td><td>0.78</td><td>19.48</td><td>0.29</td><td>0.83</td></tr><tr><td>2DGS</td><td>16.40</td><td>0.41</td><td>0.72</td><td>20.82</td><td>0.29</td><td>0.79</td><td>19.26</td><td>0.35</td><td>0.81</td></tr><tr><td>GOF</td><td>17.34</td><td>0.36</td><td>0.71</td><td>20.80</td><td>0.27</td><td>0.75</td><td>19.40</td><td>0.33</td><td>0.79</td></tr><tr><td>PGSR</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>16.64</td><td>0.41</td><td>0.69</td></tr><tr><td>DNSplatter</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>19.56</td><td>0.24</td><td>0.77</td></tr><tr><td>Ours</td><td>18.72</td><td>0.31</td><td>0.75</td><td>22.57</td><td>0.22</td><td>0.81</td><td>22.56</td><td>0.21</td><td>0.87</td></tr></table>
|
| 567 |
+
|
| 568 |
+
Table 2: Ablation on design choices – Loss components and optimization strategy are critical, with simultaneous plane-Gaussian optimization causing significant drops. 2D Gaussian snapping greatly improves depth accuracy compared to regularization alternatives. Similarly, Gaussian relocation is essential.
|
| 569 |
+
|
| 570 |
+
<table><tr><td>Full model</td><td>PSNR↑ 26.83</td><td>LPIPS↓ 0.27</td><td>SSIM↑ 0.86</td><td>RMSE↓ 0.25</td><td>MAE↓ 0.18</td><td>AbsRel↓ 0.09</td></tr><tr><td>Loss design:</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>w/o LTV</td><td>23.24</td><td>0.34</td><td>0.82</td><td>0.34</td><td>0.24</td><td>0.13</td></tr><tr><td>w/o Lmask</td><td>24.02</td><td>0.32</td><td>0.83</td><td>0.62</td><td>0.53</td><td>0.29</td></tr><tr><td>Optimization design:</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>w/o plane optimization</td><td>21.08</td><td>0.37</td><td>0.80</td><td>0.54</td><td>0.43</td><td>0.24</td></tr><tr><td>simult. joint optimization</td><td>19.52</td><td>0.38</td><td>0.79</td><td>0.40</td><td>0.32</td><td>0.18</td></tr><tr><td>2D Gaussian design:</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>w/o snapping</td><td>25.53</td><td>0.31</td><td>0.84</td><td>0.38</td><td>0.31</td><td>0.17</td></tr><tr><td>reg. w/o snapping</td><td>21.69</td><td>0.35</td><td>0.81</td><td>0.36</td><td>0.28</td><td>0.15</td></tr><tr><td>w/o relocation</td><td>20.00</td><td>0.37</td><td>0.80</td><td>0.59</td><td>0.50</td><td>0.28</td></tr></table>
|
| 571 |
+
|
| 572 |
+
# D Additional qualitative results – Figures 11 and 12
|
| 573 |
+
|
| 574 |
+
We provide more qualitative evidence for the performance of our method compared to the 2DGS [7], 3DGS [6] and 3DGS-MCMC [13] baselines on the ScanNet++ [31] dataset in Figure 11. The results show how the baselines particularly struggle to reconstruct accurate geometry for textureless areas, while our method significantly improves upon them in depth estimation and preserves the visual quality of the images.
|
| 575 |
+
|
| 576 |
+

|
| 577 |
+
Figure 10: Full Mesh Extraction Results on ETH3D Scenes - Our method outperforms the baselines.
|
| 578 |
+
|
| 579 |
+
Further, we provide more visualizations of our estimated planes on the ScanNet++ [31] dataset, showcasing the close alignment of our planes with the detected planar surfaces in Figure 12.
|
| 580 |
+
|
| 581 |
+
# E Input planar masks
|
| 582 |
+
|
| 583 |
+
2D semantic masks Our method relies on consistent input 2D segmentation masks of planar surfaces. To obtain these masks, we can either annotate each image collection manually or automate the process for larger scenes. For automatic annotation, we employ PlaneRecNet [25] and the SAMv2 video segmentation model [29] in a small pipeline. We first input images to PlaneRecNet to obtain 2D plane annotations that are not semantically consistent across the image collection, setting the plane probability threshold to 0.5. While this works well on iPhone images, it produces fewer plane annotations for DSLR images, which are out of distribution for its network trained on iPhone data. We then feed these unmatched masks as seeds to SAMv2. To do so, we order image collections that are not already sampled from a video. We propagate masks from the initial frame through 16-frame chunks of the sequence to the next 15 frames, and match SAMv2's predictions with any subsequent 2D masks output by [25] using Hungarian matching with an IoU metric. Although largely effective, this process is prone to error accumulation through mask propagation. However, we assume the resulting masks are semantically consistent across the image sequence. We provide a sample segmentation of an input sequence in the supplementary video and on the website.
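The mask-association step can be sketched as maximizing total IoU with the Hungarian algorithm; the function below is our own illustration (names and the IoU threshold are ours, not taken from the released code).

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def match_masks(sam_masks, planerecnet_masks, iou_thresh=0.5):
    """Associate SAMv2-propagated masks with PlaneRecNet detections on one frame.

    Both inputs are lists of boolean HxW arrays; returns (i, j) index pairs whose
    IoU exceeds the threshold. The threshold value here is illustrative.
    """
    iou = np.zeros((len(sam_masks), len(planerecnet_masks)))
    for i, a in enumerate(sam_masks):
        for j, b in enumerate(planerecnet_masks):
            inter = np.logical_and(a, b).sum()
            union = np.logical_or(a, b).sum()
            iou[i, j] = inter / union if union > 0 else 0.0
    rows, cols = linear_sum_assignment(-iou)       # Hungarian matching, maximize total IoU
    return [(i, j) for i, j in zip(rows, cols) if iou[i, j] >= iou_thresh]
```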
|
| 584 |
+
|
| 585 |
+

|
| 586 |
+
Figure 11: Novel view synthesis and depth - Qualitative results on ScanNet++ iPhone dataset show our superior performance in both image quality and depth estimation in novel views. Note the limitation of the quality of Gaussian Splatting based methods for textureless surfaces, which makes the plane fitting procedure less robust.
|
| 587 |
+
|
| 588 |
+

|
| 589 |
+
|
| 590 |
+

|
| 591 |
+
|
| 592 |
+

|
| 593 |
+
|
| 594 |
+

|
| 595 |
+
Figure 12: We provide visualizations of our output planes on the rendered test views of ScanNet++ DSLR streams (top 2 rows) and iPhone stream (bottom 2 rows). Pink markings are due to the anonymization of the original ScanNet++ dataset. While some planar surfaces are missed due to lack of manual 2D planar mask annotation, the captured planes are reconstructed faithfully.
|
| 596 |
+
|
| 597 |
+

|
| 598 |
+
|
| 599 |
+

|
| 600 |
+
|
| 601 |
+

|
| 602 |
+
|
| 603 |
+

|
| 604 |
+
|
| 605 |
+

|
| 606 |
+
|
| 607 |
+

|
| 608 |
+
|
| 609 |
+

|
| 610 |
+
|
| 611 |
+

|
| 612 |
+
|
| 613 |
+
Masked ground truth meshes For the planar mesh extraction task, we only consider planes from the ground-truth mesh that have annotated segmentation masks, as the 2D plane segmentation task is orthogonal to our method. To identify the relevant subset of planes, we unproject points from the ground-truth depth maps that correspond to each plane according to its segmentation mask. We then fit a plane to each resulting point cloud using RANSAC and compile these fitted planes into a set $S$. We match planes from the ground-truth mesh to those in set $S$ by applying two criteria: the normal cosine distance must be less than 0.99 to at least one plane in $S$, and the distance between their closest points must be less than 0.1. This provides computational efficiency and increases robustness to missing or undersegmented planes in the input 2D annotations.
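A sketch of this selection step under the stated thresholds is given below; we interpret the normal criterion as a threshold on the absolute cosine of the angle between normals, and all names are ours rather than from the released code (which defines the exact conventions).

```python
import numpy as np
from scipy.spatial import cKDTree

def match_gt_planes(gt_planes, fitted_planes, cos_thresh=0.99, dist_thresh=0.1):
    """Select ground-truth planes supported by at least one RANSAC-fitted plane.

    Each plane is a dict with a unit `normal` (3,) and sampled surface `points` (N, 3).
    The normal criterion is taken as |cos(angle)| >= cos_thresh; the distance criterion
    keeps pairs whose closest points lie within dist_thresh (values as stated in the text).
    """
    matched = []
    trees = [cKDTree(p["points"]) for p in fitted_planes]
    for gt in gt_planes:
        for fit, tree in zip(fitted_planes, trees):
            cos = abs(float(gt["normal"] @ fit["normal"]))
            closest = tree.query(gt["points"])[0].min()   # smallest point-to-point gap
            if cos >= cos_thresh and closest < dist_thresh:
                matched.append(gt)
                break
    return matched
```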
|
| 614 |
+
|
| 615 |
+
Code We release our code<sup>3</sup> publicly for reproducibility and to facilitate future research in this area. We build on the 3DGS-MCMC codebase [13] and additionally use SAMv2 [29] and PlaneRecNet [25] to generate masks. The baselines are evaluated using their officially released code [7, 6, 16, 17, 13, 8, 9]. We further use the AirPlanes [9] code to compute meshing metrics.
|
| 616 |
+
|
| 617 |
+
# F Hyperparameters settings
|
| 618 |
+
|
| 619 |
+
We use $\sigma_{\perp}$ and $\sigma_{\parallel}$ as hyperparameters that control the stochastic relocation. These parameters are chosen according to the metric scale of the dataset and are defined in millimeters. For both datasets we use $\sigma_{\perp} = 0.01$ and $\sigma_{\parallel} = 0.3$. We observe that setting $\lambda_{\mathrm{mask}} = 0.1$ yields the best results empirically. For regularizers, we use $\lambda_{\mathrm{TV}} = 0.1$, $\lambda_{\mathrm{scale}} = 0.01$ and $\lambda_{\mathrm{opacity}} = 0.01$, following [10] and [13]. We use the same scheduling policy for learning the plane origin and normal (rotation) as for the Gaussian means in vanilla 3DGS. All experiments were conducted on a single A6000 Ada GPU with 46GB of memory. The method runs for approximately 1 hour on a single ScanNet++/ScanNetV2 scene, which is comparable to PGSR [16], the second-best method for geometric quality according to our experiments, and $1.5\times$ longer than 3DGS-MCMC [13], the best method for novel view synthesis. The training time is increased by the RANSAC overhead and the block-coordinate descent optimization of the planar parameters. Additionally, mesh extraction takes $\sim 3$ minutes and SAMv2 mask propagation takes on average 7 minutes, depending on the scene type. We believe the training time can be reduced in future work with the addition of custom CUDA kernels.
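For convenience, the hyperparameters listed above can be restated as a single configuration; the key names below are ours and carry no special meaning in the released code.

```python
# Restating the hyperparameters above as a plain configuration dict (key names are ours).
config = {
    "sigma_perp": 0.01,      # stochastic relocation spread normal to the plane (units per the text)
    "sigma_parallel": 0.3,   # stochastic relocation spread within the plane
    "lambda_mask": 0.1,      # mask loss weight
    "lambda_tv": 0.1,        # total-variation regularizer weight
    "lambda_scale": 0.01,    # scale regularizer weight
    "lambda_opacity": 0.01,  # opacity regularizer weight
    "iterations": 30_000,    # training length used for all methods (Section 4.1)
}
```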
|
3dgaussianflatshybrid2d3dphotometricscenereconstruction/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4df64f8b1ed55d82966da7f70e2fdeb37853a20e087487f11e3f5ab375fcc285
|
| 3 |
+
size 1849089
|
3dgaussianflatshybrid2d3dphotometricscenereconstruction/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d98cccd3594fcf746dedd947d5037455fc15eb148c7584981fd78b34f7c427d6
|
| 3 |
+
size 664511
|
3dgaussiansplattingbasedsceneindependentrelocalizationwithunidirectionalandbidirectionalfeaturefusion/64fceee8-48f9-4972-bfcf-8de3c8fc2ec6_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b7c4ed0e285d4dfba7e9423fa09b2f1c0694163d10f71dab56e7ec64078947b1
|
| 3 |
+
size 156307
|
3dgaussiansplattingbasedsceneindependentrelocalizationwithunidirectionalandbidirectionalfeaturefusion/64fceee8-48f9-4972-bfcf-8de3c8fc2ec6_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c58fbdfa76cfd6c9315086077096d1125e4e320d2055db0c7fc11114b3b7e3fb
|
| 3 |
+
size 207517
|
3dgaussiansplattingbasedsceneindependentrelocalizationwithunidirectionalandbidirectionalfeaturefusion/64fceee8-48f9-4972-bfcf-8de3c8fc2ec6_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a60f221826db3bd766e05358b969ce2bb9a060927e72c582c68172edd86c4167
|
| 3 |
+
size 13754186
|
3dgaussiansplattingbasedsceneindependentrelocalizationwithunidirectionalandbidirectionalfeaturefusion/full.md
ADDED
|
@@ -0,0 +1,705 @@
|
| 1 |
+
# 3D Gaussian Splatting based Scene-independent Camera Relocalization with Unidirectional and Bidirectional Feature Fusion
|
| 2 |
+
|
| 3 |
+
Junyi Wang<sup>1,2</sup>
|
| 4 |
+
|
| 5 |
+
junyiwang@sdu.edu.cn
|
| 6 |
+
|
| 7 |
+
Yuze Wang
|
| 8 |
+
|
| 9 |
+
wangyuze19980709@163.com
|
| 10 |
+
|
| 11 |
+
Wantong Duan
|
| 12 |
+
|
| 13 |
+
wantongd@buaa.edu.cn
|
| 14 |
+
|
| 15 |
+
Meng Wang<sup>2</sup>
|
| 16 |
+
|
| 17 |
+
wangm05@buaa.edu.cn
|
| 18 |
+
|
| 19 |
+
Yue Qi $^{2,3*}$
|
| 20 |
+
|
| 21 |
+
qy@buaa.edu.cn
|
| 22 |
+
|
| 23 |
+
# Abstract
|
| 24 |
+
|
| 25 |
+
Visual localization is a critical component across various domains. The recent emergence of novel scene representations, such as 3D Gaussian Splatting (3D GS), introduces new opportunities for advancing localization pipelines. In this paper, we propose a novel 3D GS-based framework for RGB-based, scene-independent camera relocalization, with three main contributions. First, we design a two-stage pipeline that fully exploits 3D GS. The pipeline consists of an initial stage, which utilizes 2D-3D correspondences between image pixels and 3D Gaussians, followed by pose refinement using images rendered by 3D GS. Second, we introduce a 3D GS based relocalization network, termed GS-RelocNet, to establish correspondences for initial camera pose estimation. Additionally, we present a refinement network that further optimizes the camera pose. Third, we propose a unidirectional 2D-3D feature fusion module and a bidirectional image feature fusion module, integrated into GS-RelocNet and the refinement network, respectively, to enhance feature sharing across the two stages. Experimental results on the public 7 Scenes, Cambridge Landmarks, TUM RGB-D, and Bonn datasets demonstrate state-of-the-art performance. Furthermore, the beneficial effects of the two feature fusion modules and of pose refinement are also highlighted. In summary, we believe that the proposed framework can serve as a novel universal localization pipeline for further research.
|
| 26 |
+
|
| 27 |
+
# 1 Introduction
|
| 28 |
+
|
| 29 |
+
Visual localization is considered a fundamental research problem in computer vision and is applied across a variety of scenarios, including Augmented Reality (AR), Mixed Reality (MR), robotics, and autonomous driving Wang et al. (2024c); Jia et al. (2024); Zhu et al. (2024). The primary function of a localization algorithm is to estimate the 6-DoF (Degrees of Freedom) camera pose within a target environment.
|
| 30 |
+
|
| 31 |
+
Existing works investigate two main types of methods to achieve robust localization performance: feature matching and geometry regression. Specifically, feature matching methods employ either hand-crafted Liu et al. (2017) or learned features Sun et al. (2021) to establish pixel correspondences for localization, while geometry regression approaches train a deep network
|
| 32 |
+
|
| 33 |
+

|
| 34 |
+
Figure 1: (a) Localization pipeline. The framework predicts the initial pose by establishing 2D-3D correspondences between pixels and 3D Gaussians, followed by a pose refinement process that optimizes the pose using views rendered by 3D GS. (b) Localization performance comparison. Our method achieves the lowest localization error on the two datasets while supporting scene-independent localization.
|
| 45 |
+
|
| 46 |
+
to solve the camera pose, using either Absolute Pose Regression (APR) Chen et al. (2022) or Scene Coordinate Regression (SCR) Wang & Qi (2023b). However, owing to their underlying representations, these methods often ignore texture and illumination information, which limits their capacity to fully represent the scene.
|
| 47 |
+
|
| 48 |
+
Recent neural and geometric 3D structures for Novel View Synthesis (NVS) have gained significant popularity for scene representation Mildenhall et al. (2021); Kerbl et al. (2023); Hu et al. (2023). Specifically, 3D Gaussian Splatting (3D GS) offers a well-balanced trade-off between training and rendering performance for NVS, presenting new opportunities for the localization pipeline. However, how to leverage 3D GS for robust and accurate localization remains a significant challenge.
|
| 49 |
+
|
| 50 |
+
Current methods predominantly utilize 3D GS for pose refinement Keetha et al. (2024); Yan et al. (2024); Liu et al. (2025), which heavily relies on the accuracy of initial camera pose estimation. When the initial pose estimation is inaccurate or fails, the refinement process becomes ineffective. To address this, our motivation lies in the employment of 3D GS for both pose initialization and refinement, while achieving scene-independent relocalization to enhance robustness.
|
| 51 |
+
|
| 52 |
+
As illustrated in Fig. 1(a), we introduce a novel relocalization framework that first establishes 2D-3D correspondences between image pixels and 3D Gaussians, followed by a refinement stage that predicts the relative pose between real and rendered views using 3D GS. A feature fusion module is incorporated in both stages to enhance correspondence regression. To the best of our knowledge, this is the first 3D GS based, scene-independent relocalization framework, offering a robust solution for challenging localization tasks. Specifically, the term "scene-independent" indicates that our framework can achieve robust relocalization in a target scene without requiring scene-specific pre-training, in contrast to most "scene-dependent" methods that necessitate prior training on the target scene. Our contributions are summarized as follows.
|
| 53 |
+
|
| 54 |
+
1. We propose an innovative framework for scene-independent camera relocalization. The framework comprises initial pose estimation, which establishes correspondences between pixels of the input image and the scene expressed by 3D Gaussians, and pose refinement, which predicts the relative pose between the input image and a view rendered using 3D GS.
|
| 55 |
+
2. We design GS-RelocNet for predicting the correspondences to obtain the initial camera pose. Within GS-RelocNet, we introduce a unidirectional feature fusion module to merge geometry and texture features for learning confidence scores between each pixel and 3D Gaussian.
|
| 56 |
+
3. We propose a pose refinement network based on 3D GS. In the refinement network, we present a bidirectional feature fusion module to combine features from rendered and real images.
|
| 57 |
+
|
| 58 |
+
To validate the performance of the framework, we conduct experiments on 7 Scenes and Cambridge Landmarks. As illustrated in Fig. 1(b), the results demonstrate state-of-the-art localization performance on the two datasets. Meanwhile, our framework supports scene-independent relocalization, meaning that it can perform relocalization in unseen scenes.
|
| 59 |
+
|
| 60 |
+
# 2 Related Works
|
| 61 |
+
|
| 62 |
+
# 2.1 Localization with Feature Matching
|
| 63 |
+
|
| 64 |
+
Traditional hand-crafted feature matching based methods typically follow a pipeline consisting of feature extraction, matching, global map construction, and optimization Liu et al. (2017); Sattler et al. (2017). Moreover, semantic SLAM systems Yang & Scherer (2019); You et al. (2023); Lin et al. (2024b); Xi et al. (2025); Zhang et al. (2025) incorporate semantic information derived from learned features to enhance the robustness and accuracy of traditional hand-crafted feature based pipelines. Alternatively, learned feature matching methods aim to estimate pixel correspondences for pose estimation Wang et al. (2022, 2024d). Building on LoFTR Sun et al. (2021), Efficient LoFTR Wang et al. (2024d) applies a transformer with an aggregated attention mechanism and adaptive token selection for efficiency.
|
| 65 |
+
|
| 66 |
+
# 2.2 Localization with Geometry Regression
|
| 67 |
+
|
| 68 |
+
The geometry regression methods can be broadly categorized into two approaches: APR and SCR. APR methods train a deep network to learn the relationship between 2D images and 6-DoF camera poses Chen et al. (2022, 2024b). While APR methods offer high computational efficiency, they are often limited by accuracy and generalization issues Liu et al. (2024b). Alternatively, SCR techniques recover the pose by applying the Kabsch or Perspective-n-Point (PnP) algorithm to the known source and predicted target coordinates, which achieves considerable localization performance Wang & Qi (2021, 2023b). Recent prominent SCR works include DUSt3R Wang et al. (2024b) and its subsequent extensions Leroy et al. (2024). By training on large-scale samples, these methods achieve outstanding localization accuracy and generalization ability. However, these methods focus on geometry regression and cannot fully exploit texture features.
|
| 69 |
+
|
| 70 |
+
# 2.3 Localization with Neural Radiance Field (NeRF) and 3D GS
|
| 71 |
+
|
| 72 |
+
Due to the outstanding NVS performance, NeRF is applied to the localization pipeline with iterative rendering and pose updates Germain et al. (2022); Moreau et al. (2023); Chen et al. (2024a); Wang et al. (2023); Xu et al. (2024). NeRFect Match Zhou et al. (2024b) explored the potential of NeRF's internal features in establishing precise 2D-3D matches for localization. With the shift in the NVS field from NeRF to 3D GS, STDLoc Huang et al. (2025) introduced a matching-oriented Gaussian sampling strategy and a scene-specific detector to achieve efficient and robust pose estimation.
|
| 73 |
+
|
| 74 |
+
# 3 Method
|
| 75 |
+
|
| 76 |
+
# 3.1 Overview
|
| 77 |
+
|
| 78 |
+
Given a target image and a 3D scene model expressed through 3D GS, our method predicts the 6-DoF camera pose of the target image within the scene. The overall localization process is composed of two stages, both utilizing 3D GS in distinct ways. In the initial pose estimation stage, we establish the correspondences between each image pixel and 3D Gaussian, followed by a PnP algorithm with RANSAC to solve the initial camera pose. Based on the predicted pose, we proceed to the refinement stage, where we first render the view using 3D GS, and then predict the relative pose between the target and rendered images to perform pose optimization.
|
| 79 |
+
|
| 80 |
+
The advantages of the proposed pipeline are twofold. First, we use 3D Gaussians to represent the scene in the initial pose estimation stage. Compared to the point cloud representation in SCR methods, 3D Gaussians retain more detailed geometric and texture information. Additionally, by establishing correspondences between image pixels and 3D Gaussians, GS-RelocNet enables scene-independent relocalization. Second, we employ the rendered view generated by 3D GS for pose refinement, which reduces the domain gap between the rendered and real views. Through this refinement process, the localization accuracy is further enhanced.
|
| 81 |
+
|
| 82 |
+
# 3.2 Initial Pose Estimation by GS-RelocNet
|
| 83 |
+
|
| 84 |
+
Pose estimation process. In the initial stage, we regress the confidence matrix between image pixels and selected Gaussians using GS-RelocNet. Based on a confidence threshold, we establish the correspondences between pixels and Gaussians, followed by a PnP method with RANSAC to solve the initial pose.
|
| 85 |
+
|
| 86 |
+

|
| 87 |
+
|
| 88 |
+

|
| 89 |
+
Figure 2: Architecture of GS-RelocNet. The GS-RelocNet framework integrates an RGB feature encoder, a 3D Gaussian feature encoder, an RGB descriptor decoder, a point descriptor decoder, and a confidence metric regression module. In the figure, arrows indicate the process flow, and numbers adjacent to each block denote the corresponding filter size.
|
| 90 |
+
|
| 91 |
+

|
| 92 |
+
|
| 93 |
+
Inputs and outputs of GS-RelocNet. The GS-RelocNet receives a monocular RGB image and a 3D model expressed by 3D Gaussians as inputs, and outputs the confidence scores that quantify the correspondence between pixels and 3D Gaussians. These confidence scores are then used to establish 2D-3D correspondences for camera pose prediction.
|
| 94 |
+
|
| 95 |
+
Input processing. For the 2D image, GS-RelocNet employs patch partition and embedding to segment the image into multiple parts. For the 3D Gaussians, point cloud serialization and embedding are exploited to transform the unstructured 3D Gaussians into a structured format. Specifically, the position, opacity (alpha), covariance matrix, and spherical harmonic coefficients of each 3D Gaussian are processed independently, with the features from all four components concatenated. Additionally, we incorporate features from DINO V2 Oquab et al. (2023), along with a depth estimation head, as supplementary input to enhance geometry feature learning. Notably, the parameters of the DINO V2 model are kept frozen during this process.
|
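As a concrete illustration of this per-attribute processing, the following is a minimal PyTorch-style sketch, not the paper's actual implementation: it embeds position, opacity, covariance, and spherical harmonic coefficients independently and concatenates the results into one token per Gaussian. The feature width `d`, the six-element covariance parameterization, and the SH dimension are illustrative assumptions.

```python
import torch
import torch.nn as nn

class GaussianEmbedding(nn.Module):
    """Sketch: embed the four 3D Gaussian attribute groups independently, then
    concatenate the four feature vectors into a single per-Gaussian token."""
    def __init__(self, sh_dim=48, d=32):
        super().__init__()
        self.pos = nn.Linear(3, d)       # Gaussian center (x, y, z)
        self.opa = nn.Linear(1, d)       # opacity (alpha)
        self.cov = nn.Linear(6, d)       # upper-triangular covariance entries (assumed layout)
        self.sh = nn.Linear(sh_dim, d)   # flattened spherical harmonic coefficients

    def forward(self, xyz, alpha, cov6, sh):
        # xyz: (N, 3), alpha: (N, 1), cov6: (N, 6), sh: (N, sh_dim) -> (N, 4 * d)
        return torch.cat([self.pos(xyz), self.opa(alpha), self.cov(cov6), self.sh(sh)], dim=-1)
```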
| 96 |
+
|
| 97 |
+
Architecture of GS-RelocNet. The detailed architecture of GS-RelocNet is presented in Fig. 2. It consists of a spatial image encoder, a 3D Gaussian encoder, an image descriptor decoder, a 3D Gaussian descriptor decoder, and a confidence matrix decoder. Within both encoders and decoders, we incorporate a unidirectional feature fusion module to facilitate effective feature sharing from the model to the RGB image. The image branch employs a Swin Transformer Liu et al. (2021) architecture, consisting of multiple Swin Blocks, while the point cloud branch utilizes Point Transformer V3 Wu et al. (2024) for 3D Gaussian feature learning.
|
| 98 |
+
|
| 99 |
+
Unidirectional feature fusion module. Between consecutive image and 3D Gaussian learning blocks, we propose a unidirectional feature fusion module to combine 2D and 3D features, shown at the lower left of Fig. 2. The module takes image features and geometry features as inputs, and outputs the fused features of the two parts. Let the input RGB feature have dimensions $H_{u} \times W_{u} \times D_{u}$ , and the model feature have dimensions $N_{m} \times D_{m}$ . The whole fusion process is as follows.
|
| 100 |
+
|
| 101 |
+
Step 1, alignment of features. The module aligns the image and 3D model features. Specifically, the RGB feature is reshaped to $N_{m} \times (H_{u} \times W_{u} \times D_{u} / N_{m})$ , followed by a 1D convolution to transform it to $N_{m} \times D_{m}$ , aligning with the model feature dimensions.
|
| 102 |
+
|
| 103 |
+
Step 2, fusion with self-attention. In this step, the features from the image and the 3D Gaussians are first added. Subsequently, multi-head self-attention is applied to the summed features and to the original model features, respectively. Finally, the two resulting features are added to achieve feature fusion.
|
| 104 |
+
|
| 105 |
+
Step 3, feature transformation. The combined feature is reshaped to $H_{u} \times W_{u} \times (N_{m} \times D_{m} / H_{u} / W_{u})$ . A Swin Transformer block is then applied to restore the feature dimensions to $H_{u} \times W_{u} \times D_{u}$ , matching the input RGB feature. The input RGB feature is added to this output to produce the merged feature.
|
| 106 |
+
|
| 107 |
+
Step 4, iterative fusion. Steps 1 through 3 are iteratively applied. Specifically, the fused features from the current iteration are used as the RGB input for the next iteration.
|
| 108 |
+
|
| 109 |
+
Through these steps, GS-RelocNet performs one-way fusion of 2D image and 3D Gaussian features. Specifically, the model features can influence the image descriptor learning, but not the other way around. On one hand, this feature sharing enhances RGB feature descriptor learning. On the other hand, the independence of model descriptor learning allows the model descriptors to be precomputed before the inference stage, significantly accelerating the overall process. In summary, we argue that this fusion mechanism contributes to the localization task, and its effectiveness is validated in the subsequent experiments.
|
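The following is a minimal PyTorch-style sketch of one iteration of Steps 1-3 above; it is not the paper's implementation. The feature map size, channel widths, and the use of a generic transformer encoder layer in place of the Swin block are illustrative assumptions.

```python
import torch
import torch.nn as nn

class UnidirectionalFusion(nn.Module):
    """Sketch of one fusion iteration: align the RGB feature to the model feature
    (Step 1), fuse with self-attention (Step 2), and transform back with a residual
    connection to the input RGB feature (Step 3)."""
    def __init__(self, h=16, w=16, d_rgb=96, n_model=4096, d_model=64, heads=4):
        super().__init__()
        self.h, self.w, self.d_rgb, self.n_model = h, w, d_rgb, n_model
        # Step 1: a 1D convolution aligns the reshaped RGB feature with the model feature width.
        self.align = nn.Conv1d(h * w * d_rgb // n_model, d_model, kernel_size=1)
        # Step 2: self-attention over the summed features and over the original model features.
        self.attn_sum = nn.MultiheadAttention(d_model, heads, batch_first=True)
        self.attn_model = nn.MultiheadAttention(d_model, heads, batch_first=True)
        # Step 3: restore the image layout; a generic transformer layer stands in for the Swin block.
        self.restore = nn.Linear(n_model * d_model // (h * w), d_rgb)
        self.block = nn.TransformerEncoderLayer(d_rgb, heads, batch_first=True)

    def forward(self, f_rgb, f_model):
        # f_rgb: (B, H, W, D_rgb), f_model: (B, N_m, D_m)
        b = f_rgb.shape[0]
        x = f_rgb.reshape(b, self.n_model, -1).transpose(1, 2)          # Step 1: reshape
        x = self.align(x).transpose(1, 2)                               # (B, N_m, D_m)
        s, _ = self.attn_sum(x + f_model, x + f_model, x + f_model)     # Step 2: attend to the sum
        m, _ = self.attn_model(f_model, f_model, f_model)
        fused = s + m
        y = fused.reshape(b, self.h * self.w, -1)                       # Step 3: back to image layout
        y = self.block(self.restore(y)).reshape(b, self.h, self.w, self.d_rgb)
        return f_rgb + y                                                # merged RGB feature
```

In GS-RelocNet, the fused output of one iteration becomes the RGB input of the next iteration (Step 4), while the model features are left untouched, which is what allows the model descriptors to be precomputed.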
| 110 |
+
|
| 111 |
+
Confidence matrix regression. After regressing the $N_{i}$ image descriptors and $N_{g}$ 3D Gaussian descriptors, GS-RelocNet regresses a confidence matrix with $N_{i} \times N_{g}$ scores, where each score represents the confidence between the corresponding image descriptor and 3D Gaussian. The regression process proceeds as follows. First, for the image features, GS-RelocNet applies a positional encoding operation followed by a self-attention operation. Similarly, for the 3D Gaussian features, GS-RelocNet also applies a self-attention operation. Next, cross-attention is applied to the two sets of features. Finally, a dual softmax operation is employed to predict the final confidence matrix.
|
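As a small illustration, the dual-softmax step at the end of this pipeline can be sketched as follows; the attention layers preceding it are omitted, and the temperature value is an assumption rather than a value reported in the paper.

```python
import torch

def dual_softmax_confidence(desc_img, desc_gs, temperature=0.1):
    """Sketch: turn image descriptors (N_i, D) and Gaussian descriptors (N_g, D) into an
    (N_i, N_g) confidence matrix via the product of row- and column-wise softmaxes."""
    sim = desc_img @ desc_gs.t() / temperature   # (N_i, N_g) similarity matrix
    return sim.softmax(dim=0) * sim.softmax(dim=1)
```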
| 112 |
+
|
| 113 |
+
Loss of GS-RelocNet. To train GS-RelocNet, we need to construct the ground truth confidence between each 3D Gaussian and image pixel. Given a 3D Gaussian with 2D covariance matrix $\Sigma$ under the current view, the ground truth confidence is calculated with the following formula,
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
C_{g} = \frac{1}{2\pi|\Sigma|} \exp\left[-\frac{1}{2}(x - \mu)^{T}\Sigma^{-1}(x - \mu)\right], \tag{1}
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
where $\mu$ denotes the 2D Gaussian center, and $x$ is the pixel position.
|
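For concreteness, a small NumPy sketch of Eq. (1), evaluated exactly as written above for one pixel and one projected Gaussian, could look as follows; the function name and inputs are illustrative.

```python
import numpy as np

def gt_confidence(pixel_xy, mu_2d, cov_2d):
    """Ground-truth confidence of Eq. (1) between a pixel at pixel_xy (2,) and a Gaussian
    with projected 2D center mu_2d (2,) and 2D covariance cov_2d (2, 2)."""
    diff = pixel_xy - mu_2d
    maha = diff @ np.linalg.inv(cov_2d) @ diff           # Mahalanobis distance term
    return np.exp(-0.5 * maha) / (2.0 * np.pi * np.linalg.det(cov_2d))
```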
| 120 |
+
|
| 121 |
+
# 3.3 Pose Refinement
|
| 122 |
+
|
| 123 |
+
Refinement process. After processing with GS-RelocNet, we obtain the initial camera pose $(R_i^p, T_i^p)$ of the target image. Subsequently, we render the current view using 3D GS with the predicted camera pose. The refinement process predicts the relative pose $(R_r^p, T_r^p)$ between the input real image and the rendered view. Finally, the refined pose $(R_f^p, T_f^p)$ is obtained by the following formula.
|
| 124 |
+
|
| 125 |
+
$$
|
| 126 |
+
R _ {f} ^ {p} = R _ {r} ^ {p} * R _ {i} ^ {p}, \quad T _ {f} ^ {p} = R _ {r} ^ {p} * T _ {i} ^ {p} + T _ {r} ^ {p}. \tag {2}
|
| 127 |
+
$$
|
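Eq. (2) is a plain rigid-transform composition; a short NumPy sketch of it, with rotation matrices and translation vectors as inputs, is given below.

```python
import numpy as np

def compose_refined_pose(R_i, t_i, R_r, t_r):
    """Eq. (2): refine the initial pose (R_i, t_i) with the predicted relative pose
    (R_r, t_r) between the rendered and real views."""
    R_f = R_r @ R_i
    t_f = R_r @ t_i + t_r
    return R_f, t_f
```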
| 128 |
+
|
| 129 |
+
To predict $(R_{r}^{p}, T_{r}^{p})$, we propose a refinement network to regress the residual coordinate map proposed in Wang & Qi (2023a) between the rendered and real views.
|
| 130 |
+
|
| 131 |
+
Refinement network. As illustrated in Fig. 3, the network takes real and rendered views as inputs, and outputs the residual coordinate map, followed by a PnP method with RANSAC to predict the relative camera pose $(R_{r}^{p},T_{r}^{p})$ . The architecture of the refinement network consists of several Swin blocks to learn features of both real and rendered images.
|
| 132 |
+
|
| 133 |
+
Bidirectional feature fusion. Similar to GS-RelocNet, we design a feature fusion module to combine the features of real and rendered images. The key distinction is that the feature fusion process is bidirectional, following these steps. First, the two sets of features are added together, and a self-attention operation is applied to fuse the real and rendered features. Second, a Swin block is used to further process and learn the fused features. Finally, the resulting features of both parts are obtained by adding the original features to the fused output. Notably, the three steps outlined above can be repeated multiple times to refine the feature fusion process.
|
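A minimal PyTorch-style sketch of one such bidirectional iteration is shown below; the token layout, channel width, and the generic transformer layer standing in for the Swin block are assumptions, not the paper's implementation.

```python
import torch
import torch.nn as nn

class BidirectionalFusion(nn.Module):
    """Sketch of one bidirectional fusion iteration: sum the two streams, self-attend,
    refine with a transformer layer, and add the result back to both inputs."""
    def __init__(self, dim=96, heads=4):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.block = nn.TransformerEncoderLayer(dim, heads, batch_first=True)

    def forward(self, f_real, f_rend):
        # f_real, f_rend: (B, N, dim) token features of the real and rendered views
        s = f_real + f_rend
        s, _ = self.attn(s, s, s)          # fuse the summed features with self-attention
        s = self.block(s)                  # further process the fused features
        return f_real + s, f_rend + s      # residual connections back to both streams
```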
| 134 |
+
|
| 135 |
+

|
| 136 |
+
Figure 3: Architecture of the refinement network. The network takes real and rendered views as inputs, and outputs the residual coordinate map to predict the relative camera pose, which is composed of a real image feature encoder, a rendered image feature encoder and a bidirectional fusion module.
|
| 137 |
+
|
| 138 |
+

|
| 139 |
+
|
| 140 |
+
Loss function of refinement network. For the loss of the residual coordinate map $(\mathrm{Loss}_{map})$, we use the $L_{1}$ loss to train the refinement network. In addition to the coordinate map loss, the refinement network also exploits an auxiliary loss to facilitate feature learning. Specifically, the pose loss is expressed by the following formula,
|
| 141 |
+
|
| 142 |
+
$$
|
| 143 |
+
\mathrm{Loss}_{aux} = \exp(-T) \cdot \|\mathbf{T}^{p} - \mathbf{T}^{g}\| + T + \exp(-Q) \cdot \|\mathbf{Q}^{p} - \mathbf{Q}^{g}\| + Q, \tag{3}
|
| 144 |
+
$$
|
| 145 |
+
|
| 146 |
+
where $\mathbf{T}^p$, $\mathbf{T}^g$ represent the prediction and ground truth of the camera position, $\mathbf{Q}^p$, $\mathbf{Q}^g$ denote the prediction and ground truth of the camera orientation, and $T$, $Q$ are variables learned by the refinement network to balance the position and orientation terms.
|
| 147 |
+
|
| 148 |
+
Although the auxiliary loss directly outputs the 6-DoF relative camera pose, it is not used as the final result in our framework. This decision is based on the observation that direct learning based methods typically yield less accurate localization results Wang et al. (2020). Hence, the refinement network uses the residual coordinate map to predict the camera pose, with the auxiliary loss serving as additional guidance. The total training loss combines the two terms as follows.
|
| 149 |
+
|
| 150 |
+
$$
|
| 151 |
+
\mathrm{Loss} = \mathrm{Loss}_{map} + \alpha \cdot \mathrm{Loss}_{aux}. \tag{4}
|
| 152 |
+
$$
|
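A compact PyTorch-style sketch of Eqs. (3)-(4) is given below, with $T$ and $Q$ as learnable scalars initialised to 0.0 and $\alpha = 0.3$ as stated in the implementation details; tensor shapes and names are illustrative.

```python
import torch
import torch.nn as nn

class RefinementLoss(nn.Module):
    """Sketch of the refinement objective: L1 residual-coordinate-map loss (Loss_map)
    plus the learned-weight auxiliary pose loss of Eq. (3), combined as in Eq. (4)."""
    def __init__(self, alpha=0.3):
        super().__init__()
        self.alpha = alpha
        self.T = nn.Parameter(torch.tensor(0.0))   # balances the position term
        self.Q = nn.Parameter(torch.tensor(0.0))   # balances the orientation term

    def forward(self, map_pred, map_gt, t_pred, t_gt, q_pred, q_gt):
        loss_map = (map_pred - map_gt).abs().mean()
        loss_aux = (torch.exp(-self.T) * torch.norm(t_pred - t_gt) + self.T
                    + torch.exp(-self.Q) * torch.norm(q_pred - q_gt) + self.Q)
        return loss_map + self.alpha * loss_aux
```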
| 153 |
+
|
| 154 |
+
# 4 Experiments

# 4.1 Datasets and Implementation Details
|
| 155 |
+
|
| 156 |
+
Datasets and train-test split. We conduct experiments on the indoor 7 Scenes Shotton et al. (2013), TUM RGB-D Sturm et al. (2012), Bonn Palazzolo et al. (2019), and ScanNet Dai et al. (2017) datasets, and the outdoor MegaDepth Li & Snavely (2018) and Cambridge Landmarks Kendall et al. (2015) datasets. The implementation is divided into scene-dependent and scene-independent settings. In the scene-independent setting, we train GS-RelocNet on ScanNet and test it on 7 Scenes, TUM RGB-D, and Bonn. For the outdoor scene-independent setting, GS-RelocNet is trained on MegaDepth Li & Snavely (2018) and tested on Cambridge Landmarks. When performing the relocalization task in a scene-dependent manner, GS-RelocNet is trained on 7 Scenes and Cambridge Landmarks, respectively.
|
| 157 |
+
|
| 158 |
+
Gaussian selection. In training and inference stages, 4096 Gaussians are selected by spatially uniform sampling. Specifically, we partition the 3D space into $S_{x} \times S_{y} \times S_{z}$ grids with each grid resolution of $0.1\mathrm{m}$ . Let $N_{t}$ denote the total number of Gaussians. For each grid cell containing $N_{g}$ Gaussians, we randomly sample $N_{g} * 4096 / N_{t}$ Gaussians. If the number of sampled Gaussians is less than 4096, we randomly duplicate some samples to reach the desired count for training.
|
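A simple NumPy sketch of this grid-based sampling is shown below; the handling of cells whose proportional share rounds to zero and the final padding step are simplifications of the procedure described above.

```python
import numpy as np

def grid_uniform_sample(positions, target=4096, cell=0.1):
    """Sketch: bucket Gaussian centres (N, 3) into `cell`-sized grid cells and draw
    from each occupied cell roughly in proportion to its occupancy."""
    n_total = len(positions)
    cell_ids = np.unique(np.floor(positions / cell).astype(np.int64),
                         axis=0, return_inverse=True)[1]
    chosen = []
    for c in np.unique(cell_ids):
        idx = np.where(cell_ids == c)[0]
        k = max(1, int(round(len(idx) * target / n_total)))   # at least one per occupied cell here
        chosen.extend(np.random.choice(idx, size=min(k, len(idx)), replace=False))
    chosen = np.asarray(chosen)
    if len(chosen) < target:   # duplicate random samples to reach the desired count
        pad = np.random.choice(chosen, size=target - len(chosen), replace=True)
        chosen = np.concatenate([chosen, pad])
    return chosen[:target]
```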
| 159 |
+
|
| 160 |
+
GS-RelocNet details. The RGB input of our framework is resized to $256 \times 256$ pixels, while 4096 3D Gaussians, spatially uniformly sampled from all 3D Gaussians, are processed by the Point Transformer network. In Fig. 2, the module settings and output filter sizes are indicated near the corresponding modules. Additionally, GS-RelocNet is trained with an AdamW optimizer and a learning rate of $2 \times 10^{-4}$.
|
| 161 |
+
|
| 162 |
+
Refinement network training details. After obtaining the initial poses, we use 3D GS to render the view. Both the real and rendered images are resized to $128 \times 128$, while the output size of the residual coordinate map is $64 \times 64$. In Fig. 3, the module settings and output filter sizes are also annotated. The loss coefficient $\alpha$ in Eq. (4) is set to 0.3, and the variables $T, Q$ are initially set to 0.0.
|
| 163 |
+
|
| 164 |
+
Inference details. After training GS-RelocNet, we use the selected Gaussians and the test image to obtain the confidence map. We first apply a fixed threshold (set to 0.7 in our experiments) to eliminate correspondences with similarity scores below this value. Subsequently, for each pixel
|
| 165 |
+
|
| 166 |
+
associated with multiple Gaussian correspondences, the pixel's coordinate is computed as a weighted average of the selected Gaussians, weighted by their confidence values. Additionally, if the number of correspondences falls below 100, we re-run GS-RelocNet by selecting an alternative set of 4096 Gaussians from the grids that contain Gaussians with confidence higher than 0.7. After determining the pixel and its weighted Gaussian coordinate, we use PnP with RANSAC to solve the initial pose.
|
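The steps above can be summarized with the following OpenCV-based sketch; the re-selection fallback used when fewer than 100 correspondences survive is omitted, and variable names are illustrative.

```python
import numpy as np
import cv2

def initial_pose_from_confidence(conf, pix_xy, gs_xyz, K, thresh=0.7):
    """Sketch: build confidence-weighted 2D-3D correspondences and solve the initial
    pose with PnP + RANSAC. conf: (N_i, N_g) confidence matrix, pix_xy: (N_i, 2) pixel
    coordinates, gs_xyz: (N_g, 3) Gaussian centres, K: (3, 3) camera intrinsics."""
    pts2d, pts3d = [], []
    for i in range(conf.shape[0]):
        w = conf[i]
        keep = w >= thresh                   # discard low-confidence correspondences
        if not np.any(keep):
            continue
        w = w[keep] / w[keep].sum()
        pts2d.append(pix_xy[i])
        pts3d.append((w[:, None] * gs_xyz[keep]).sum(axis=0))   # confidence-weighted 3D point
    pts2d = np.asarray(pts2d, dtype=np.float64)
    pts3d = np.asarray(pts3d, dtype=np.float64)
    ok, rvec, tvec, _ = cv2.solvePnPRansac(pts3d, pts2d, K.astype(np.float64), None)
    R, _ = cv2.Rodrigues(rvec)
    return ok, R, tvec
```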
| 167 |
+
|
| 168 |
+
Based on the initial pose, we render the view using the trained 3D GS. Given the input image and the rendered view, we predict the residual coordinate map with the refinement network, followed by a PnP method with RANSAC to solve the relative pose between them. Finally, the refined pose is obtained by Eq. (2).
|
| 169 |
+
|
| 170 |
+
# 4.2 Static 7 Scenes
|
| 171 |
+
|
| 172 |
+
Table 1: Median position (cm) and rotation $(^{\circ})$ errors on 7 Scenes. The sign $\ddagger$ indicates results with SfM pseudo ground truth, while the others use the original KinectFusion ground truth. In each scene, red and blue marks denote the first- and second-best results, respectively.
|
| 173 |
+
|
| 174 |
+
<table><tr><td></td><td>Method</td><td>Chess</td><td>Fire</td><td>Heads</td><td>Office</td><td>Pumpkin</td><td>Kitchen</td><td>Stairs</td><td>Mean</td></tr><tr><td colspan="10">Scene-dependent</td></tr><tr><td rowspan="4">APR</td><td>MS-Transformer Shavit et al. (2021) (ICCV 2021)</td><td>11/6.38</td><td>23/11.5</td><td>13/13.0</td><td>18/8.14</td><td>17/8.42</td><td>16/8.92</td><td>29/10.3</td><td>18/9.51</td></tr><tr><td>DFNet Chen et al. (2022) (ECCV 2022)</td><td>3/1.12</td><td>6/2.30</td><td>4/2.29</td><td>6/1.54</td><td>7/1.92</td><td>7/1.74</td><td>12/2.63</td><td>6/1.93</td></tr><tr><td>Marepo Chen et al. (2024b) (CVPR 2024)</td><td>2.1/1.24</td><td>2.3/1.39</td><td>1.8/2.03</td><td>2.8/1.26</td><td>3.5/1.48</td><td>4.2/1.71</td><td>5.6/1.67</td><td>3.2/1.54</td></tr><tr><td>MS-HyperPose Ferens & Keller (2025) (CVPR 2025)</td><td>11/4.34</td><td>23/9.79</td><td>13/10.7</td><td>17/6.05</td><td>16/5.24</td><td>17/6.86</td><td>27/6.00</td><td>18/7.00</td></tr><tr><td rowspan="3">SCR</td><td>\( ACE^‡ \) Brachmann et al. (2023) (CVPR 2023)</td><td>0.55/0.18</td><td>0.83/0.33</td><td>0.53/0.33</td><td>1.0/0.29</td><td>1.1/0.22</td><td>0.77/0.21</td><td>2.89/0.81</td><td>1.1/0.34</td></tr><tr><td>DeViLoc Giang et al. (2024) (CVPR 2024)</td><td>2/0.78</td><td>2/0.74</td><td>1/0.65</td><td>3/0.82</td><td>4/1.02</td><td>3/1.19</td><td>4/1.12</td><td>2.7/1.10</td></tr><tr><td>\( GLACE^‡ \) Wang et al. (2024a) (CVPR 2024)</td><td>0.6/0.18</td><td>0.9/0.34</td><td>0.6/0.34</td><td>1.1/0.29</td><td>0.9/0.23</td><td>0.8/0.20</td><td>3.2/0.93</td><td>1.2/0.36</td></tr><tr><td rowspan="3">NeRF</td><td>CROSSFIRE Moreau et al. (2023) (CVPR 2023)</td><td>1/0.4</td><td>5/1.9</td><td>3/2.3</td><td>5/1.6</td><td>3/0.8</td><td>2/0.8</td><td>12/1.9</td><td>4/1.10</td></tr><tr><td>\( NeRFMatch^‡ \) Zhou et al. (2024a) (ECCV 2024)</td><td>0.9/0.30</td><td>1.1/0.40</td><td>1.5/1.00</td><td>3/0.80</td><td>2.2/0.60</td><td>1.0/0.30</td><td>10.1/1.70</td><td>2.8/0.70</td></tr><tr><td>PMNet Lin et al. (2024a) (ECCV 2024)</td><td>4/1.70</td><td>10/4.51</td><td>7/4.23</td><td>7/1.96</td><td>14/3.33</td><td>14/3.36</td><td>16/3.62</td><td>10/3.24</td></tr><tr><td rowspan="5">3D GS</td><td>DFNet + GS-CPR‡ Liu et al. (2025) (ICLR 2025)</td><td>0.7/0.20</td><td>0.9/0.32</td><td>0.6/0.36</td><td>1.2/0.32</td><td>1.3/0.31</td><td>0.9/0.25</td><td>2.2/0.61</td><td>1.1/0.34</td></tr><tr><td>\( ACE + GS-CPR‡ \) Liu et al. (2025) (ICLR 2025)</td><td>0.5/0.15</td><td>0.6/0.25</td><td>0.4/0.28</td><td>0.9/0.26</td><td>1.0/0.23</td><td>0.7/0.17</td><td>1.4/0.42</td><td>0.8/0.25</td></tr><tr><td>\( STDLoc^‡ \) Huang et al. (2025) (CVPR 2025)</td><td>0.46/0.15</td><td>0.57/0.24</td><td>0.45/0.26</td><td>0.86/0.24</td><td>0.93/0.21</td><td>0.63/0.19</td><td>1.42/0.41</td><td>0.76/0.24</td></tr><tr><td>\( Ours^‡ \) (No Refinement)</td><td>0.44/0.17</td><td>0.61/0.24</td><td>0.39/0.30</td><td>0.89/0.24</td><td>0.95/0.28</td><td>0.60/0.22</td><td>1.36/0.39</td><td>0.75/0.26</td></tr><tr><td>\( Ours^‡ \)</td><td>0.41/0.15</td><td>0.55/0.21</td><td>0.37/0.26</td><td>0.85/0.24</td><td>0.92/0.25</td><td>0.58/0.18</td><td>1.30/0.35</td><td>0.71/0.23</td></tr><tr><td colspan="10">Scene-independent</td></tr><tr><td>Hand-crafted</td><td>Active Search Sattler et al. (2016) (TPAMI)</td><td>4/1.96</td><td>3/1.53</td><td>2/1.45</td><td>9/3.61</td><td>8/3.10</td><td>7/3.37</td><td>3/2.22</td><td>51/2.46</td></tr><tr><td rowspan="2">RPR</td><td>RelocNet Balntas et al. 
(2018) (ECCV 2018)</td><td>21/10.90</td><td>32/11.80</td><td>15/13.40</td><td>31/10.30</td><td>40/10.90</td><td>33/10.30</td><td>33/11.40</td><td>29.3/11.29</td></tr><tr><td>Relative PoseNet Laskar et al. (2017) (ICCV 2017)</td><td>31/15.00</td><td>40/19.00</td><td>24/22.20</td><td>38/14.10</td><td>44/18.20</td><td>41/16.50</td><td>35/23.60</td><td>36.1/18.37</td></tr><tr><td rowspan="5">SCR</td><td>InLoc Taira et al. (2018) (CVPR 2018)</td><td>3/1.05</td><td>3/1.07</td><td>2/1.16</td><td>3/1.05</td><td>5/1.55</td><td>4/1.31</td><td>9/2.47</td><td>4.1/1.38</td></tr><tr><td>Pixloc Sarlin et al. (2021) (CVPR 2021)</td><td>2/0.80</td><td>2/0.73</td><td>1/0.82</td><td>3/0.82</td><td>4/1.21</td><td>3/1.20</td><td>5/1.30</td><td>2.9/0.98</td></tr><tr><td>Wang et al. Wang & Qi (2023a) (ISMAR 2023)</td><td>2.4/0.97</td><td>2.0/0.99</td><td>1.6/1.27</td><td>2.4/1.01</td><td>3.7/1.20</td><td>2.8/1.14</td><td>3.1/1.22</td><td>2.6/1.11</td></tr><tr><td>DUSt3R Wang et al. (2024b) (CVPR 2024)</td><td>3/0.96</td><td>4/1.02</td><td>1/1.00</td><td>4/1.04</td><td>5/1.26</td><td>4/1.36</td><td>21/4.06</td><td>6/1.53</td></tr><tr><td>Reloc3R Dong et al. (2025) (CVPR 2025)</td><td>3/0.99</td><td>4/1.13</td><td>2/1.23</td><td>5/0.88</td><td>7/1.14</td><td>5/1.23</td><td>12/1.25</td><td>5.4/1.12</td></tr><tr><td rowspan="2">3D GS</td><td>Ours (No Refinement)</td><td>1.3/0.81</td><td>1.2/0.65</td><td>0.7/0.73</td><td>1.6/0.89</td><td>2.7/1.01</td><td>2.5/1.10</td><td>2.1/1.00</td><td>1.7/0.88</td></tr><tr><td>Ours</td><td>1.0/0.72</td><td>1.0/0.64</td><td>0.6/0.70</td><td>1.4/0.82</td><td>2.2/0.93</td><td>2.0/1.02</td><td>1.9/0.92</td><td>1.4/0.82</td></tr></table>
|
| 175 |
+
|
| 176 |
+
In Table 1, we provide the experimental localization results on 7 Scenes, covering both scene-dependent and scene-independent categories and reporting the median position and rotation errors.
|
| 177 |
+
|
| 178 |
+
Scene-dependent method comparison. In the scene-dependent setting, our method achieves the best accuracy on both the mean position and orientation metrics. Across the seven scenes, our method obtains the best position error on all scenes and the best orientation error on five, demonstrating state-of-the-art performance in the scene-dependent situation. Additionally, our method achieves $1.21\text{cm} /0.61^{\circ}$ with the original ground truth, which also demonstrates superior accuracy compared to the other evaluated methods.
|
| 179 |
+
|
| 180 |
+
Scene-independent method comparison. Among scene-independent approaches, our method achieves the lowest mean position and orientation errors across all seven scenes. Moreover, our method significantly reduces the error from the previous best $2.6\mathrm{cm} / 0.98^{\circ}$ to $1.4\mathrm{cm} / 0.82^{\circ}$ ($\downarrow 46.2\% /16.3\%$). Beyond localization accuracy, to our knowledge, our framework is the first scene-independent relocalization method based on 3D GS.
|
| 181 |
+
|
| 182 |
+
Result visualization. Fig. 4 provides visualization results of the camera trajectories with green poses denoting the ground truth and blue ones representing our results. The results show minimal differences between the predicted and ground truth poses, demonstrating the suitability of our method.
|
| 183 |
+
|
| 184 |
+
# 4.3 Dynamic TUM RGB-D and Bonn
|
| 185 |
+
|
| 186 |
+
Challenges on TUM RGB-D and Bonn. The TUM RGB-D test sequences involve two individuals walking around a table, which increases the complexity of localization. Similarly, the Bonn dataset features highly dynamic sequences, such as individuals manipulating boxes or interacting with balloons.
|
| 187 |
+
|
| 188 |
+

|
| 189 |
+
Figure 4: The visualization results of camera pose on 7 Scenes. In each scene, the green and blue poses denote the ground truth and prediction respectively.
|
| 190 |
+
|
| 191 |
+
Table 2: RMSE of ATE [cm] in four dynamic scenes of TUM RGB-D. In each scene, red and blue marks denote the first- and second-best results, respectively.
|
| 192 |
+
|
| 193 |
+
<table><tr><td rowspan="2"></td><td rowspan="2">Method</td><td colspan="4">fr3_walking_</td><td rowspan="2">Mean</td></tr><tr><td>xyz</td><td>static</td><td>rpy</td><td>half</td></tr><tr><td rowspan="2">Hand-crafted</td><td>ORB-SLAM2 Mur-Artal & Tardós (2017)</td><td>45.9</td><td>9.3</td><td>65.8</td><td>32.8</td><td>38.5</td></tr><tr><td>DGM-VINS Song et al. (2023)</td><td>3.6</td><td>1.3</td><td>7.1</td><td>3.3</td><td>3.8</td></tr><tr><td rowspan="3">Hand-crafted + Semantics</td><td>DynaSLAM Bescos et al. (2018)</td><td>1.5</td><td>0.6</td><td>3.5</td><td>2.5</td><td>2.0</td></tr><tr><td>DS-SLAM Yu et al. (2018)</td><td>2.5</td><td>0.8</td><td>44.4</td><td>3.0</td><td>12.7</td></tr><tr><td>LC-CRF SLAM Du et al. (2020)</td><td>1.6</td><td>1.1</td><td>4.6</td><td>2.8</td><td>2.5</td></tr><tr><td>3D GS</td><td>Ours</td><td>1.1</td><td>0.4</td><td>2.2</td><td>2.0</td><td>1.4</td></tr></table>
|
| 194 |
+
|
| 195 |
+
Root Mean Square Error (RMSE) of Absolute Trajectory Error (ATE) on dynamic TUM RGB-D. Table 2 presents the RMSE of ATE for four dynamic scenes, compared against ORB-SLAM2, DynaSLAM, DS-SLAM, LC-CRF SLAM, and DGM-VINS. Our approach consistently outperforms these state-of-the-art SLAM systems in the RMSE of ATE metric, demonstrating superior localization accuracy in dynamic environments.
|
| 196 |
+
|
| 197 |
+
RMSE of ATE on dynamic Bonn. We evaluated our framework on the Bonn dataset across 20 test scenes, consistent with LC-CRF SLAM, and compared it with ReFusion Palazzolo et al. (2019), MaskFusion Runz et al. (2018), and LC-CRF SLAM Du et al. (2020). The mean RMSE of ATE results are $23.8\mathrm{cm}$ (ReFusion), $25.1\mathrm{cm}$ (MaskFusion), $6.8\mathrm{cm}$ (LC-CRF SLAM), and $4.3\mathrm{cm}$ (Ours). Our framework achieves the lowest mean RMSE of ATE, highlighting its exceptional localization accuracy in highly dynamic scenes.
|
| 198 |
+
|
| 199 |
+
# 4.4 Cambridge Landmarks
|
| 200 |
+
|
| 201 |
+
Table 3: Median localization results on Cambridge Landmarks compared with other methods. Units of position and orientation are centimeters (cm) and degrees $(^{\circ})$. $\star$ denotes results in a scene-independent setting. In each scene, red and blue marks denote the first- and second-best results, respectively.
|
| 202 |
+
|
| 203 |
+
<table><tr><td></td><td>Method</td><td>College</td><td>Hospital</td><td>Shop</td><td>StMary</td><td>Mean</td></tr><tr><td rowspan="2">APR</td><td>MS-Transformer Shavit et al. (2021) (ICCV 2021)</td><td>85/1.45</td><td>175/2.43</td><td>88/3.20</td><td>166/4.12</td><td>129/2.80</td></tr><tr><td>DFNet Chen et al. (2022) (ECCV 2022)</td><td>73/2.37</td><td>200/2.98</td><td>67/2.21</td><td>137/4.02</td><td>119/2.90</td></tr><tr><td rowspan="5">SCR</td><td>InLoc Taira et al. (2018) (CVPR 2018)</td><td>46/0.8</td><td>48/1.0</td><td>11/0.5</td><td>18/0.6</td><td>31/0.73</td></tr><tr><td>DSAC* Brachmann & Rother (2021) (TPAMI)</td><td>15/0.3</td><td>21/0.4</td><td>5/0.3</td><td>13/0.4</td><td>14/0.35</td></tr><tr><td>ACE Brachmann et al. (2023) (CVPR 2023)</td><td>29/0.38</td><td>31/0.61</td><td>5/0.3</td><td>19/0.6</td><td>21/0.47</td></tr><tr><td>DUSt3R-224* Wang et al. (2024b) (CVPR 2024)</td><td>20/0.32</td><td>26/0.46</td><td>9/0.38</td><td>11/0.38</td><td>17/0.39</td></tr><tr><td>Reloc3R-224* Dong et al. (2025) (CVPR 2025)</td><td>47/0.41</td><td>87/0.66</td><td>18/0.53</td><td>41/0.73</td><td>48/0.58</td></tr><tr><td rowspan="4">NeRF</td><td>NeuMap Tang et al. (2023) (CVPR 2023)</td><td>14/0.2</td><td>19/0.4</td><td>6/0.3</td><td>17/0.5</td><td>14/0.35</td></tr><tr><td>CROSSFIRE Moreau et al. (2023) (ICCV 2023)</td><td>47/0.7</td><td>43/0.7</td><td>20/1.2</td><td>39/1.4</td><td>37/1.00</td></tr><tr><td>NeRFMatch Zhou et al. (2024b) (ECCV 2024)</td><td>12.7/0.2</td><td>20.7/0.4</td><td>8.7/0.4</td><td>11.3/0.4</td><td>13/0.35</td></tr><tr><td>PMNet Lin et al. (2024a) (ECCV 2024)</td><td>68/1.97</td><td>103/1.31</td><td>58/2.10</td><td>133/3.73</td><td>91/2.28</td></tr><tr><td rowspan="6">3D Gaussian</td><td>DFNet + GS-CPR Liu et al. (2025) (ICLR 2025)</td><td>26/0.34</td><td>48/0.72</td><td>10/0.36</td><td>27/0.62</td><td>28/0.51</td></tr><tr><td>ACE + GS-CPR Liu et al. (2025) (ICLR 2025)</td><td>25/0.29</td><td>26/0.38</td><td>5/0.23</td><td>13/0.41</td><td>17/0.33</td></tr><tr><td>STDLoc Huang et al. (2025) (CVPR 2025)</td><td>15/0.17</td><td>11.9/0.21</td><td>3/0.13</td><td>4.7/0.14</td><td>9/0.16</td></tr><tr><td>Ours (No Refinement)</td><td>11/0.19</td><td>13/0.26</td><td>4/0.18</td><td>7/0.15</td><td>9/0.20</td></tr><tr><td>Ours*</td><td>12/0.18</td><td>13/0.25</td><td>5/0.19</td><td>7/0.20</td><td>9/0.21</td></tr><tr><td>Ours</td><td>9/0.15</td><td>10/0.19</td><td>3/0.15</td><td>5/0.13</td><td>7/0.16</td></tr></table>
|
| 204 |
+
|
| 205 |
+
In Table 3, we provide the experimental localization results on Cambridge Landmarks in comparison with APR, SCR, NeRF, and 3D GS methods. The results on Cambridge Landmarks demonstrate that our method achieves the best performance on both the mean position and orientation metrics. Compared
|
| 206 |
+
|
| 207 |
+
with the recent PMNet Lin et al. (2024a), the position error is significantly reduced from 91cm to 7cm, further validating the effectiveness of our framework.
|
| 208 |
+
|
| 209 |
+
Generalization on Cambridge Landmarks. To assess generalization, we trained GS-RelocNet on the MegaDepth dataset Li & Snavely (2018) and tested it on the Cambridge Landmarks dataset in a scene-independent setting (marked as $\star$). The mean pose error across the four scenes is $9\mathrm{cm} / 0.21^{\circ}$, surpassing the performance of DUSt3R ($17\mathrm{cm} / 0.39^{\circ}$) and Reloc3R ($48\mathrm{cm} / 0.58^{\circ}$). These results underscore the robustness and generalization capability of GS-RelocNet in diverse, unseen environments.
|
| 210 |
+
|
| 211 |
+
Discussion of pose refinement. In Tables 1 and 3, we also present the localization performance without pose refinement. Two notable observations emerge from the results. First, pose refinement leads to improvements in localization accuracy across all scenes, demonstrating the positive impact of the refinement process using 3D Gaussians. Second, even without pose refinement, our method remains competitive with other state-of-the-art approaches. On 7 Scenes, the scene-dependent result is comparable with STDLoc, while the scene-independent performance is clearly more accurate than the other methods. On Cambridge Landmarks, the results are also comparable to those of STDLoc.
|
| 212 |
+
|
| 213 |
+
Discussion of running time. The runtime of our framework comprises initial pose estimation (confidence map regression and PnP) and pose refinement (view rendering, residual map regression, and PnP). On average, it processes test images in 65 ms (15.4 FPS) on an Nvidia 4090 GPU across 7 Scenes and Cambridge Landmarks. The average running times per frame are as follows: 39 ms for confidence regression, 4 ms for the initial PnP with RANSAC, 9 ms for view rendering, 8 ms for residual coordinate regression, and 5 ms for PnP in pose refinement. This outperforms 3D GS based methods such as ACE Brachmann et al. (2023) + GS-CPR Liu et al. (2025) (190 ms, 5.3 FPS) and STDLoc Huang et al. (2025) (143 ms, 7 FPS), highlighting our computational efficiency.
|
| 214 |
+
|
| 215 |
+
Discussion of comparison with 3D GS based methods. From Tables 1 and 3, we can see that the localization performance of the 3D GS based STDLoc and ACE + GS-CPR is slightly less accurate than ours, but remains competitive. Compared to these methods, the additional advantages of our framework lie in two aspects. First, our method supports scene-independent relocalization, while STDLoc and GS-CPR require training on the target scene before localization. Second, our inference speed is more than twice that of the other two methods.
|
| 216 |
+
|
| 217 |
+
# 4.5 Detailed Studies
|
| 218 |
+
|
| 219 |
+
Table 4: Scene-independent localization results on 7 Scenes with original ground truth and Cambridge Landmarks with different settings.
|
| 220 |
+
|
| 221 |
+
<table><tr><td></td><td>Fusion in encoder</td><td>Fusion in decoder</td><td>Fusion in Refinement</td><td>7 Scenes</td><td>Cambridge Landmarks</td></tr><tr><td>S1</td><td>×</td><td>×</td><td>×</td><td>2.1/1.17</td><td>14/0.35</td></tr><tr><td>S2</td><td>✓</td><td>×</td><td>×</td><td>1.9/1.14</td><td>13/0.32</td></tr><tr><td>S3</td><td>×</td><td>✓</td><td>×</td><td>1.9/1.11</td><td>12/0.36</td></tr><tr><td>S4</td><td>×</td><td>×</td><td>✓</td><td>2.0/1.03</td><td>13/0.35</td></tr><tr><td>S5</td><td>×</td><td>✓</td><td>✓</td><td>1.6/0.97</td><td>11/0.28</td></tr><tr><td>S6</td><td>✓</td><td>×</td><td>✓</td><td>1.7/0.92</td><td>10/0.26</td></tr><tr><td>S7</td><td>✓</td><td>✓</td><td>×</td><td>1.5/0.84</td><td>9/0.23</td></tr><tr><td>S8</td><td>✓</td><td>✓</td><td>✓</td><td>1.4/0.82</td><td>9/0.21</td></tr><tr><td></td><td colspan="3">Iterations of fusion model in GS-RelocNet</td><td></td><td></td></tr><tr><td>S9</td><td></td><td>1</td><td></td><td>1.7/0.92</td><td>12/0.29</td></tr><tr><td>S10</td><td></td><td>2</td><td></td><td>1.4/0.82</td><td>9/0.21</td></tr><tr><td>S11</td><td></td><td>4</td><td></td><td>1.4/0.86</td><td>10/0.22</td></tr><tr><td>S12</td><td></td><td>8</td><td></td><td>1.5/0.87</td><td>11/0.24</td></tr></table>
|
| 222 |
+
|
| 223 |
+
Discussion of outstanding performance. The results of the aforementioned experiments demonstrate state-of-the-art performance of our framework. We attribute our outstanding performance to three main factors. First, our framework leverages the full potential of 3D GS for both initial pose estimation and pose refinement. Second, GS-RelocNet is specifically designed to establish accurate correspondences between pixels and 3D Gaussians. Third, we propose a refinement network that predicts the relative pose between real and rendered images using a bidirectional feature fusion module. In the following sections, we present ablation experiments to demonstrate the effectiveness of these fusion modules.
|
| 224 |
+
|
| 225 |
+
Ablation studies of fusion modules. In S1 - S8 of Table 4, we conduct ablation experiments on the three fusion modules, including the unidirectional feature fusion module in the encoder and decoder of
|
| 226 |
+
|
| 227 |
+
GS-RelocNet and the bidirectional fusion module in the refinement network. In S5, S6, and S7, each module is removed individually. With this setting, the two features are reshaped and concatenated directly. It is evident that accuracy on both datasets decreases in comparison to setting S8. In S2, S3, and S4, two modules are removed at a time, and the results are less accurate than those in settings S5, S6, and S7. In setting S1, where all three modules are removed, the accuracy decreases most significantly, further confirming the positive impact of the three fusion modules.
|
| 228 |
+
|
| 229 |
+
Detailed studies of the iterations in the unidirectional feature fusion module. In S9 - S12 of Table 4, we explore the effect of the number of iterations in the unidirectional feature fusion module by setting the iteration count to 1, 2, 4, and 8, respectively. When the iteration count is set to 1, the estimation performance is less accurate than in the other three settings. Moreover, the results for 2, 4, and 8 iterations are comparable. This discrepancy can be explained by the fact that with only one iteration, the RGB and point cloud features are not sufficiently fused, resulting in less accurate estimates. In contrast, with 2, 4, or 8 iterations, the fusion between the two feature types is sufficient for RGB based pose estimation, leading to comparable performance.
|
| 230 |
+
|
| 231 |
+
Limitation discussion. A primary limitation of our framework is its reliance on a high-quality 3D GS model of the target scene. When the 3D GS model is of suboptimal quality, localization performance may degrade, leading to failures or significant errors.
|
| 232 |
+
|
| 233 |
+
# 4.6 AR Application
|
| 234 |
+
|
| 235 |
+

|
| 236 |
+
Figure 5: AR effect on the Chess and Office scene of 7 Scenes. We render a virtual walking person and a bottle onto the Chess scene, and two virtual bottles and a static person onto the Office scene based on the predicted camera pose of our framework.
|
| 237 |
+
|
| 238 |
+
To demonstrate the performance in real-world AR applications, we present virtual-real fusion results for two scenes from the 7 Scenes dataset, Chess and Office, using the camera poses predicted by our framework, as shown in Fig. 5. Specifically, we render a virtual walking person and a bottle onto the Chess scene, and two virtual bottles and a static person onto the Office scene.
|
| 239 |
+
|
| 240 |
+
# 5 Conclusion
|
| 241 |
+
|
| 242 |
+
In this paper, we propose a novel 3D Gaussian based camera relocalization framework composed of two stages: an initial pose estimation stage, which predicts 2D-3D correspondences between image pixels and 3D Gaussians, and a refinement stage, which estimates the residual pose between the target and rendered views. To estimate the 2D-3D correspondences, we introduce a descriptor matching network called GS-RelocNet. Within GS-RelocNet, we design a unidirectional feature fusion module to combine RGB features with Gaussian features. After obtaining the initial camera pose, we proceed with the pose refinement network. In this refinement network, we propose a bidirectional feature fusion module to merge the features from the real and rendered images. To validate the performance of our framework, we conduct experiments on the indoor 7 Scenes, TUM RGB-D, and Bonn datasets and the outdoor Cambridge Landmarks dataset. The results demonstrate state-of-the-art localization accuracy on these datasets. Additionally, we provide detailed studies on the feature fusion modules and the refinement stage, further highlighting the effectiveness of our approach.
|
| 243 |
+
|
| 244 |
+
In summary, this paper makes a significant contribution by introducing a scene-independent localization framework through the full utilization of 3D Gaussians. By leveraging 3D Gaussians in both the initial and refinement stages, our method delivers more accurate localization results across a variety of scenes. We hope that the proposed framework can serve as a universal localization pipeline.
|
| 245 |
+
|
| 246 |
+
# Acknowledgments
|
| 247 |
+
|
| 248 |
+
The paper is supported by Shandong Provincial Natural Science Foundation (No. ZR2024QF215), Key Research and Development Program of Rizhao (No. 2024ZDYF010053), National Natural Science Foundation of China (No. 62072020) and the Open Project Program of State Key Laboratory of Virtual Reality Technology and Systems, Beihang University (No. VRLAB2024A**).
|
| 249 |
+
|
| 250 |
+
The authors thank the Zhiyang Innovation Technology Co., Ltd. for computing power and data support.
|
| 251 |
+
|
| 252 |
+
# References
|
| 253 |
+
|
| 254 |
+
Balntas, V., Li, S., and Prisacariu, V. Relocnet: Continuous metric learning relocalisation using neural nets. In European Conference on Computer Vision, pp. 751-767, 2018.
|
| 255 |
+
Bescos, B., Fácil, J. M., Civera, J., and Neira, J. Dynaslam: Tracking, mapping, and inpainting in dynamic scenes. IEEE Robotics and Automation Letters, 3(4):4076-4083, 2018.
|
| 256 |
+
Brachmann, E. and Rother, C. Learning less is more-6d camera localization via 3d surface regression. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 4654-4662, 2018.
|
| 257 |
+
Brachmann, E. and Rother, C. Visual camera re-localization from rgb and rgb-d images using dsac. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(9):5847-5865, 2021.
|
| 258 |
+
Brachmann, E., Cavallari, T., and Prisacariu, V. A. Accelerated coordinate encoding: Learning to relocalize in minutes using rgb and poses. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5044-5053, 2023.
|
| 259 |
+
Chen, S., Li, X., Wang, Z., and Prisacariu, V. A. Dfnet: Enhance absolute pose regression with direct feature matching. In European Conference on Computer Vision, pp. 1-17. Springer, 2022.
|
| 260 |
+
Chen, S., Bhalgat, Y., Li, X., Bian, J.-W., Li, K., Wang, Z., and Prisacariu, V. A. Neural refinement for absolute pose regression with feature synthesis. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 20987-20996, 2024a.
|
| 261 |
+
Chen, S., Cavallari, T., Prisacariu, V. A., and Brachmann, E. Map-relative pose regression for visual relocalization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 20665-20674, 2024b.
|
| 262 |
+
Dai, A., Chang, A. X., Savva, M., Halber, M., Funkhouser, T., and Nießner, M. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 5828-5839, 2017.
|
| 263 |
+
Dong, S., Wang, S., Liu, S., Cai, L., Fan, Q., Kannala, J., and Yang, Y. Reloc3r: Large-scale training of relative camera pose regression for generalizable, fast, and accurate visual localization. In IEEE Computer Vision and Pattern Recognition Conference, pp. 16739-16752, 2025.
|
| 264 |
+
Du, Z.-J., Huang, S.-S., Mu, T.-J., Zhao, Q., Martin, R. R., and Xu, K. Accurate dynamic slam using crf-based long-term consistency. IEEE Transactions on Visualization and Computer Graphics, 28(4):1745-1757, 2020.
|
| 265 |
+
Ferens, R. and Keller, Y. Hyperpose: Hypernetwork-infused camera pose localization and an extended cambridge landmarks dataset. arXiv preprint arXiv:2303.02610, 2025.
|
| 266 |
+
Germain, H., DeTone, D., Pascoe, G., Schmidt, T., Novotny, D., Newcombe, R., Sweeney, C., Szeliski, R., and Balntas, V. Feature query networks: Neural surface description for camera pose refinement. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5071-5081, 2022.
|
| 267 |
+
Giang, K. T., Song, S., and Jo, S. Learning to produce semi-dense correspondences for visual localization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 19468-19478, 2024.
|
| 268 |
+
Hu, J., Mao, M., Bao, H., Zhang, G., and Cui, Z. Cp-slam: Collaborative neural point-based slam system. Advances in Neural Information Processing Systems, 36:39429-39442, 2023.
|
| 269 |
+
Huang, Z., Yu, H., Shentu, Y., Yuan, J., and Zhang, G. From sparse to dense: Camera relocalization with scene-specific detector from feature gaussian splatting. In IEEE Computer Vision and Pattern Recognition Conference, pp. 27059-27069, 2025.
|
| 270 |
+
|
| 271 |
+
Jia, P., Liu, Y., Li, X., Zhao, X., Wang, Y., Du, Y., Han, X., Wei, X., Wang, S., and Yin, D. G3: an effective and adaptive framework for worldwide geolocation using large multi-modality models. Advances in Neural Information Processing Systems, 37:53198-53221, 2024.
|
| 272 |
+
Keetha, N., Karhade, J., Jatavallabhula, K. M., Yang, G., Scherer, S., Ramanan, D., and Luiten, J. Splatam: Splat track & map 3d gaussians for dense rgb-d slam. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 21357-21366, 2024.
|
| 273 |
+
Kendall, A. and Cipolla, R. Geometric loss functions for camera pose regression with deep learning. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 5974-5983, 2017.
|
| 274 |
+
Kendall, A., Grimes, M., and Cipolla, R. Posenet: A convolutional network for real-time 6-dof camera relocalization. In IEEE International Conference on Computer Vision, pp. 2938-2946, 2015.
|
| 275 |
+
Kerbl, B., Kopanas, G., Leimkuhler, T., and Drettakis, G. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics, 42(4):139-1, 2023.
|
| 276 |
+
Kulhanek, J., Peng, S., Kukelova, Z., Pollefeys, M., and Sattler, T. Wildgaussians: 3d gaussian splatting in the wild. arXiv preprint arXiv:2407.08447, 2024.
|
| 277 |
+
Laskar, Z., Melekhov, I., Kalia, S., and Kannala, J. Camera relocalization by computing pairwise relative poses using convolutional neural network. In IEEE International Conference on Computer Vision, pp. 929-938, 2017.
|
| 278 |
+
Leroy, V., Cabon, Y., and Revaud, J. Grounding image matching in 3d with mast3r. In European Conference on Computer Vision, pp. 71-91, 2024.
|
| 279 |
+
Li, Z. and Snavely, N. Megadepth: Learning single-view depth prediction from internet photos. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 2041-2050, 2018.
|
| 280 |
+
Lin, J., Gu, J., Wu, B., Fan, L., Chen, R., Liu, L., and Ye, J. Learning neural volumetric pose features for camera localization. In European Conference on Computer Vision, pp. 198-214. Springer, 2024a.
|
| 281 |
+
Lin, X., Ruan, J., Yang, Y., He, L., Guan, Y., and Zhang, H. Robust data association against detection deficiency for semantic slam. IEEE Transactions on Automation Science and Engineering, 21(1):868-880, 2024b.
|
| 282 |
+
Liu, C., Chen, S., Bhalgat, Y., Hu, S., Cheng, M., Wang, Z., Prisacariu, V. A., and Braud, T. Gsloc: Efficient camera pose refinement via 3d gaussian splatting. arXiv preprint arXiv:2408.11085, 2024a.
|
| 283 |
+
Liu, C., Chen, S., Zhao, Y., Huang, H., Prisacariu, V., and Braud, T. Hr-apr: Apr-agnostic framework with uncertainty estimation and hierarchical refinement for camera relocalisation. arXiv preprint arXiv:2402.14371, 2024b.
|
| 284 |
+
Liu, C., Chen, S., Bhalgat, Y. S., Hu, S., Cheng, M., Wang, Z., Prisacariu, V. A., and Braud, T. Gs-cpr: Efficient camera pose refinement via 3d gaussian splatting. In International Conference on Learning Representations, 2025.
|
| 285 |
+
Liu, L., Li, H., and Dai, Y. Efficient global 2d-3d matching for camera localization in a large-scale 3d map. In IEEE International Conference on Computer Vision, pp. 2372-2381, 2017.
|
| 286 |
+
Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., and Guo, B. Swin transformer: Hierarchical vision transformer using shifted windows. In IEEE/CVF International Conference on Computer Vision, pp. 10012-10022, 2021.
|
| 287 |
+
Mildenhall, B., Srinivasan, P. P., Tancik, M., Barron, J. T., Ramamoorthi, R., and Ng, R. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021.
|
| 288 |
+
Moreau, A., Piasco, N., Bennehar, M., Tsishkou, D., Stanciulescu, B., and de La Fortelle, A. Crossfire: Camera relocalization on self-supervised features from an implicit representation. In IEEE International Conference on Computer Vision, pp. 252-262, 2023.
|
| 289 |
+
Mur-Artal, R. and Tardós, J. D. Orb-slam2: An open-source slam system for monocular, stereo, and rgb-d cameras. IEEE Transactions on Robotics, 33(5):1255-1262, 2017.
|
| 290 |
+
Oquab, M., Darcet, T., Moutakanni, T., Vo, H. V., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., Howes, R., Huang, P.-Y., Xu, H., Sharma, V., Li, S.-W., Galuba, W., Rabbat, M., Assran, M., Ballas, N., Synnaeve, G., Misra, I., Jegou, H., Mairal, J., Labatut, P., Joulin, A., and Bojanowski, P. Dinov2: Learning robust visual features without supervision, 2023.
|
| 291 |
+
|
| 292 |
+
Palazzolo, E., Behley, J., Lottes, P., Giguere, P., and Stachniss, C. Refusion: 3d reconstruction in dynamic environments for rgb-d cameras exploiting residuals. In IEEE/RSJ International Conference on Intelligent Robots and Systems, pp. 7855-7862, 2019.
|
| 293 |
+
Runz, M., Buffier, M., and Agapito, L. Maskfusion: Real-time recognition, tracking and reconstruction of multiple moving objects. In IEEE International Symposium on Mixed and Augmented Reality, pp. 10-20. IEEE, 2018.
|
| 294 |
+
Sarlin, P.-E., Unagar, A., Larsson, M., Germain, H., Toft, C., Larsson, V., Pollefeys, M., Lepetit, V., Hammarstrand, L., Kahl, F., et al. Back to the feature: Learning robust camera localization from pixels to pose. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3247-3257, 2021.
|
| 295 |
+
Sattler, T., Leibe, B., and Kobbelt, L. Efficient & effective prioritized matching for large-scale image-based localization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 39(9):1744-1756, 2016.
|
| 296 |
+
Sattler, T., Leibe, B., and Kobbelt, L. Efficient & effective prioritized matching for large-scale image-based localization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 39(9):1744-1756, 2017.
|
| 297 |
+
Shavit, Y., Ferens, R., and Keller, Y. Learning multi-scene absolute pose regression with transformers. In IEEE/CVF International Conference on Computer Vision, pp. 2733-2742, 2021.
|
| 298 |
+
Shotton, J., Glocker, B., Zach, C., Izadi, S., Criminisi, A., and Fitzgibbon, A. Scene coordinate regression forests for camera relocalization in rgb-d images. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 2930-2937, 2013.
|
| 299 |
+
Song, B., Yuan, X., Ying, Z., Yang, B., Song, Y., and Zhou, F. Dgm-vins: Visual-inertial slam for complex dynamic environments with joint geometry feature extraction and multiple object tracking. IEEE Transactions on Instrumentation and Measurement, 72:1-11, 2023.
|
| 300 |
+
Sturm, J., Engelhard, N., Endres, F., Burgard, W., and Cremers, D. A benchmark for the evaluation of rgb-d slam systems. In IEEE/RSJ International Conference on Intelligent Robots and Systems, pp. 573-580, 2012.
|
| 301 |
+
Sun, J., Shen, Z., Wang, Y., Bao, H., and Zhou, X. Loftr: Detector-free local feature matching with transformers. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8922-8931, 2021.
|
| 302 |
+
Taira, H., Okutomi, M., Sattler, T., Cimpoi, M., Pollefeys, M., Sivic, J., Pajdla, T., and Torii, A. Inloc: Indoor visual localization with dense matching and view synthesis. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 7199-7209, 2018.
|
| 303 |
+
Tang, S., Tang, S., Tagliasacchi, A., Tan, P., and Furukawa, Y. Neumap: Neural coordinate mapping by auto-transdecoder for camera localization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 929-939, 2023.
|
| 304 |
+
Valentin, J., Dai, A., Nießner, M., Kohli, P., Torr, P., Izadi, S., and Keskin, C. Learning to navigate the energy landscape. In International Conference on 3D Vision, pp. 323-332, 2016.
|
| 305 |
+
Wang, B., Chen, C., Lu, C. X., Zhao, P., Trigoni, N., and Markham, A. Atloc: Attention guided camera localization. In AAAI Conference on Artificial Intelligence, volume 34, pp. 10393-10401, 2020.
|
| 306 |
+
Wang, F., Jiang, X., Galliani, S., Vogel, C., and Pollefeys, M. Glace: Global local accelerated coordinate encoding. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 21562-21571, 2024a.
|
| 307 |
+
Wang, J. and Qi, Y. Camera relocalization using deep point cloud generation and hand-crafted feature refinement. In IEEE International Conference on Robotics and Automation, pp. 5891-5897, 2021.
|
| 308 |
+
Wang, J. and Qi, Y. Scene-independent localization by learning residual coordinate map with cascaded localizers. In IEEE International Symposium on Mixed and Augmented Reality, pp. 79-88, 2023a.
|
| 309 |
+
Wang, J. and Qi, Y. Simultaneous scene-independent camera localization and category-level object pose estimation via multi-level feature fusion. In IEEE Conference Virtual Reality and 3D User Interfaces, pp. 254-264. IEEE, 2023b.
|
| 310 |
+
Wang, Q., Zhang, J., Yang, K., Peng, K., and Stiefelhagen, R. Matchformer: Interleaving attention in transformers for feature matching. In Asian Conference on Computer Vision, pp. 2746-2762, 2022.
|
| 311 |
+
Wang, S., Leroy, V., Cabon, Y., Chidlovskii, B., and Revaud, J. Dust3r: Geometric 3d vision made easy. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 20697-20709, 2024b.
|
| 312 |
+
Wang, T., Sheng, H., Chen, R., Yang, D., Cui, Z., Wang, S., Cong, R., and Zhao, M. Light field depth estimation: A comprehensive survey from principles to future. High-Confidence Computing, 4(1):100187, 2024c.
|
| 313 |
+
|
| 314 |
+
Wang, Y., Yan, Y., Shi, D., Zhu, W., Xia, J., Jeff, T., Jin, S., Gao, K., Li, X., and Yang, X. Nerf-ibvs: visual servo based on nerf for visual localization and navigation. Advances in Neural Information Processing Systems, 36: 8292-8304, 2023.
|
| 315 |
+
Wang, Y., He, X., Peng, S., Tan, D., and Zhou, X. Efficient loftr: Semi-dense local feature matching with sparse-like speed. arXiv preprint arXiv:2403.04765, 2024d.
|
| 316 |
+
Wu, X., Jiang, L., Wang, P.-S., Liu, Z., Liu, X., Qiao, Y., Ouyang, W., He, T., and Zhao, H. Point transformer v3: Simpler faster stronger. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4840-4851, 2024.
|
| 317 |
+
Xi, J., Zhang, W., Xu, Z., Zhu, S., Tang, L., and Zhao, L. Three-dimensional dynamic gesture recognition method based on convolutional neural network. High-Confidence Computing, 5(1):100280, 2025.
|
| 318 |
+
Xu, Y., Jiang, H., Xiao, Z., Feng, J., and Zhang, L. Dg-slam: Robust dynamic gaussian splatting slam with hybrid pose optimization. Advances in Neural Information Processing Systems, 37:51577-51596, 2024.
|
| 319 |
+
Yan, C., Qu, D., Xu, D., Zhao, B., Wang, Z., Wang, D., and Li, X. Gs-slam: Dense visual slam with 3d gaussian splatting. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 19595-19604, 2024.
|
| 320 |
+
Yang, S. and Scherer, S. Cubeslam: Monocular 3-d object slam. IEEE Transactions on Robotics, 35(4):925-938, 2019.
|
| 321 |
+
You, M., Luo, C., Zhou, H., and Zhu, S. Dynamic dense crf inference for video segmentation and semantic slam. Pattern Recognition, 133:109023, 2023.
|
| 322 |
+
Yu, C., Liu, Z., Liu, X.-J., Xie, F., Yang, Y., Wei, Q., and Fei, Q. Ds-slam: A semantic visual slam towards dynamic environments. In IEEE/RSJ International Conference on Intelligent Robots and Systems, pp. 1168-1174, 2018.
|
| 323 |
+
Zhang, D., Wang, C., Wang, W., Li, P., Qin, M., and Wang, H. Gaussian in the wild: 3d gaussian splatting for unconstrained image collections. In European Conference on Computer Vision, pp. 341-359, 2024.
|
| 324 |
+
Zhang, J., Peng, W., Xiao, A., Liu, T., Fu, J., Chen, J., and Yan, Z. Kans-detr: Enhancing detection transformer with kolmogorov-arnold networks for small object. High-Confidence Computing, pp. 100336, 2025.
|
| 325 |
+
Zhou, L., Luo, Z., Shen, T., Zhang, J., Zhen, M., Yao, Y., Fang, T., and Quan, L. Kfnet: Learning temporal camera relocalization using kalman filtering. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4919-4928, 2020.
|
| 326 |
+
Zhou, Q., Maximov, M., Litany, O., and Leal-Taixe, L. The nerfect match: Exploring nerf features for visual localization. In European Conference on Computer Vision, pp. 108-127, 2024a.
|
| 327 |
+
Zhou, Q., Maximov, M., Litany, O., and Leal-Taixe, L. The nerfect match: Exploring nerf features for visual localization. In European Conference on Computer Vision, pp. 108-127. Springer, 2024b.
|
| 328 |
+
Zhu, J., Yan, S., Wang, L., Zhang, S., Liu, Y., and Zhang, M. Lod-loc: Aerial visual localization using lod 3d map with neural wireframe alignment. Advances in Neural Information Processing Systems, 37:119063-119098, 2024.
|
| 329 |
+
|
| 330 |
+
# NeurIPS Paper Checklist
|
| 331 |
+
|
| 332 |
+
# 1. Claims
|
| 333 |
+
|
| 334 |
+
Question: Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope?
|
| 335 |
+
|
| 336 |
+
Answer: [Yes]
|
| 337 |
+
|
| 338 |
+
Justification: [Yes]
|
| 339 |
+
|
| 340 |
+
Guidelines:
|
| 341 |
+
|
| 342 |
+
- The answer NA means that the abstract and introduction do not include the claims made in the paper.
|
| 343 |
+
- The abstract and/or introduction should clearly state the claims made, including the contributions made in the paper and important assumptions and limitations. A No or NA answer to this question will not be perceived well by the reviewers.
|
| 344 |
+
- The claims made should match theoretical and experimental results, and reflect how much the results can be expected to generalize to other settings.
|
| 345 |
+
- It is fine to include aspirational goals as motivation as long as it is clear that these goals are not attained by the paper.
|
| 346 |
+
|
| 347 |
+
# 2. Limitations
|
| 348 |
+
|
| 349 |
+
Question: Does the paper discuss the limitations of the work performed by the authors?
|
| 350 |
+
|
| 351 |
+
Answer: [Yes]
|
| 352 |
+
|
| 353 |
+
Justification: [Yes]
|
| 354 |
+
|
| 355 |
+
Guidelines:
|
| 356 |
+
|
| 357 |
+
- The answer NA means that the paper has no limitation while the answer No means that the paper has limitations, but those are not discussed in the paper.
|
| 358 |
+
- The authors are encouraged to create a separate "Limitations" section in their paper.
|
| 359 |
+
- The paper should point out any strong assumptions and how robust the results are to violations of these assumptions (e.g., independence assumptions, noiseless settings, model well-specification, asymptotic approximations only holding locally). The authors should reflect on how these assumptions might be violated in practice and what the implications would be.
|
| 360 |
+
- The authors should reflect on the scope of the claims made, e.g., if the approach was only tested on a few datasets or with a few runs. In general, empirical results often depend on implicit assumptions, which should be articulated.
|
| 361 |
+
- The authors should reflect on the factors that influence the performance of the approach. For example, a facial recognition algorithm may perform poorly when image resolution is low or images are taken in low lighting. Or a speech-to-text system might not be used reliably to provide closed captions for online lectures because it fails to handle technical jargon.
|
| 362 |
+
- The authors should discuss the computational efficiency of the proposed algorithms and how they scale with dataset size.
|
| 363 |
+
- If applicable, the authors should discuss possible limitations of their approach to address problems of privacy and fairness.
|
| 364 |
+
- While the authors might fear that complete honesty about limitations might be used by reviewers as grounds for rejection, a worse outcome might be that reviewers discover limitations that aren't acknowledged in the paper. The authors should use their best judgment and recognize that individual actions in favor of transparency play an important role in developing norms that preserve the integrity of the community. Reviewers will be specifically instructed to not penalize honesty concerning limitations.
|
| 365 |
+
|
| 366 |
+
# 3. Theory assumptions and proofs
|
| 367 |
+
|
| 368 |
+
Question: For each theoretical result, does the paper provide the full set of assumptions and a complete (and correct) proof?
|
| 369 |
+
|
| 370 |
+
Answer: [NA]
|
| 371 |
+
|
| 372 |
+
Justification: [NA]
|
| 373 |
+
|
| 374 |
+
Guidelines:
|
| 375 |
+
|
| 376 |
+
- The answer NA means that the paper does not include theoretical results.
|
| 377 |
+
- All the theorems, formulas, and proofs in the paper should be numbered and cross-referenced.
|
| 378 |
+
- All assumptions should be clearly stated or referenced in the statement of any theorems.
|
| 379 |
+
- The proofs can either appear in the main paper or the supplemental material, but if they appear in the supplemental material, the authors are encouraged to provide a short proof sketch to provide intuition.
|
| 380 |
+
- Inversely, any informal proof provided in the core of the paper should be complemented by formal proofs provided in appendix or supplemental material.
|
| 381 |
+
- Theorems and Lemmas that the proof relies upon should be properly referenced.
|
| 382 |
+
|
| 383 |
+
# 4. Experimental result reproducibility
|
| 384 |
+
|
| 385 |
+
Question: Does the paper fully disclose all the information needed to reproduce the main experimental results of the paper to the extent that it affects the main claims and/or conclusions of the paper (regardless of whether the code and data are provided or not)?
|
| 386 |
+
|
| 387 |
+
Answer: [Yes]
|
| 388 |
+
|
| 389 |
+
Justification: [Yes]
|
| 390 |
+
|
| 391 |
+
Guidelines:
|
| 392 |
+
|
| 393 |
+
- The answer NA means that the paper does not include experiments.
|
| 394 |
+
- If the paper includes experiments, a No answer to this question will not be perceived well by the reviewers: Making the paper reproducible is important, regardless of whether the code and data are provided or not.
|
| 395 |
+
- If the contribution is a dataset and/or model, the authors should describe the steps taken to make their results reproducible or verifiable.
|
| 396 |
+
- Depending on the contribution, reproducibility can be accomplished in various ways. For example, if the contribution is a novel architecture, describing the architecture fully might suffice, or if the contribution is a specific model and empirical evaluation, it may be necessary to either make it possible for others to replicate the model with the same dataset, or provide access to the model. In general, releasing code and data is often one good way to accomplish this, but reproducibility can also be provided via detailed instructions for how to replicate the results, access to a hosted model (e.g., in the case of a large language model), releasing of a model checkpoint, or other means that are appropriate to the research performed.
|
| 397 |
+
- While NeurIPS does not require releasing code, the conference does require all submissions to provide some reasonable avenue for reproducibility, which may depend on the nature of the contribution. For example
|
| 398 |
+
(a) If the contribution is primarily a new algorithm, the paper should make it clear how to reproduce that algorithm.
|
| 399 |
+
(b) If the contribution is primarily a new model architecture, the paper should describe the architecture clearly and fully.
|
| 400 |
+
(c) If the contribution is a new model (e.g., a large language model), then there should either be a way to access this model for reproducing the results or a way to reproduce the model (e.g., with an open-source dataset or instructions for how to construct the dataset).
|
| 401 |
+
(d) We recognize that reproducibility may be tricky in some cases, in which case authors are welcome to describe the particular way they provide for reproducibility. In the case of closed-source models, it may be that access to the model is limited in some way (e.g., to registered users), but it should be possible for other researchers to have some path to reproducing or verifying the results.
|
| 402 |
+
|
| 403 |
+
# 5. Open access to data and code
|
| 404 |
+
|
| 405 |
+
Question: Does the paper provide open access to the data and code, with sufficient instructions to faithfully reproduce the main experimental results, as described in supplemental material?
|
| 406 |
+
|
| 407 |
+
Answer: [Yes]
|
| 408 |
+
|
| 409 |
+
Justification: [Yes]
|
| 410 |
+
|
| 411 |
+
Guidelines:
|
| 412 |
+
|
| 413 |
+
- The answer NA means that paper does not include experiments requiring code.
|
| 414 |
+
- Please see the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details.
|
| 415 |
+
- While we encourage the release of code and data, we understand that this might not be possible, so "No" is an acceptable answer. Papers cannot be rejected simply for not including code, unless this is central to the contribution (e.g., for a new open-source benchmark).
|
| 416 |
+
- The instructions should contain the exact command and environment needed to run to reproduce the results. See the NeurIPS code and data submission guidelines (https://nips.cc/public/guides/CodeSubmissionPolicy) for more details.
|
| 417 |
+
- The authors should provide instructions on data access and preparation, including how to access the raw data, preprocessed data, intermediate data, and generated data, etc.
|
| 418 |
+
- The authors should provide scripts to reproduce all experimental results for the new proposed method and baselines. If only a subset of experiments are reproducible, they should state which ones are omitted from the script and why.
|
| 419 |
+
- At submission time, to preserve anonymity, the authors should release anonymized versions (if applicable).
|
| 420 |
+
- Providing as much information as possible in supplemental material (appended to the paper) is recommended, but including URLs to data and code is permitted.
|
| 421 |
+
|
| 422 |
+
# 6. Experimental setting/details
|
| 423 |
+
|
| 424 |
+
Question: Does the paper specify all the training and test details (e.g., data splits, hyperparameters, how they were chosen, type of optimizer, etc.) necessary to understand the results?
|
| 425 |
+
|
| 426 |
+
Answer: [Yes]
|
| 427 |
+
|
| 428 |
+
Justification: [Yes]
|
| 429 |
+
|
| 430 |
+
Guidelines:
|
| 431 |
+
|
| 432 |
+
- The answer NA means that the paper does not include experiments.
|
| 433 |
+
- The experimental setting should be presented in the core of the paper to a level of detail that is necessary to appreciate the results and make sense of them.
|
| 434 |
+
- The full details can be provided either with the code, in appendix, or as supplemental material.
|
| 435 |
+
|
| 436 |
+
# 7. Experiment statistical significance
|
| 437 |
+
|
| 438 |
+
Question: Does the paper report error bars suitably and correctly defined or other appropriate information about the statistical significance of the experiments?
|
| 439 |
+
|
| 440 |
+
Answer: [Yes]
|
| 441 |
+
|
| 442 |
+
Justification: [Yes]
|
| 443 |
+
|
| 444 |
+
Guidelines:
|
| 445 |
+
|
| 446 |
+
- The answer NA means that the paper does not include experiments.
|
| 447 |
+
- The authors should answer "Yes" if the results are accompanied by error bars, confidence intervals, or statistical significance tests, at least for the experiments that support the main claims of the paper.
|
| 448 |
+
- The factors of variability that the error bars are capturing should be clearly stated (for example, train/test split, initialization, random drawing of some parameter, or overall run with given experimental conditions).
|
| 449 |
+
- The method for calculating the error bars should be explained (closed form formula, call to a library function, bootstrap, etc.)
|
| 450 |
+
- The assumptions made should be given (e.g., Normally distributed errors).
|
| 451 |
+
- It should be clear whether the error bar is the standard deviation or the standard error of the mean.
|
| 452 |
+
|
| 453 |
+
- It is OK to report 1-sigma error bars, but one should state it. The authors should preferably report a 2-sigma error bar than state that they have a $96\%$ CI, if the hypothesis of Normality of errors is not verified.
|
| 454 |
+
- For asymmetric distributions, the authors should be careful not to show in tables or figures symmetric error bars that would yield results that are out of range (e.g. negative error rates).
|
| 455 |
+
- If error bars are reported in tables or plots, The authors should explain in the text how they were calculated and reference the corresponding figures or tables in the text.
|
| 456 |
+
|
| 457 |
+
# 8. Experiments compute resources
|
| 458 |
+
|
| 459 |
+
Question: For each experiment, does the paper provide sufficient information on the computer resources (type of compute workers, memory, time of execution) needed to reproduce the experiments?
|
| 460 |
+
|
| 461 |
+
Answer: [Yes]
|
| 462 |
+
|
| 463 |
+
Justification: [Yes]
|
| 464 |
+
|
| 465 |
+
Guidelines:
|
| 466 |
+
|
| 467 |
+
- The answer NA means that the paper does not include experiments.
|
| 468 |
+
- The paper should indicate the type of compute workers CPU or GPU, internal cluster, or cloud provider, including relevant memory and storage.
|
| 469 |
+
- The paper should provide the amount of compute required for each of the individual experimental runs as well as estimate the total compute.
|
| 470 |
+
- The paper should disclose whether the full research project required more compute than the experiments reported in the paper (e.g., preliminary or failed experiments that didn't make it into the paper).
|
| 471 |
+
|
| 472 |
+
# 9. Code of ethics
|
| 473 |
+
|
| 474 |
+
Question: Does the research conducted in the paper conform, in every respect, with the NeurIPS Code of Ethics https://neurips.cc/public/EthicsGuidelines?
|
| 475 |
+
|
| 476 |
+
Answer: [Yes]
|
| 477 |
+
|
| 478 |
+
Justification: [Yes]
|
| 479 |
+
|
| 480 |
+
Guidelines:
|
| 481 |
+
|
| 482 |
+
- The answer NA means that the authors have not reviewed the NeurIPS Code of Ethics.
|
| 483 |
+
- If the authors answer No, they should explain the special circumstances that require a deviation from the Code of Ethics.
|
| 484 |
+
- The authors should make sure to preserve anonymity (e.g., if there is a special consideration due to laws or regulations in their jurisdiction).
|
| 485 |
+
|
| 486 |
+
# 10. Broader impacts
|
| 487 |
+
|
| 488 |
+
Question: Does the paper discuss both potential positive societal impacts and negative societal impacts of the work performed?
|
| 489 |
+
|
| 490 |
+
Answer: [NA]
|
| 491 |
+
|
| 492 |
+
Justification: [NA]
|
| 493 |
+
|
| 494 |
+
Guidelines:
|
| 495 |
+
|
| 496 |
+
- The answer NA means that there is no societal impact of the work performed.
|
| 497 |
+
- If the authors answer NA or No, they should explain why their work has no societal impact or why the paper does not address societal impact.
|
| 498 |
+
- Examples of negative societal impacts include potential malicious or unintended uses (e.g., disinformation, generating fake profiles, surveillance), fairness considerations (e.g., deployment of technologies that could make decisions that unfairly impact specific groups), privacy considerations, and security considerations.
|
| 499 |
+
- The conference expects that many papers will be foundational research and not tied to particular applications, let alone deployments. However, if there is a direct path to any negative applications, the authors should point it out. For example, it is legitimate to point out that an improvement in the quality of generative models could be used to generate deepfakes for disinformation. On the other hand, it is not needed to point out that a generic algorithm for optimizing neural networks could enable people to train models that generate Deepfakes faster.
|
| 502 |
+
|
| 503 |
+
- The authors should consider possible harms that could arise when the technology is being used as intended and functioning correctly, harms that could arise when the technology is being used as intended but gives incorrect results, and harms following from (intentional or unintentional) misuse of the technology.
|
| 504 |
+
- If there are negative societal impacts, the authors could also discuss possible mitigation strategies (e.g., gated release of models, providing defenses in addition to attacks, mechanisms for monitoring misuse, mechanisms to monitor how a system learns from feedback over time, improving the efficiency and accessibility of ML).
|
| 505 |
+
|
| 506 |
+
# 11. Safeguards
|
| 507 |
+
|
| 508 |
+
Question: Does the paper describe safeguards that have been put in place for responsible release of data or models that have a high risk for misuse (e.g., pretrained language models, image generators, or scraped datasets)?
|
| 509 |
+
|
| 510 |
+
Answer: [NA]
|
| 511 |
+
|
| 512 |
+
Justification: [NA]
|
| 513 |
+
|
| 514 |
+
Guidelines:
|
| 515 |
+
|
| 516 |
+
- The answer NA means that the paper poses no such risks.
|
| 517 |
+
- Released models that have a high risk for misuse or dual-use should be released with necessary safeguards to allow for controlled use of the model, for example by requiring that users adhere to usage guidelines or restrictions to access the model or implementing safety filters.
|
| 518 |
+
- Datasets that have been scraped from the Internet could pose safety risks. The authors should describe how they avoided releasing unsafe images.
|
| 519 |
+
- We recognize that providing effective safeguards is challenging, and many papers do not require this, but we encourage authors to take this into account and make a best faith effort.
|
| 520 |
+
|
| 521 |
+
# 12. Licenses for existing assets
|
| 522 |
+
|
| 523 |
+
Question: Are the creators or original owners of assets (e.g., code, data, models), used in the paper, properly credited and are the license and terms of use explicitly mentioned and properly respected?
|
| 524 |
+
|
| 525 |
+
Answer: [Yes]
|
| 526 |
+
|
| 527 |
+
Justification: [Yes]
|
| 528 |
+
|
| 529 |
+
Guidelines:
|
| 530 |
+
|
| 531 |
+
- The answer NA means that the paper does not use existing assets.
|
| 532 |
+
- The authors should cite the original paper that produced the code package or dataset.
|
| 533 |
+
- The authors should state which version of the asset is used and, if possible, include a URL.
|
| 534 |
+
- The name of the license (e.g., CC-BY 4.0) should be included for each asset.
|
| 535 |
+
- For scraped data from a particular source (e.g., website), the copyright and terms of service of that source should be provided.
|
| 536 |
+
- If assets are released, the license, copyright information, and terms of use in the package should be provided. For popular datasets, paperswithcode.com/datasets has curated licenses for some datasets. Their licensing guide can help determine the license of a dataset.
|
| 537 |
+
- For existing datasets that are re-packaged, both the original license and the license of the derived asset (if it has changed) should be provided.
|
| 538 |
+
- If this information is not available online, the authors are encouraged to reach out to the asset's creators.
|
| 539 |
+
|
| 540 |
+
# 13. New assets
|
| 541 |
+
|
| 542 |
+
Question: Are new assets introduced in the paper well documented and is the documentation provided alongside the assets?
|
| 543 |
+
|
| 544 |
+
Answer: [NA]
|
| 545 |
+
|
| 546 |
+
Justification: [NA]
|
| 547 |
+
|
| 548 |
+
Guidelines:
|
| 549 |
+
|
| 550 |
+
- The answer NA means that the paper does not release new assets.
|
| 551 |
+
- Researchers should communicate the details of the dataset/code/model as part of their submissions via structured templates. This includes details about training, license, limitations, etc.
|
| 552 |
+
- The paper should discuss whether and how consent was obtained from people whose asset is used.
|
| 553 |
+
- At submission time, remember to anonymize your assets (if applicable). You can either create an anonymized URL or include an anonymized zip file.
|
| 554 |
+
|
| 555 |
+
# 14. Crowdsourcing and research with human subjects
|
| 556 |
+
|
| 557 |
+
Question: For crowdsourcing experiments and research with human subjects, does the paper include the full text of instructions given to participants and screenshots, if applicable, as well as details about compensation (if any)?
|
| 558 |
+
|
| 559 |
+
Answer: [NA]
|
| 560 |
+
|
| 561 |
+
Justification: [NA]
|
| 562 |
+
|
| 563 |
+
Guidelines:
|
| 564 |
+
|
| 565 |
+
- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects.
|
| 566 |
+
- Including this information in the supplemental material is fine, but if the main contribution of the paper involves human subjects, then as much detail as possible should be included in the main paper.
|
| 567 |
+
- According to the NeurIPS Code of Ethics, workers involved in data collection, curation, or other labor should be paid at least the minimum wage in the country of the data collector.
|
| 568 |
+
|
| 569 |
+
# 15. Institutional review board (IRB) approvals or equivalent for research with human subjects
|
| 570 |
+
|
| 571 |
+
Question: Does the paper describe potential risks incurred by study participants, whether such risks were disclosed to the subjects, and whether Institutional Review Board (IRB) approvals (or an equivalent approval/review based on the requirements of your country or institution) were obtained?
|
| 572 |
+
|
| 573 |
+
Answer: [NA]
|
| 574 |
+
|
| 575 |
+
Justification: [NA]
|
| 576 |
+
|
| 577 |
+
Guidelines:
|
| 578 |
+
|
| 579 |
+
- The answer NA means that the paper does not involve crowdsourcing nor research with human subjects.
|
| 580 |
+
- Depending on the country in which research is conducted, IRB approval (or equivalent) may be required for any human subjects research. If you obtained IRB approval, you should clearly state this in the paper.
|
| 581 |
+
- We recognize that the procedures for this may vary significantly between institutions and locations, and we expect authors to adhere to the NeurIPS Code of Ethics and the guidelines for their institution.
|
| 582 |
+
- For initial submissions, do not include any information that would break anonymity (if applicable), such as the institution conducting the review.
|
| 583 |
+
|
| 584 |
+
# 16. Declaration of LLM usage
|
| 585 |
+
|
| 586 |
+
Question: Does the paper describe the usage of LLMs if it is an important, original, or non-standard component of the core methods in this research? Note that if the LLM is used only for writing, editing, or formatting purposes and does not impact the core methodology, scientific rigorousness, or originality of the research, declaration is not required.
|
| 587 |
+
|
| 588 |
+
Answer: [NA]
|
| 589 |
+
|
| 590 |
+
Justification: [NA]
|
| 591 |
+
|
| 592 |
+
Guidelines:
|
| 593 |
+
|
| 594 |
+
- The answer NA means that the core method development in this research does not involve LLMs as any important, original, or non-standard components.
|
| 595 |
+
- Please refer to our LLM policy (https://neurips.cc/Conferences/2025/LLM) for what should or should not be described.
|
| 596 |
+
|
| 597 |
+
# A Technical Appendices and Supplementary Material
|
| 598 |
+
|
| 599 |
+
# A.1 Residual Coordinate Map for Pose Refinement
|
| 600 |
+
|
| 601 |
+
Given a pair of images, the residual coordinate map denotes the XYZ coordinate difference between the current and previous camera coordinate spaces Wang & Qi (2023a), which is used to predict the relative pose. In our paper, given the real input image and the view rendered at the initially predicted pose $R_{i}^{p}, T_{i}^{p}$, we use the residual coordinate map to solve the relative pose $R_{r}^{p}, T_{r}^{p}$ between them. The refined pose is then obtained by Eq. 2.
|
| 602 |
+
|
| 603 |
+
The construction process of the residual coordinate map is as follows. $M_{d}$ denotes the XYZ coordinates in the camera space of the input real view, where the depth information (Z-axis value) is substituted with the grayscale value obtained by uniformly sampling the grayscale image. For each point $\mathbf{p} \in M_d$, we first transform it to world space and then convert it to the camera space of the rendered frame. Finally, the residual point is obtained by subtracting the original coordinate from the transformed one. In summary, the coordinate representation is defined by the following formula.
|
| 604 |
+
|
| 605 |
+
$$
|
| 606 |
+
M_{r} = \left(R_{r}^{p} - E\right) M_{d} + T_{r}^{p}, \tag{5}
|
| 607 |
+
$$
|
| 608 |
+
|
| 609 |
+
where $E$ denotes the identity matrix. Once the refinement network regresses the residual coordinate map, $R_{r}^{p}, T_{r}^{p}$ can be recovered by the PnP method with RANSAC.
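For illustration, the NumPy/OpenCV sketch below implements Eq. 5 and one possible way to recover $R_{r}^{p}, T_{r}^{p}$ from a regressed residual map via PnP with RANSAC; the function names, the $H \times W \times 3$ layout of $M_d$, and the intrinsics `K` are assumptions made for this sketch rather than the authors' code.

```python
import cv2
import numpy as np


def residual_coordinate_map(M_d, R_rel, t_rel):
    """Eq. 5: M_r = (R - E) M_d + T, applied per pixel.

    M_d:   (H, W, 3) XYZ coordinates in the camera space of the real view.
    R_rel: (3, 3) relative rotation, t_rel: (3,) relative translation.
    """
    E = np.eye(3)
    return M_d @ (R_rel - E).T + t_rel


def relative_pose_from_residual(M_r, M_d, K):
    """One possible way to recover (R_r^p, T_r^p) from a regressed residual map.

    The displaced points M_d + M_r lie in the camera space of the rendered view;
    projecting them with the intrinsics K yields 2D-3D correspondences for PnP.
    """
    pts3d = M_d.reshape(-1, 3).astype(np.float64)
    displaced = (M_d + M_r).reshape(-1, 3)
    proj = displaced @ K.T
    pts2d = (proj[:, :2] / proj[:, 2:3]).astype(np.float64)
    ok, rvec, tvec, _ = cv2.solvePnPRansac(pts3d, pts2d, K, None)
    R_rel, _ = cv2.Rodrigues(rvec)
    return R_rel, tvec.reshape(3)
```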
|
| 610 |
+
|
| 611 |
+
# A.2 More Dataset Details
|
| 612 |
+
|
| 613 |
+
7 Scenes Shotton et al. (2013). This dataset includes seven indoor scenes, each containing 2 to 10 sequences. It provides depth images, color frames, and ground truth poses. All scenes were recorded using a handheld Kinect RGB-D camera at a resolution of $640 \times 480$ and are divided into separate training and testing sets. The ground truth poses were obtained using the KinectFusion system. This dataset has recently been used as a benchmark in studies Kendall & Cipolla (2017); Brachmann & Rother (2018); Zhou et al. (2020), making it convenient for comparison with other methods.
|
| 614 |
+
|
| 615 |
+
Cambridge Landmarks Kendall et al. (2015). This dataset includes several large outdoor environments. In this paper, we use the College, Hospital, Shop, and Church scenes to evaluate localization accuracy. Ground truth poses in each scene were calibrated using VisualSFM, and a sparse point cloud is also provided.
|
| 616 |
+
|
| 617 |
+
ScanNet Dai et al. (2017). ScanNet contains over 1,500 scans, amounting to around 2.5 million views. The dataset was captured using a user-friendly and scalable RGB-D capture system. To evaluate scene-independent performance, we train GS-RelocNet on ScanNet and test it on the 7 Scenes dataset.
|
| 618 |
+
|
| 619 |
+
TUM RGB-D Sturm et al. (2012). The TUM RGB-D dataset is designed to benchmark visual odometry and SLAM systems. We select the four dynamic scenes (fr_walking_xyz, fr_walking_static, fr_walking_rpy, fr_walking_half) from TUM RGB-D to evaluate localization performance in dynamic environments.
|
| 620 |
+
|
| 621 |
+
Bonn Palazzolo et al. (2019). The Bonn dataset is tailored for dynamic localization, featuring highly dynamic sequences. We select 20 sequences from the dynamic subset (same as LC-CRF SLAM Du et al. (2020)), where individuals perform various tasks such as manipulating boxes or interacting with balloons, along with 2 static sequences.
|
| 622 |
+
|
| 623 |
+
12 Scenes Valentin et al. (2016). The 12 Scenes dataset features 12 larger indoor environments, with volumes ranging from $14m^3$ to $79m^3$ .
|
| 624 |
+
|
| 625 |
+
# A.3 More Implementation Details
|
| 626 |
+
|
| 627 |
+
3D GS training details. To construct the 3D GS model, we first utilize COLMAP to generate an initial point cloud using ground truth poses. Subsequently, we employ the original 3D GS model with its default configuration settings.
|
| 628 |
+
|
| 629 |
+
-iterations: 30000. Total number of training iterations.
|
| 630 |
+
-position_lr_init: 0.00016. The initial learning rate of the Gaussian position.
|
| 631 |
+
-position_lr_final: 0.0000016. The final learning rate of the Gaussian position.
|
| 632 |
+
|
| 633 |
+
-position_lr_delay_mult: 0.01. The delay multiplier before the learning rate decay begins.
|
| 634 |
+
-position_lr_max_steps: 30000. The total number of steps for the learning rate decay.
|
| 635 |
+
-feature_lr: 0.0025. Learning rate of spherical harmonic function coefficients.
|
| 636 |
+
- opacity_lr: 0.05. Learning rate of opacity.
|
| 637 |
+
-scaling_lr: 0.005. Scaling learning rate.
|
| 638 |
+
-rotation_lr: 0.001. Learning rate of rotation.
|
| 639 |
+
- densify_from_iter: 500. The iteration at which densification begins.
|
| 640 |
+
- densify_until_iter: 15000. The iteration at which densification ends.
|
| 641 |
+
- densification_interval: 100. The interval (in iterations) between densification and pruning checks.
|
| 642 |
+
- opacity_prune_threshold: 0.005. Opacity pruning threshold.
|
| 643 |
+
- densify_grad_threshold: 0.0002. The gradient threshold for densifying a Gaussian.
|
| 644 |
+
|
| 645 |
+
PnP details. PnP with RANSAC uses the OpenCV implementation with the following parameters; a short usage sketch follows the list.
|
| 646 |
+
|
| 647 |
+
-iterationsCount: 100. The number of RANSAC iterations.
|
| 648 |
+
-reprojectionError: 8. Threshold for reprojection error.
|
| 649 |
+
-confidence: 0.99. Degree of confidence.
|
| 650 |
+
- flags: SOLVEPNP_ITERATIVE. The PnP solver algorithm.
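The following synthetic snippet shows how these settings map onto OpenCV's `cv2.solvePnPRansac`; the intrinsics and the correspondences are fabricated purely to make the example runnable.

```python
import cv2
import numpy as np

# Synthetic check of the solver settings listed above.
rng = np.random.default_rng(0)
K = np.array([[525.0, 0, 320.0], [0, 525.0, 240.0], [0, 0, 1.0]])
pts3d = rng.uniform(-1.0, 1.0, (200, 3)) + np.array([0, 0, 4.0])  # points in front of the camera
proj = pts3d @ K.T
pts2d = proj[:, :2] / proj[:, 2:3]                                 # ground-truth pose is identity

success, rvec, tvec, inliers = cv2.solvePnPRansac(
    pts3d, pts2d,
    K, None,                        # distCoeffs=None: undistorted images assumed
    iterationsCount=100,            # RANSAC iterations
    reprojectionError=8.0,          # inlier threshold in pixels
    confidence=0.99,
    flags=cv2.SOLVEPNP_ITERATIVE,
)
R, _ = cv2.Rodrigues(rvec)          # recovered rotation (close to identity here)
```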
|
| 651 |
+
|
| 652 |
+
Pose extension in the refinement network. In the refinement network training, small pose differences between rendered and real frames require high coordinate accuracy, which increases the learning difficulty. To address this, we extend the relative pose using fixed coefficients. The position is scaled directly, and the orientation is expanded through a transformation between the quaternion and Euler angles. Specifically, the extension coefficient is set to 8.0.
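A minimal sketch of this pose extension is given below, assuming SciPy's $(x, y, z, w)$ quaternion convention and an "xyz" Euler order; the exact conventions are not stated in the paper and the names are placeholders.

```python
import numpy as np
from scipy.spatial.transform import Rotation as R


def extend_relative_pose(q_rel, t_rel, coeff=8.0):
    """Amplify a small relative pose by a fixed coefficient (sketch).

    q_rel: relative rotation as a quaternion (x, y, z, w); t_rel: (3,) translation.
    The translation is scaled directly; the rotation is scaled in Euler-angle space.
    """
    t_ext = coeff * np.asarray(t_rel, dtype=np.float64)
    euler = R.from_quat(q_rel).as_euler("xyz")           # quaternion -> Euler angles
    q_ext = R.from_euler("xyz", coeff * euler).as_quat() # scale, then back to quaternion
    return q_ext, t_ext


# Example: a 0.5-degree rotation about z becomes a 4-degree rotation.
q_small = R.from_euler("z", 0.5, degrees=True).as_quat()
q_big, t_big = extend_relative_pose(q_small, [0.002, 0.0, 0.001])
```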
|
| 653 |
+
|
| 654 |
+
Initial pose estimation by PnP. For the PnP solver with RANSAC, we adapt the traditional RANSAC framework by incorporating our predicted confidence scores. Conventionally, RANSAC determines the final result based on the number of inlier points. In our modified approach, we instead use the sum of the confidence values of the inlier points to make this determination, thereby improving the reliability of the pose estimation.
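The sketch below illustrates the idea of ranking hypotheses by the summed confidence of their inliers rather than by the inlier count. It is a generic hypothesize-and-verify loop around OpenCV's PnP solver, not the authors' exact implementation, and all names and parameters are placeholders.

```python
import cv2
import numpy as np


def confidence_weighted_pnp_ransac(pts3d, pts2d, conf, K,
                                   iters=100, reproj_thresh=8.0, seed=0):
    """RANSAC for PnP that scores hypotheses by the summed confidence of the
    inlier points instead of the raw inlier count (illustrative sketch)."""
    rng = np.random.default_rng(seed)
    best_score, best_pose = -np.inf, None
    for _ in range(iters):
        idx = rng.choice(len(pts3d), size=6, replace=False)      # small hypothesis set
        ok, rvec, tvec = cv2.solvePnP(pts3d[idx], pts2d[idx], K, None,
                                      flags=cv2.SOLVEPNP_EPNP)
        if not ok:
            continue
        proj, _ = cv2.projectPoints(pts3d, rvec, tvec, K, None)
        err = np.linalg.norm(proj.reshape(-1, 2) - pts2d, axis=1)
        inliers = err < reproj_thresh
        score = conf[inliers].sum()              # confidence sum replaces inlier count
        if score > best_score:
            best_score, best_pose = score, (rvec.copy(), tvec.copy())
    return best_pose, best_score


# Synthetic usage: project random points with an identity pose and uniform confidence.
rng = np.random.default_rng(1)
K = np.array([[525.0, 0, 320.0], [0, 525.0, 240.0], [0, 0, 1.0]])
pts3d = rng.uniform(-1, 1, (300, 3)) + np.array([0, 0, 5.0])
proj = pts3d @ K.T
pts2d = proj[:, :2] / proj[:, 2:3]
pose, score = confidence_weighted_pnp_ransac(pts3d, pts2d, np.ones(300), K)
```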
|
| 655 |
+
|
| 656 |
+
# A.4 More Results on 7 Scenes
|
| 657 |
+
|
| 658 |
+
Table 5: The percentage of localization errors under $5cm$, $5^{\circ}$ and $2cm$, $2^{\circ}$ on the indoor 7 Scenes dataset compared with other methods. Specifically, the sign $\ddagger$ indicates results obtained with SfM pseudo ground truth, while the others use the original KinectFusion ground truth. Red and blue mark the first- and second-best results.
|
| 659 |
+
|
| 660 |
+
<table><tr><td></td><td>Method</td><td>5cm, 5° (↑)</td><td>2cm, 2° (↑)</td></tr><tr><td rowspan="2">APR</td><td>DFNet Chen et al. (2022)</td><td>43.1</td><td>8.4</td></tr><tr><td>Marepo Chen et al. (2024b)</td><td>84.0</td><td>33.7</td></tr><tr><td rowspan="3">SCR</td><td>DSAC*‡ Brachmann & Rother (2021)</td><td>97.8</td><td>80.7</td></tr><tr><td>ACE‡ Brachmann et al. (2023)</td><td>97.1</td><td>83.3</td></tr><tr><td>GLACE‡ Wang et al. (2024a)</td><td>95.6</td><td>82.2</td></tr><tr><td rowspan="3">NeRF</td><td>NeReS Chen et al. (2024a)</td><td>78.3</td><td>45.9</td></tr><tr><td>HR-APR Liu et al. (2024b)</td><td>76.4</td><td>40.2</td></tr><tr><td>NeRFMatch‡ Zhou et al. (2024b)</td><td>78.4</td><td>-</td></tr><tr><td rowspan="5">3D Gaussian</td><td>DFNet + GS-CPR‡ Liu et al. (2025) (Accepted by ICLR 2025)</td><td>94.2</td><td>76.5</td></tr><tr><td>ACE + GS-CPR‡ Liu et al. (2025) (Accepted by ICLR 2025)</td><td>100.0</td><td>93.1</td></tr><tr><td>STDLoc‡ Huang et al. (2025) (Accepted by CVPR 2025)</td><td>99.1</td><td>90.9</td></tr><tr><td>DFNet + GS-CPR‡ Liu et al. (2024a)</td><td>94.2</td><td>76.5</td></tr><tr><td>Ours‡</td><td>99.8</td><td>94.9</td></tr></table>
|
| 661 |
+
|
| 662 |
+
$5\mathrm{cm}, 5^{\circ}$ and $2\mathrm{cm}, 2^{\circ}$ metric on 7 Scenes. Besides accuracy, localization stability is also an important metric, commonly expressed as the percentage of frames with position and orientation errors under $5\mathrm{cm}, 5^{\circ}$ and $2\mathrm{cm}, 2^{\circ}$. Table 5 presents a comparison of our results with those of DFNet Chen et al. (2022),
|
| 663 |
+
|
| 664 |
+
Marepo Chen et al. (2024b), DSAC* Brachmann & Rother (2021), ACE Brachmann et al. (2023), GLACE Wang et al. (2024a), NeReS Chen et al. (2024a), HR-APR Liu et al. (2024b), NeRFMatch Zhou et al. (2024b) and GSLoc Liu et al. (2024a).
|
| 665 |
+
|
| 666 |
+
Our approach achieves an accuracy of $99.8\%$ , slightly below the state-of-the-art performance of ACE Brachmann et al. (2023) + GS-CPR Liu et al. (2025) $(100\%)$ . However, our method outperforms all other competing approaches. For the more stringent $2cm, 2^{\circ}$ metric, our framework demonstrates at least a $1.7\%$ improvement in accuracy compared to the next-best method, underscoring its robustness in challenging indoor environments. In comparison to DFNet Chen et al. (2022) + GS-CPR Liu et al. (2025), our method consistently achieves higher accuracy across both metrics. Notably, while GS-CPR relies on accurate initial pose estimates, our approach excels independently, demonstrating superior generalization without requiring such priors.
|
| 667 |
+
|
| 668 |
+
# A.5 Results on 12 Scenes
|
| 669 |
+
|
| 670 |
+
Table 6: The percentage of localization errors under $2cm, 2^{\circ}$ on 12 Scenes compared with other methods. Red and blue mark the first- and second-best results. The results are reported in Liu et al. (2024a).
|
| 671 |
+
|
| 672 |
+
<table><tr><td></td><td>Method</td><td>2cm, 2° (↑)</td></tr><tr><td>APR</td><td>Marepo Chen et al. (2024b)</td><td>50.4</td></tr><tr><td rowspan="3">SCR</td><td>DSAC* Brachmann & Rother (2021)</td><td>96.7</td></tr><tr><td>ACE Brachmann et al. (2023)</td><td>97.2</td></tr><tr><td>GLACE Wang et al. (2024a)</td><td>97.5</td></tr><tr><td rowspan="2">3D Gaussian</td><td>Marepo Chen et al. (2024b) + GS-CPR Liu et al. (2025)</td><td>90.9</td></tr><tr><td>Ours</td><td>98.7</td></tr></table>
|
| 673 |
+
|
| 674 |
+
$2cm, 2^{\circ}$ metric on 12 Scenes. To further evaluate localization performance, we conduct experiments on the 12 Scenes dataset using the $2cm, 2^{\circ}$ metric. Table 6 presents the results in comparison with other approaches. Our method achieves the highest localization accuracy for the $2cm, 2^{\circ}$ metric. Compared to the 3D Gaussian based refinement method GS-CPR Liu et al. (2025), our approach achieves a $7.6\%$ improvement.
|
| 675 |
+
|
| 676 |
+
# A.6 More Detailed Studies of GS-RelocNet
|
| 677 |
+
|
| 678 |
+
Table 7: Scene-independent localization results on 7 Scenes (original ground truth) and Cambridge Landmarks under different settings. Results are reported as translation (cm) / rotation (°) errors.
|
| 679 |
+
|
| 680 |
+
<table><tr><td></td><td>GS-RelocNet: 3D Gaussian Number</td><td>7 Scenes</td><td>Cambridge Landmarks</td></tr><tr><td>S1</td><td>512</td><td>2.4/1.14</td><td>14/0.36</td></tr><tr><td>S2</td><td>1024</td><td>2.0/0.94</td><td>12/0.29</td></tr><tr><td>S3</td><td>2048</td><td>1.7/0.89</td><td>10/0.22</td></tr><tr><td>S4</td><td>4096</td><td>1.4/0.82</td><td>9/0.21</td></tr><tr><td>S5</td><td>8192</td><td>1.5/0.88</td><td>9/0.21</td></tr><tr><td>S6</td><td>16384</td><td>1.9/0.97</td><td>12/0.28</td></tr><tr><td>S7</td><td>Using Point Cloud instead of 3D Gaussians</td><td>2.5/1.43</td><td>16/0.45</td></tr><tr><td></td><td>Refinement Network: Rendering Method</td><td></td><td></td></tr><tr><td>S8</td><td>3D model</td><td>1.8/0.92</td><td>12/0.28</td></tr><tr><td>S9</td><td>NeRF</td><td>1.6/0.84</td><td>10/0.23</td></tr><tr><td>S10</td><td>3D GS</td><td>1.4/0.82</td><td>9/0.21</td></tr></table>
|
| 682 |
+
|
| 683 |
+
Discussion of 3D Gaussian number in GS-RelocNet. In S1 - S6 of Table 7, we experiment with different numbers of 3D Gaussians. On one hand, the results with 512, 1024, and 16384 Gaussians are inferior to those with 4096 Gaussians, indicating that fewer Gaussians are insufficient for learning the scene features, while too many Gaussians increase the learning difficulty. On the other hand, the results with 2048, 4096, and 8192 Gaussians are comparable, suggesting that these configurations are sufficient for adequately learning the scene.
|
| 684 |
+
|
| 685 |
+
A natural follow-up concern is whether 4096 Gaussians are sufficient for large scenes. To address this, our framework may adopt a coarse-to-fine strategy. In the coarse stage, we uniformly sample 3D
|
| 686 |
+
|
| 687 |
+
Gaussians across the entire scene and select those with high confidence scores, iterating this process as needed. In the refinement stage, we focus on Gaussians in proximity to the high-confidence selections, establishing 2D-3D correspondences between image pixels and these 3D Gaussians, followed by PnP to estimate the camera pose.
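A toy sketch of such a coarse-to-fine selection is shown below; the confidence scorer, search radius, and sample sizes are illustrative assumptions only, not part of the released method.

```python
import numpy as np
from scipy.spatial import cKDTree


def coarse_to_fine_select(centers, score_fn, n_sample=4096, n_keep=256, radius=0.5, seed=0):
    """Toy coarse-to-fine Gaussian selection for large scenes.

    centers:  (N, 3) 3D Gaussian centers.
    score_fn: maps an index array to per-Gaussian confidence scores
              (e.g., produced by the localization network); a placeholder here.
    """
    rng = np.random.default_rng(seed)
    # Coarse stage: uniform sample over the whole scene, keep the most confident.
    coarse = rng.choice(len(centers), size=min(n_sample, len(centers)), replace=False)
    keep = coarse[np.argsort(score_fn(coarse))[-n_keep:]]
    # Fine stage: gather Gaussians near the high-confidence picks, cap at n_sample.
    dist, _ = cKDTree(centers[keep]).query(centers)
    fine = np.where(dist < radius)[0]
    return fine[:n_sample]            # feed into 2D-3D matching and PnP


# Example with random centers and a dummy scorer.
centers = np.random.default_rng(1).uniform(-10, 10, (50000, 3))
selected = coarse_to_fine_select(centers, lambda idx: np.random.default_rng(2).random(len(idx)))
```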
|
| 688 |
+
|
| 689 |
+
Discussion of 3D Gaussian sampling strategy. For the sampling strategy, we uniformly sample 3D Gaussians within grid cells to ensure a sufficient number of correspondences for robust pose estimation. Within each grid cell, Gaussians are randomly selected for training. Although we use 4096 Gaussians per training iteration, multiple iterations allow most Gaussians to be utilized. To evaluate the robustness of this strategy, we conducted experiments using random sampling instead of uniform grid based sampling. The results show mean pose errors of $0.76\mathrm{cm} / 0.26^{\circ}$ in the scene-dependent setting on the 7-Scenes dataset and $7\mathrm{cm} / 0.18^{\circ}$ on the Cambridge Landmarks dataset. These results indicate minimal performance degradation, demonstrating the robustness of our sampling strategy.
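For reference, a minimal version of this grid-based uniform sampling could look as follows; the grid resolution and the per-cell quota are assumptions made for illustration.

```python
import numpy as np


def grid_uniform_sample(centers, n_total=4096, grid=16, seed=0):
    """Uniformly sample Gaussian centers within voxel-grid cells (sketch)."""
    rng = np.random.default_rng(seed)
    lo, hi = centers.min(axis=0), centers.max(axis=0)
    # Assign each center to a voxel of a grid x grid x grid lattice over the scene.
    cell = np.floor((centers - lo) / (hi - lo + 1e-9) * grid).clip(0, grid - 1).astype(int)
    keys = cell[:, 0] * grid * grid + cell[:, 1] * grid + cell[:, 2]
    unique_keys = np.unique(keys)
    per_cell = max(1, n_total // len(unique_keys))   # roughly even quota per occupied cell
    picked = []
    for k in unique_keys:
        idx = np.where(keys == k)[0]
        picked.append(rng.choice(idx, size=min(per_cell, len(idx)), replace=False))
    picked = np.concatenate(picked)
    rng.shuffle(picked)
    return picked[:n_total]


# Example: sample 4096 of 100k random centers.
sel = grid_uniform_sample(np.random.default_rng(1).uniform(-5, 5, (100000, 3)))
```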
|
| 690 |
+
|
| 691 |
+
Discussion of usage of 3D Gaussians. In S7 of Table 7, we replace 3D Gaussians with the point cloud of the scene. The results clearly show a significant decrease in localization accuracy compared to using 3D Gaussians. We believe this is due to the fact that 3D Gaussians retain more texture and illumination information, while point clouds only capture geometric details.
|
| 692 |
+
|
| 693 |
+
Discussion of Spherical Harmonics (SH) in 3D GS. To evaluate the contribution of SH in 3D GS, we exclude SH and perform experiments on the 7 Scenes and Cambridge Landmarks datasets without using SH as the input of GS-RelocNet. The scene-independent results reveal a consistent increase in localization error. On 7 Scenes, the error rises from $1.4 / 0.82$ to $1.7 / 0.93$ ( $\uparrow 0.3 / 0.11$ ), and on Cambridge Landmarks, it increases from $9 / 0.21$ to $10 / 0.26$ ( $\uparrow 1 / 0.05$ ). This observed increase in error across both datasets confirms the effectiveness of SH in enhancing the performance of 3D GS for localization tasks.
|
| 694 |
+
|
| 695 |
+
Discussion of DINO v2 features. In GS-RelocNet, we also use DINO v2 features as an additional input. To validate their effect, we conduct experiments without DINO v2 features. The results are $0.73 / 0.25$ ($\uparrow 0.02 / 0.02$, scene-dependent setting) and $1.4 / 0.84$ ($\uparrow 0.0 / 0.02$, scene-independent setting) on 7 Scenes, and $10 / 0.24$ ($\uparrow 1 / 0.03$) on Cambridge Landmarks in the scene-independent setting. The additional features thus play a slightly positive role in localization; in the scene-dependent setting on 7 Scenes, the effect is even negligible. A likely explanation is that ScanNet provides a large number of training samples, allowing GS-RelocNet to learn such features itself.
|
| 696 |
+
|
| 697 |
+
# A.7 More Detailed Studies of Refinement
|
| 698 |
+
|
| 699 |
+
Discussion of rendering method. In S8 - S10 of Table 7, we present results using rendered images from both the 3D model and the NeRF network. The results demonstrate that the 3D GS based refinement yields the most significant improvements. When using rendered images from the 3D model, the results are comparable to those without refinement, likely due to the domain gap between rendered and real views. By incorporating NeRF and 3D GS, the lighting conditions are also considered, reducing this gap.
|
| 700 |
+
|
| 701 |
+
Discussion of cross-attention layers. To explore the potential of cross-attention layers, we conducted two ablation studies in the pose refinement stage. First, we integrated a cross-attention layer into the bidirectional feature fusion module of the refinement network. This yielded mean pose errors of $0.73\mathrm{cm} / 0.24^{\circ}$ on the 7 Scenes dataset and $7\mathrm{cm} / 0.17^{\circ}$ on the Cambridge Landmarks dataset in the scene-dependent setting, which are comparable to our original results. Second, we replaced the refinement network with a Dust3R ViT variant incorporating cross-attention. The scene-dependent errors are $0.76\mathrm{cm} / 0.27^{\circ}$ on 7 Scenes and $9\mathrm{cm} / 0.21^{\circ}$ on Cambridge Landmarks, indicating slightly reduced accuracy. We attribute the limited impact of cross-attention layers to the small input resolution of the refinement network $(128\times 128)$, which constrains their effectiveness.
|
| 702 |
+
|
| 703 |
+
Discussion of environment conditions. Our current implementation does not include specific adaptations for environmental changes, such as variations in lighting or weather. Upon reviewing other 3D GS based methods, including GS-CPR and STDLoc, we found no explicit mention of specialized designs addressing such conditions. To mitigate the impact of environmental variations, we propose exploring 3D GS variants designed for enhanced robustness to environmental changes, as demonstrated in recent works Zhang et al. (2024); Kulhanek et al. (2024). These approaches could be integrated into our framework to improve performance under diverse conditions.
|
| 704 |
+
|
| 705 |
+
Discussion of RANSAC. To predict the initial pose, we use PnP with RANSAC guided by the predicted confidence. To validate this choice, we also run experiments with traditional RANSAC based on the inlier count. The scene-independent results are $1.6 / 0.86$ ($\uparrow 0.1 / 0.04$) on 7 Scenes and $10 / 0.24$ ($\uparrow 1 / 0.03$) on Cambridge Landmarks. These results show a slight accuracy improvement from using confidence within RANSAC.
|
3dgaussiansplattingbasedsceneindependentrelocalizationwithunidirectionalandbidirectionalfeaturefusion/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e12c91dec82f84bcec603f97dba0e7ab0748c41730be912257aa0e12aed5e979
|
| 3 |
+
size 829478
|
3dgaussiansplattingbasedsceneindependentrelocalizationwithunidirectionalandbidirectionalfeaturefusion/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a71daf4039e15ad789189ef8ef973cd134d44c33cc21c7e0e80bcaf4423e9d80
|
| 3 |
+
size 783729
|
3dgsrd3dmoleculargraphautoencoderwithselectiveremaskdecoding/07703aac-b0e9-44a7-9c91-88716b6109c0_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1cc544eb84f04ebf0efeafbd7eaa69909ab7f1cd3a4770b5b084497168ffc99d
|
| 3 |
+
size 161417
|
3dgsrd3dmoleculargraphautoencoderwithselectiveremaskdecoding/07703aac-b0e9-44a7-9c91-88716b6109c0_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f8f83aa22c9ad4af1426d534a2227338914c204076f965d56a9330b50739b663
|
| 3 |
+
size 210841
|
3dgsrd3dmoleculargraphautoencoderwithselectiveremaskdecoding/07703aac-b0e9-44a7-9c91-88716b6109c0_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:88d614729fb092b8b7cbc8dc13d718a225978f8ac396861aa3abde4c9ced30a6
|
| 3 |
+
size 1916155
|
3dgsrd3dmoleculargraphautoencoderwithselectiveremaskdecoding/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
3dgsrd3dmoleculargraphautoencoderwithselectiveremaskdecoding/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a560d145a20552cbf4e857a00ce06290c8b3350b1a3faa33a46f21567907f57d
|
| 3 |
+
size 702028
|
3dgsrd3dmoleculargraphautoencoderwithselectiveremaskdecoding/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b69602f155ca1a64db00289b18421129548dd880a86b85929cfade7e5e02e9f6
|
| 3 |
+
size 847899
|
3dhumanposeestimationwithmuscles/63b1c657-eda7-47cd-9f63-6882cee2567e_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ba68cb74f833b88a863a0b19be4b0d9d6d73553fd3ffd4fde113321ee0c320b1
|
| 3 |
+
size 165761
|
3dhumanposeestimationwithmuscles/63b1c657-eda7-47cd-9f63-6882cee2567e_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e1384bbe4fba739920b2a325aa041fdb4d2f8538b1131dcd0f424ed4db2d83bb
|
| 3 |
+
size 215534
|