first commit
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +16 -5
- .gitignore +5 -0
- gaussian-grouping/.gitignore +8 -0
- gaussian-grouping/LICENSE +201 -0
- gaussian-grouping/README.md +82 -0
- gaussian-grouping/arguments/__init__.py +120 -0
- gaussian-grouping/config/gaussian_dataset/train.json +9 -0
- gaussian-grouping/config/object_inpaint/bear.json +10 -0
- gaussian-grouping/config/object_inpaint/mipnerf360/kitchen.json +10 -0
- gaussian-grouping/config/object_removal/bear.json +5 -0
- gaussian-grouping/config/object_removal/mipnerf360/kitchen.json +5 -0
- gaussian-grouping/convert.py +124 -0
- gaussian-grouping/docs/dataset.md +54 -0
- gaussian-grouping/docs/edit_removal_inpaint.md +97 -0
- gaussian-grouping/docs/install.md +46 -0
- gaussian-grouping/docs/train.md +69 -0
- gaussian-grouping/edit_object_inpaint.py +246 -0
- gaussian-grouping/edit_object_removal.py +901 -0
- gaussian-grouping/ext/grounded_sam.py +117 -0
- gaussian-grouping/gaussian_renderer/__init__.py +111 -0
- gaussian-grouping/gaussian_renderer/network_gui.py +86 -0
- gaussian-grouping/labelling.sh +44 -0
- gaussian-grouping/metrics.py +103 -0
- gaussian-grouping/render.py +172 -0
- gaussian-grouping/render_lerf_mask.py +150 -0
- gaussian-grouping/run.sh +13 -0
- gaussian-grouping/scene/__init__.py +220 -0
- gaussian-grouping/scene/cameras.py +81 -0
- gaussian-grouping/scene/colmap_loader.py +294 -0
- gaussian-grouping/scene/dataset_readers.py +388 -0
- gaussian-grouping/scene/gaussian_model.py +663 -0
- gaussian-grouping/script/edit_object_inpaint.sh +22 -0
- gaussian-grouping/script/edit_object_removal.sh +21 -0
- gaussian-grouping/script/eval_lerf_mask.py +121 -0
- gaussian-grouping/script/prepare_pseudo_label.sh +63 -0
- gaussian-grouping/script/prepare_pseudo_label_with_text.sh +57 -0
- gaussian-grouping/script/train.sh +24 -0
- gaussian-grouping/script/train_lerf.sh +24 -0
- gaussian-grouping/script/train_lerf_randominit.sh +24 -0
- gaussian-grouping/submodules/diff-gaussian-rasterization/CMakeLists.txt +36 -0
- gaussian-grouping/submodules/diff-gaussian-rasterization/LICENSE.md +83 -0
- gaussian-grouping/submodules/diff-gaussian-rasterization/README.md +21 -0
- gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/auxiliary.h +175 -0
- gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.cu +686 -0
- gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.h +66 -0
- gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/config.h +18 -0
- gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.cu +489 -0
- gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.h +69 -0
- gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer.h +93 -0
- gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.cu +448 -0
.gitattributes
CHANGED
|
@@ -9,7 +9,6 @@
|
|
| 9 |
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.mds filter=lfs diff=lfs merge=lfs -text
|
| 13 |
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 14 |
*.model filter=lfs diff=lfs merge=lfs -text
|
| 15 |
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
|
@@ -22,7 +21,6 @@
|
|
| 22 |
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 23 |
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 24 |
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 26 |
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 27 |
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 28 |
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
@@ -54,6 +52,19 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 54 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 55 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 56 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 57 |
-
#
|
| 58 |
-
|
| 59 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 12 |
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 13 |
*.model filter=lfs diff=lfs merge=lfs -text
|
| 14 |
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 21 |
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 22 |
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 23 |
*.pt filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 24 |
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 52 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 53 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 54 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 55 |
+
# 3dgs files
|
| 56 |
+
|
| 57 |
+
## We do have
|
| 58 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 59 |
+
## We do have
|
| 60 |
+
*.ply filter=lfs diff=lfs merge=lfs -text
|
| 61 |
+
## We do have
|
| 62 |
+
cameras.json filter=lfs diff=lfs merge=lfs -text
|
| 63 |
+
# ## We do have
|
| 64 |
+
# multi-coords filter=lfs diff=lfs merge=lfs -text
|
| 65 |
+
# ## We do have
|
| 66 |
+
# coord filter=lfs diff=lfs merge=lfs -text
|
| 67 |
+
|
| 68 |
+
*.splat filter=lfs diff=lfs merge=lfs -text
|
| 69 |
+
*.splatv filter=lfs diff=lfs merge=lfs -text
|
| 70 |
+
*.pdf filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*/object_mask/
|
| 2 |
+
*/train/
|
| 3 |
+
point_cloud.ply
|
| 4 |
+
*.pyc
|
| 5 |
+
__pycache__/
|
gaussian-grouping/.gitignore
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
data/*
|
| 2 |
+
output/*
|
| 3 |
+
Tracking-Anything-with-DEVA/example/*
|
| 4 |
+
submodules/diff-gaussian-rasterization-cp/*
|
| 5 |
+
*.pyc
|
| 6 |
+
*.pth
|
| 7 |
+
*.jpg
|
| 8 |
+
*.png
|
gaussian-grouping/LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright [yyyy] [name of copyright owner]
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
gaussian-grouping/README.md
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Gaussian Grouping
|
| 2 |
+
|
| 3 |
+
> [**Gaussian Grouping: Segment and Edit Anything in 3D Scenes**](https://arxiv.org/abs/2312.00732)
|
| 4 |
+
> [[Project Page]](https://ymq2017.github.io/gaussian-grouping)
|
| 5 |
+
> arXiv 2023
|
| 6 |
+
> ETH Zurich
|
| 7 |
+
|
| 8 |
+
We propose Gaussian Grouping, which extends Gaussian Splatting to jointly **reconstruct** and **segment** anything in open-world 3D scenes via **lifting 2D SAM**. It also efficiently supports versatile 3D scene **editing** tasks. Refer to our [paper](https://arxiv.org/abs/2312.00732) for more details.
|
| 9 |
+
|
| 10 |
+
<img width="1000" alt="image" src='media/teaser_github_demo.gif'>
|
| 11 |
+
|
| 12 |
+
Updates
|
| 13 |
+
-----------------
|
| 14 |
+
:fire::fire: 2024/01/16: We released the [LERF-Mask dataset](docs/dataset.md) and evaluation code.
|
| 15 |
+
|
| 16 |
+
2024/01/06: We released the [3D Object Removal & Inpainting](docs/edit_removal_inpaint.md) code.
|
| 17 |
+
|
| 18 |
+
2023/12/20: We released the [Install Notes](docs/install.md) and [Training & Rendering](docs/train.md) code.
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# Introduction
|
| 22 |
+
The recent Gaussian Splatting achieves high-quality and real-time novel-view synthesis of the 3D scenes. However, it is solely concentrated on the appearance and geometry modeling, while lacking in fine-grained object-level scene understanding. To address this issue, we propose Gaussian Grouping, which extends Gaussian Splatting to jointly reconstruct and segment anything in open-world 3D scenes. We augment each Gaussian with a compact Identity Encoding, allowing the Gaussians to be grouped according to their object instance or stuff membership in the 3D scene. Instead of resorting to expensive 3D labels, we supervise the Identity Encodings during the differentiable rendering by leveraging the 2D mask predictions by SAM, along with introduced 3D spatial consistency regularization. Comparing to the implicit NeRF representation, we show that the discrete and grouped 3D Gaussians can reconstruct, segment and edit anything in 3D with high visual quality, fine granularity and efficiency. Based on Gaussian Grouping, we further propose a local Gaussian Editing scheme, which shows efficacy in versatile scene editing applications, including 3D object removal, inpainting, colorization and scene recomposition.
|
| 23 |
+
|
| 24 |
+
<img width="1096" alt="image" src='media/github_method.png'>
|
| 25 |
+
|
| 26 |
+
# Application Overview
|
| 27 |
+
**Local Gaussian Editing scheme**: Grouped Gaussians after training. Each group represents a specific instance / stuff of the 3D scene and can be fully decoupled.
|
| 28 |
+
<img width="1096" alt="image" src='media/editing_operation.png'>
|
| 29 |
+
|
| 30 |
+
## 3D Object Removal
|
| 31 |
+
Our Gaussian Grouping can remove the large-scale objects on the Tanks & Temples dataset, from the whole 3D scene with greatly reduced artifacts. Zoom for better view.
|
| 32 |
+
|
| 33 |
+
https://github.com/lkeab/gaussian-grouping/assets/17427852/f3b0f964-a610-49ab-8332-f2caa64fbf45
|
| 34 |
+
|
| 35 |
+
## 3D Object Inpainting
|
| 36 |
+
Comparison on 3D object inpainting cases, where SPIn-NeRF requires 5h training while our method with better inpainting quality only needs 1 hour training and 20 minutes tuning.
|
| 37 |
+
|
| 38 |
+
https://github.com/lkeab/gaussian-grouping/assets/17427852/9f5050da-6a50-4a5f-a755-3bdc55eab1bc
|
| 39 |
+
|
| 40 |
+
https://github.com/lkeab/gaussian-grouping/assets/17427852/3ed0203c-0047-4333-8bf0-0c10f5a078d1
|
| 41 |
+
|
| 42 |
+
## 3D Object Style Transfer
|
| 43 |
+
Comparison on 3D object style transfer cases, Our Gaussian Grouping produces more coherent and natural transfer results across views, with faithfully preserved background.
|
| 44 |
+
|
| 45 |
+
https://github.com/lkeab/gaussian-grouping/assets/17427852/2f00eab5-590b-4295-bb1c-2076acc63d4a
|
| 46 |
+
|
| 47 |
+
## 3D Open-world Segmentation
|
| 48 |
+
Our Gaussian Grouping approach jointly reconstructs and segments anything in full open-world 3D scenes. The masks predicted by Gaussian Grouping contains much sharp and accurate boundary than LERF.
|
| 49 |
+
|
| 50 |
+
https://github.com/lkeab/gaussian-grouping/assets/60028943/38241b99-1497-4a7c-bd22-5b018b85548c
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
## 3D Multi-Object Editing
|
| 55 |
+
Our Gaussian Grouping approach jointly reconstructs and segments anything in full open-world 3D scenes. Then we concurrently perform 3D object editing for several objects.
|
| 56 |
+
|
| 57 |
+
https://github.com/lkeab/gaussian-grouping/assets/17427852/d9638a1c-1569-4c72-91b9-ee68e9e017e5
|
| 58 |
+
|
| 59 |
+
# Installation
|
| 60 |
+
You can refer to the [install document](./docs/install.md) to build the Python environment.
|
| 61 |
+
|
| 62 |
+
# Training and Masks Rendering
|
| 63 |
+
Then refer to the [train document](./docs/train.md) to train your own scene.
|
| 64 |
+
|
| 65 |
+
# Open-Vocabulary Segmentation
|
| 66 |
+
For evaluation on the **LERF-Mask dataset** proposed in our paper, you can refer to the [dataset document](./docs/dataset.md).
|
| 67 |
+
|
| 68 |
+
# 3D Object Removal and Inpainting
|
| 69 |
+
You can select the 3D object for removal and inpainting after training. Details are in the [edit removal inpaint document](./docs/edit_removal_inpaint.md).
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
Citation
|
| 73 |
+
---------------
|
| 74 |
+
If you find Gaussian Grouping useful in your research or refer to the provided baseline results, please star :star: this repository and consider citing :pencil::
|
| 75 |
+
```
|
| 76 |
+
@article{gaussian_grouping,
|
| 77 |
+
title={Gaussian Grouping: Segment and Edit Anything in 3D Scenes},
|
| 78 |
+
author={Ye, Mingqiao and Danelljan, Martin and Yu, Fisher and Ke, Lei},
|
| 79 |
+
journal={arXiv preprint arXiv:2312.00732},
|
| 80 |
+
year={2023}
|
| 81 |
+
}
|
| 82 |
+
```
|
gaussian-grouping/arguments/__init__.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2023, Gaussian-Grouping
|
| 2 |
+
# Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# ------------------------------------------------------------------------
|
| 6 |
+
# Modified from codes in Gaussian-Splatting
|
| 7 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 8 |
+
|
| 9 |
+
from argparse import ArgumentParser, Namespace
|
| 10 |
+
import sys
|
| 11 |
+
import os
|
| 12 |
+
|
| 13 |
+
class GroupParams:
|
| 14 |
+
pass
|
| 15 |
+
|
| 16 |
+
class ParamGroup:
|
| 17 |
+
def __init__(self, parser: ArgumentParser, name : str, fill_none = False):
|
| 18 |
+
group = parser.add_argument_group(name)
|
| 19 |
+
for key, value in vars(self).items():
|
| 20 |
+
shorthand = False
|
| 21 |
+
if key.startswith("_"):
|
| 22 |
+
shorthand = True
|
| 23 |
+
key = key[1:]
|
| 24 |
+
t = type(value)
|
| 25 |
+
value = value if not fill_none else None
|
| 26 |
+
if shorthand:
|
| 27 |
+
if t == bool:
|
| 28 |
+
group.add_argument("--" + key, ("-" + key[0:1]), default=value, action="store_true")
|
| 29 |
+
else:
|
| 30 |
+
group.add_argument("--" + key, ("-" + key[0:1]), default=value, type=t)
|
| 31 |
+
else:
|
| 32 |
+
if t == bool:
|
| 33 |
+
group.add_argument("--" + key, default=value, action="store_true")
|
| 34 |
+
else:
|
| 35 |
+
group.add_argument("--" + key, default=value, type=t)
|
| 36 |
+
|
| 37 |
+
def extract(self, args):
|
| 38 |
+
group = GroupParams()
|
| 39 |
+
for arg in vars(args).items():
|
| 40 |
+
if arg[0] in vars(self) or ("_" + arg[0]) in vars(self):
|
| 41 |
+
setattr(group, arg[0], arg[1])
|
| 42 |
+
return group
|
| 43 |
+
|
| 44 |
+
class ModelParams(ParamGroup):
|
| 45 |
+
def __init__(self, parser, sentinel=False):
|
| 46 |
+
self.sh_degree = 3
|
| 47 |
+
self._source_path = ""
|
| 48 |
+
self._model_path = ""
|
| 49 |
+
self._images = "images"
|
| 50 |
+
self._resolution = -1
|
| 51 |
+
self._white_background = False
|
| 52 |
+
self.data_device = "cuda"
|
| 53 |
+
self.eval = False
|
| 54 |
+
self.n_views = 100
|
| 55 |
+
self.random_init = False
|
| 56 |
+
self.train_split = False
|
| 57 |
+
self._object_path = "object_mask"
|
| 58 |
+
self.num_classes = 200
|
| 59 |
+
super().__init__(parser, "Loading Parameters", sentinel)
|
| 60 |
+
|
| 61 |
+
def extract(self, args):
|
| 62 |
+
g = super().extract(args)
|
| 63 |
+
g.source_path = os.path.abspath(g.source_path)
|
| 64 |
+
return g
|
| 65 |
+
|
| 66 |
+
class PipelineParams(ParamGroup):
|
| 67 |
+
def __init__(self, parser):
|
| 68 |
+
self.convert_SHs_python = False
|
| 69 |
+
self.compute_cov3D_python = False
|
| 70 |
+
self.debug = False
|
| 71 |
+
super().__init__(parser, "Pipeline Parameters")
|
| 72 |
+
|
| 73 |
+
class OptimizationParams(ParamGroup):
|
| 74 |
+
def __init__(self, parser):
|
| 75 |
+
self.iterations = 90_000 #
|
| 76 |
+
self.position_lr_init = 0.00016
|
| 77 |
+
self.position_lr_final = 0.0000016
|
| 78 |
+
self.position_lr_delay_mult = 0.01
|
| 79 |
+
self.position_lr_max_steps = 90_000 #
|
| 80 |
+
self.feature_lr = 0.0025 #
|
| 81 |
+
self.opacity_lr = 0.05
|
| 82 |
+
self.scaling_lr = 0.005
|
| 83 |
+
self.rotation_lr = 0.001
|
| 84 |
+
self.percent_dense = 0.01
|
| 85 |
+
self.lambda_dssim = 0.2
|
| 86 |
+
self.densification_interval = 100
|
| 87 |
+
self.opacity_reset_interval = 3000
|
| 88 |
+
self.densify_from_iter = 500
|
| 89 |
+
self.densify_until_iter = 90_000 #
|
| 90 |
+
self.densify_grad_threshold = 0.0002
|
| 91 |
+
|
| 92 |
+
self.reg3d_interval = 2
|
| 93 |
+
self.reg3d_k = 5
|
| 94 |
+
self.reg3d_lambda_val = 2
|
| 95 |
+
self.reg3d_max_points = 300000
|
| 96 |
+
self.reg3d_sample_size = 1000
|
| 97 |
+
|
| 98 |
+
super().__init__(parser, "Optimization Parameters")
|
| 99 |
+
|
| 100 |
+
def get_combined_args(parser : ArgumentParser):
|
| 101 |
+
cmdlne_string = sys.argv[1:]
|
| 102 |
+
cfgfile_string = "Namespace()"
|
| 103 |
+
args_cmdline = parser.parse_args(cmdlne_string)
|
| 104 |
+
|
| 105 |
+
try:
|
| 106 |
+
cfgfilepath = os.path.join(args_cmdline.model_path, "cfg_args")
|
| 107 |
+
print("Looking for config file in", cfgfilepath)
|
| 108 |
+
with open(cfgfilepath) as cfg_file:
|
| 109 |
+
print("Config file found: {}".format(cfgfilepath))
|
| 110 |
+
cfgfile_string = cfg_file.read()
|
| 111 |
+
except TypeError:
|
| 112 |
+
print("Config file not found at")
|
| 113 |
+
pass
|
| 114 |
+
args_cfgfile = eval(cfgfile_string)
|
| 115 |
+
|
| 116 |
+
merged_dict = vars(args_cfgfile).copy()
|
| 117 |
+
for k,v in vars(args_cmdline).items():
|
| 118 |
+
if merged_dict.get(k) == None or v != None: # nt
|
| 119 |
+
merged_dict[k] = v
|
| 120 |
+
return Namespace(**merged_dict)
|
gaussian-grouping/config/gaussian_dataset/train.json
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"densify_until_iter": 10000 ,
|
| 3 |
+
"num_classes": 256,
|
| 4 |
+
"reg3d_interval": 5,
|
| 5 |
+
"reg3d_k": 5,
|
| 6 |
+
"reg3d_lambda_val": 2,
|
| 7 |
+
"reg3d_max_points": 500000,
|
| 8 |
+
"reg3d_sample_size": 2000
|
| 9 |
+
}
|
gaussian-grouping/config/object_inpaint/bear.json
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"num_classes": 256,
|
| 3 |
+
"removal_thresh": 0.3,
|
| 4 |
+
"select_obj_id" : [34],
|
| 5 |
+
"images": "images_inpaint_unseen",
|
| 6 |
+
"object_path" : "inpaint_object_mask_255",
|
| 7 |
+
"r" : 1,
|
| 8 |
+
"lambda_dlpips" : 0.5,
|
| 9 |
+
"finetune_iteration": 10000
|
| 10 |
+
}
|
gaussian-grouping/config/object_inpaint/mipnerf360/kitchen.json
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"num_classes": 256,
|
| 3 |
+
"removal_thresh": 0.3,
|
| 4 |
+
"select_obj_id" : [49],
|
| 5 |
+
"images": "images_inpaint_unseen",
|
| 6 |
+
"object_path" : "inpaint_object_mask_255",
|
| 7 |
+
"r" : 1,
|
| 8 |
+
"lambda_dlpips" : 0.5,
|
| 9 |
+
"finetune_iteration": 10000
|
| 10 |
+
}
|
gaussian-grouping/config/object_removal/bear.json
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"num_classes": 256,
|
| 3 |
+
"removal_thresh": 0.3,
|
| 4 |
+
"select_obj_id" : [34]
|
| 5 |
+
}
|
gaussian-grouping/config/object_removal/mipnerf360/kitchen.json
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"num_classes": 256,
|
| 3 |
+
"removal_thresh": 0.3,
|
| 4 |
+
"select_obj_id" : [49]
|
| 5 |
+
}
|
gaussian-grouping/convert.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import os
import logging
from argparse import ArgumentParser
import shutil

# This Python script is based on the shell converter script provided in the MipNerF 360 repository.
# It runs the COLMAP pipeline (feature extraction, matching, mapping, undistortion)
# on <source_path>/input and optionally produces half/quarter/eighth-resolution copies.
parser = ArgumentParser("Colmap converter")
parser.add_argument("--no_gpu", action='store_true')
parser.add_argument("--skip_matching", action='store_true')
parser.add_argument("--source_path", "-s", required=True, type=str)
parser.add_argument("--camera", default="OPENCV", type=str)
parser.add_argument("--colmap_executable", default="", type=str)
parser.add_argument("--resize", action="store_true")
parser.add_argument("--magick_executable", default="", type=str)
args = parser.parse_args()
colmap_command = '"{}"'.format(args.colmap_executable) if len(args.colmap_executable) > 0 else "colmap"
magick_command = '"{}"'.format(args.magick_executable) if len(args.magick_executable) > 0 else "magick"
use_gpu = 1 if not args.no_gpu else 0

if not args.skip_matching:
    os.makedirs(args.source_path + "/distorted/sparse", exist_ok=True)

    ## Feature extraction
    feat_extracton_cmd = colmap_command + " feature_extractor "\
        "--database_path " + args.source_path + "/distorted/database.db \
        --image_path " + args.source_path + "/input \
        --ImageReader.single_camera 1 \
        --ImageReader.camera_model " + args.camera + " \
        --SiftExtraction.use_gpu " + str(use_gpu)
    exit_code = os.system(feat_extracton_cmd)
    if exit_code != 0:
        logging.error(f"Feature extraction failed with code {exit_code}. Exiting.")
        exit(exit_code)

    ## Feature matching
    feat_matching_cmd = colmap_command + " exhaustive_matcher \
        --database_path " + args.source_path + "/distorted/database.db \
        --SiftMatching.use_gpu " + str(use_gpu)
    exit_code = os.system(feat_matching_cmd)
    if exit_code != 0:
        logging.error(f"Feature matching failed with code {exit_code}. Exiting.")
        exit(exit_code)

    ### Bundle adjustment
    # The default Mapper tolerance is unnecessarily large,
    # decreasing it speeds up bundle adjustment steps.
    mapper_cmd = (colmap_command + " mapper \
        --database_path " + args.source_path + "/distorted/database.db \
        --image_path "  + args.source_path + "/input \
        --output_path "  + args.source_path + "/distorted/sparse \
        --Mapper.ba_global_function_tolerance=0.000001")
    exit_code = os.system(mapper_cmd)
    if exit_code != 0:
        logging.error(f"Mapper failed with code {exit_code}. Exiting.")
        exit(exit_code)

### Image undistortion
## We need to undistort our images into ideal pinhole intrinsics.
img_undist_cmd = (colmap_command + " image_undistorter \
    --image_path " + args.source_path + "/input \
    --input_path " + args.source_path + "/distorted/sparse/0 \
    --output_path " + args.source_path + "\
    --output_type COLMAP")
exit_code = os.system(img_undist_cmd)
if exit_code != 0:
    # Fixed: this step previously reported "Mapper failed", which misled
    # users about which COLMAP stage had actually failed.
    logging.error(f"Image undistortion failed with code {exit_code}. Exiting.")
    exit(exit_code)

# COLMAP writes the undistorted model directly into sparse/; move everything
# into sparse/0 (the layout the dataset readers expect), skipping an existing
# '0' directory.
files = os.listdir(args.source_path + "/sparse")
os.makedirs(args.source_path + "/sparse/0", exist_ok=True)
# Copy each file from the source directory to the destination directory
for file in files:
    if file == '0':
        continue
    source_file = os.path.join(args.source_path, "sparse", file)
    destination_file = os.path.join(args.source_path, "sparse", "0", file)
    shutil.move(source_file, destination_file)

if(args.resize):
    print("Copying and resizing...")

    # Resize images to 1/2, 1/4 and 1/8 resolution with ImageMagick.
    os.makedirs(args.source_path + "/images_2", exist_ok=True)
    os.makedirs(args.source_path + "/images_4", exist_ok=True)
    os.makedirs(args.source_path + "/images_8", exist_ok=True)
    # Get the list of files in the source directory
    files = os.listdir(args.source_path + "/images")
    # Copy each file from the source directory to the destination directory
    for file in files:
        source_file = os.path.join(args.source_path, "images", file)

        destination_file = os.path.join(args.source_path, "images_2", file)
        shutil.copy2(source_file, destination_file)
        exit_code = os.system(magick_command + " mogrify -resize 50% " + destination_file)
        if exit_code != 0:
            logging.error(f"50% resize failed with code {exit_code}. Exiting.")
            exit(exit_code)

        destination_file = os.path.join(args.source_path, "images_4", file)
        shutil.copy2(source_file, destination_file)
        exit_code = os.system(magick_command + " mogrify -resize 25% " + destination_file)
        if exit_code != 0:
            logging.error(f"25% resize failed with code {exit_code}. Exiting.")
            exit(exit_code)

        destination_file = os.path.join(args.source_path, "images_8", file)
        shutil.copy2(source_file, destination_file)
        exit_code = os.system(magick_command + " mogrify -resize 12.5% " + destination_file)
        if exit_code != 0:
            logging.error(f"12.5% resize failed with code {exit_code}. Exiting.")
            exit(exit_code)

print("Done.")
gaussian-grouping/docs/dataset.md
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Gaussian Grouping: Segment and Edit Anything in 3D Scenes
|
| 2 |
+
|
| 3 |
+
We provide dataset format and custom dataset preparation in the [training doc](./train.md). Here we introduce the LERF-Mask dataset proposed in our paper and its evaluation.
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
## 1. LERF-Mask dataset
|
| 7 |
+
You can download LERF-Mask dataset from [this hugging-face link](https://huggingface.co/mqye/Gaussian-Grouping/tree/main/data/lerf_mask). Test set of LERF-Mask dataset includes 2-4 novel view images. The mask annotations are saved in `test_mask` folder. The name of each mask image corresponds to the input text-prompt.
|
| 8 |
+
|
| 9 |
+
```
|
| 10 |
+
lerf_mask
|
| 11 |
+
|____figurines
|
| 12 |
+
| |____distorted
|
| 13 |
+
| |____images
|
| 14 |
+
| |____images_train
|
| 15 |
+
| |____object_mask
|
| 16 |
+
| |____sparse
|
| 17 |
+
| |____stereo
|
| 18 |
+
| |____test_mask
|
| 19 |
+
| |____<novel view 0>
|
| 20 |
+
| | |____<text prompt 0>.png
|
| 21 |
+
| | |____...
|
| 22 |
+
| |____<novel view 1>
|
| 23 |
+
| | |____<text prompt 0>.png
|
| 24 |
+
| | |____...
|
| 25 |
+
|____ramen
|
| 26 |
+
| |____...
|
| 27 |
+
|____teatime
|
| 28 |
+
| |____...
|
| 29 |
+
```
|
| 30 |
+
|
| 31 |
+
## 2. Render mask with text-prompt
|
| 32 |
+
Since SAM masks are class-agnostic, we can obtain semantic information for each mask output by matching it against the mask output of a vision-language detector, for example [grounded-sam](https://github.com/IDEA-Research/Grounded-Segment-Anything).
|
| 33 |
+
|
| 34 |
+
We test our segmentation with a simple strategy using grounded-sam on the first frame for text-prompt. You can use the following command with the provided checkpoints on [hugging face](https://huggingface.co/mqye/Gaussian-Grouping/tree/main/checkpoint) or your own training result. In the future we can also explore better detectors and prompt formats.
|
| 35 |
+
|
| 36 |
+
```
|
| 37 |
+
python render_lerf_mask.py -m output/lerf_pretrain/figurines --skip_train
|
| 38 |
+
python render_lerf_mask.py -m output/lerf_pretrain/ramen --skip_train
|
| 39 |
+
python render_lerf_mask.py -m output/lerf_pretrain/teatime --skip_train
|
| 40 |
+
```
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
## 3. LERF-Mask evaluation
|
| 46 |
+
We provide our result on [hugging face](https://huggingface.co/mqye/Gaussian-Grouping/tree/main/result). We also provide a script for evaluating IoU and Boundary-IoU. You can change the output path to your output folder and run the script.
|
| 47 |
+
|
| 48 |
+
For example,
|
| 49 |
+
```
|
| 50 |
+
python script/eval_lerf_mask.py figurines
|
| 51 |
+
python script/eval_lerf_mask.py ramen
|
| 52 |
+
python script/eval_lerf_mask.py teatime
|
| 53 |
+
```
|
| 54 |
+
|
gaussian-grouping/docs/edit_removal_inpaint.md
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Gaussian Grouping: Segment and Edit Anything in 3D Scenes
|
| 2 |
+
|
| 3 |
+
## 1. 3D Object Removal
|
| 4 |
+
|
| 5 |
+
### 1.1 Training
|
| 6 |
+
|
| 7 |
+
First finish training as described in [training doc](./train.md). Save the output file then we can edit on it.
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
### 1.2 Remove the selected object
|
| 11 |
+
|
| 12 |
+
You can choose one or more object id(s) for removal and indicate it in the config file.
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
Example1. Bear dataset
|
| 16 |
+
```
|
| 17 |
+
bash script/edit_object_removal.sh output/bear config/object_removal/bear.json
|
| 18 |
+
```
|
| 19 |
+
|
| 20 |
+
Example2. Kitchen dataset
|
| 21 |
+
```
|
| 22 |
+
bash script/edit_object_removal.sh output/mipnerf360/kitchen config/object_removal/mipnerf360/kitchen.json
|
| 23 |
+
```
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
## 2. 3D Object inpainting
|
| 27 |
+
|
| 28 |
+
For 3D object inpainting, our pipeline includes three steps.
|
| 29 |
+
|
| 30 |
+
1. Remove the object
|
| 31 |
+
|
| 32 |
+
2. Inpaint the unseen region (always invisible due to occlusion) in 2D
|
| 33 |
+
|
| 34 |
+
3. Use 2D inpainting as pseudo label, finetune 3D Gaussians
|
| 35 |
+
|
| 36 |
+
For your custom datasets, you can follow these three steps to inpaint the object.
|
| 37 |
+
|
| 38 |
+
For our example datasets, we provide the pseudo labels on [hugging face](https://huggingface.co/mqye/Gaussian-Grouping/tree/main/data) and you can skip the first two steps below and directly finetune 3D Gaussians in **2.3 3D inpaint**.
|
| 39 |
+
|
| 40 |
+
### 2.1 (Optional) Unseen mask preparation
|
| 41 |
+
|
| 42 |
+
First finish training and remove the object you want to inpaint. After removal, we can get the **unseen region mask** for inpainting.
|
| 43 |
+
|
| 44 |
+
Unseen region mask is the empty region left after removing the object, and we can perform 2D inpainting on it. An example is shown in the bottom of fig8 in our paper. We can obtain the unseen region mask with DEVA. For example,
|
| 45 |
+
|
| 46 |
+
```bash
|
| 47 |
+
#!/bin/bash
|
| 48 |
+
|
| 49 |
+
cd Tracking-Anything-with-DEVA/
|
| 50 |
+
|
| 51 |
+
img_path=../output/mipnerf360/kitchen/train/ours_object_removal/iteration_30000/renders
|
| 52 |
+
mask_path=./output_2d_inpaint_mask/mipnerf360/kitchen
|
| 53 |
+
lama_path=../lama/LaMa_test_images/mipnerf360/kitchen
|
| 54 |
+
|
| 55 |
+
python demo/demo_with_text.py --chunk_size 4 --img_path $img_path --amp \
|
| 56 |
+
--temporal_setting semionline --size 480 --output $mask_path \
|
| 57 |
+
--prompt "black blurry hole"
|
| 58 |
+
|
| 59 |
+
python prepare_lama_input.py $img_path $mask_path $lama_path
|
| 60 |
+
cd ..
|
| 61 |
+
```
|
| 62 |
+
|
| 63 |
+
You can also try other prompts like "black region" and change the mask score threshold to get the best unseen region mask result.
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
### 2.2 (Optional) 2D inpaint
|
| 68 |
+
|
| 69 |
+
We follow [SPIN-NeRF](https://github.com/SamsungLabs/SPIn-NeRF) pipeline of 2D guidance for inpainting. We use LaMa to inpaint on 2D images rendered after removing the object with unseen region mask. We only need 2D inpainting on RGB and do not need inpainting on depth map.
|
| 70 |
+
|
| 71 |
+
Now, make sure to follow the [LaMa](https://github.com/advimman/lama) instructions for downloading the big-lama model.
|
| 72 |
+
|
| 73 |
+
```bash
|
| 74 |
+
#!/bin/bash
|
| 75 |
+
|
| 76 |
+
cd lama
|
| 77 |
+
export TORCH_HOME=$(pwd) && export PYTHONPATH=$(pwd)
|
| 78 |
+
|
| 79 |
+
dataset=mipnerf360/kitchen
|
| 80 |
+
img_dir=../data/$dataset
|
| 81 |
+
|
| 82 |
+
python bin/predict.py refine=True model.path=$(pwd)/big-lama indir=$(pwd)/LaMa_test_images/$dataset outdir=$(pwd)/output/$dataset
|
| 83 |
+
python prepare_pseudo_label.py $(pwd)/output/$dataset $img_dir
|
| 84 |
+
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
### 2.3 3D inpaint
|
| 89 |
+
Example1. Bear dataset
|
| 90 |
+
```
|
| 91 |
+
bash script/edit_object_inpaint.sh output/bear config/object_inpaint/bear.json
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
Example2. Kitchen dataset
|
| 95 |
+
```
|
| 96 |
+
bash script/edit_object_inpaint.sh output/mipnerf360/kitchen config/object_inpaint/mipnerf360/kitchen.json
|
| 97 |
+
```
|
gaussian-grouping/docs/install.md
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
### **Standard Installation**
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
Clone the repository locally
|
| 5 |
+
```
|
| 6 |
+
git clone https://github.com/lkeab/gaussian-grouping.git
|
| 7 |
+
cd gaussian-grouping
|
| 8 |
+
```
|
| 9 |
+
|
| 10 |
+
Our default, provided install method is based on Conda package and environment management:
|
| 11 |
+
```bash
|
| 12 |
+
conda create -n gaussian_grouping python=3.8 -y
|
| 13 |
+
conda activate gaussian_grouping
|
| 14 |
+
|
| 15 |
+
conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.3 -c pytorch
|
| 16 |
+
pip install plyfile==0.8.1
|
| 17 |
+
pip install tqdm scipy wandb opencv-python scikit-learn lpips
|
| 18 |
+
|
| 19 |
+
pip install submodules/diff-gaussian-rasterization
|
| 20 |
+
pip install submodules/simple-knn
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
(Optional) If you want to prepare masks on your own dataset, you will also need to prepare [DEVA](https://github.com/hkchengrex/Tracking-Anything-with-DEVA) environment.
|
| 24 |
+
|
| 25 |
+
```bash
|
| 26 |
+
cd Tracking-Anything-with-DEVA
|
| 27 |
+
pip install -e .
|
| 28 |
+
bash scripts/download_models.sh # Download the pretrained models
|
| 29 |
+
|
| 30 |
+
git clone https://github.com/hkchengrex/Grounded-Segment-Anything.git
|
| 31 |
+
cd Grounded-Segment-Anything
|
| 32 |
+
export AM_I_DOCKER=False
|
| 33 |
+
export BUILD_WITH_CUDA=True
|
| 34 |
+
python -m pip install -e segment_anything
|
| 35 |
+
python -m pip install -e GroundingDINO
|
| 36 |
+
|
| 37 |
+
cd ../..
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
(Optional) If you want to inpaint on your own dataset, you will also need to prepare [LaMa](https://github.com/advimman/lama) environment.
|
| 41 |
+
|
| 42 |
+
```bash
|
| 43 |
+
cd lama
|
| 44 |
+
pip install -r requirements.txt
|
| 45 |
+
cd ..
|
| 46 |
+
```
|
gaussian-grouping/docs/train.md
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Gaussian Grouping: Segment and Edit Anything in 3D Scenes
|
| 2 |
+
|
| 3 |
+
## 1. Prepare associated SAM masks
|
| 4 |
+
|
| 5 |
+
### 1.1 Pre-converted datasets
|
| 6 |
+
We provide the converted datasets used in our paper. You can directly train on the datasets from this [hugging face link](https://huggingface.co/mqye/Gaussian-Grouping/tree/main)
|
| 7 |
+
|
| 8 |
+
```
|
| 9 |
+
data
|
| 10 |
+
|____bear
|
| 11 |
+
|____lerf
|
| 12 |
+
| |____figurines
|
| 13 |
+
|____mipnerf360
|
| 14 |
+
| |____counter
|
| 15 |
+
```
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
### 1.2 (Optional) Prepare your own datasets
|
| 19 |
+
For your custom dataset, you can follow this step to create masks for training. If you want to prepare masks on your own dataset, you will need [DEVA](../Tracking-Anything-with-DEVA/README.md) python environment and checkpoints.
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
```
|
| 23 |
+
<location>
|
| 24 |
+
|---input
|
| 25 |
+
|---<image 0>
|
| 26 |
+
|---<image 1>
|
| 27 |
+
|---...
|
| 28 |
+
```
|
| 29 |
+
Firstly, convert initial camera pose and point cloud with colmap
|
| 30 |
+
```
|
| 31 |
+
python convert.py -s <location>
|
| 32 |
+
```
|
| 33 |
+
|
| 34 |
+
Then, convert the SAM-associated object masks. Note that the quality of the converted masks largely affects the results of 3D segmentation and editing. Since generating the masks is fast, it is best to tune the segment-anything parameters first so that the masks are as consistent and reasonable as possible across multiple views.
|
| 35 |
+
|
| 36 |
+
Example1. Bear dataset
|
| 37 |
+
```
|
| 38 |
+
bash script/prepare_pseudo_label.sh bear 1
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
Example2. figurines dataset
|
| 42 |
+
```
|
| 43 |
+
bash script/prepare_pseudo_label.sh lerf/figurines 1
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
Example3. counter dataset
|
| 47 |
+
```
|
| 48 |
+
bash script/prepare_pseudo_label.sh mipnerf360/counter 2
|
| 49 |
+
```
|
| 50 |
+
|
| 51 |
+
## 2. Training and Masks Rendering
|
| 52 |
+
|
| 53 |
+
For Gaussian Grouping training and segmentation rendering of trained 3D Gaussian Grouping model:
|
| 54 |
+
|
| 55 |
+
Example1. Bear dataset
|
| 56 |
+
```
|
| 57 |
+
bash script/train.sh bear 1
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
Example2. figurines dataset
|
| 61 |
+
```
|
| 62 |
+
bash script/train_lerf.sh lerf/figurines 1
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
Example3. counter dataset
|
| 66 |
+
```
|
| 67 |
+
bash script/train.sh mipnerf360/counter 2
|
| 68 |
+
```
|
| 69 |
+
|
gaussian-grouping/edit_object_inpaint.py
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2023, Gaussian-Grouping
|
| 2 |
+
# Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# ------------------------------------------------------------------------
|
| 6 |
+
# Modified from codes in Gaussian-Splatting
|
| 7 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from scene import Scene
|
| 11 |
+
import os
|
| 12 |
+
from tqdm import tqdm
|
| 13 |
+
from os import makedirs
|
| 14 |
+
from gaussian_renderer import render
|
| 15 |
+
import torchvision
|
| 16 |
+
from utils.general_utils import safe_state
|
| 17 |
+
from argparse import ArgumentParser
|
| 18 |
+
from arguments import ModelParams, PipelineParams, OptimizationParams, get_combined_args
|
| 19 |
+
from gaussian_renderer import GaussianModel
|
| 20 |
+
import numpy as np
|
| 21 |
+
from PIL import Image
|
| 22 |
+
import cv2
|
| 23 |
+
from utils.loss_utils import masked_l1_loss
|
| 24 |
+
from random import randint
|
| 25 |
+
import lpips
|
| 26 |
+
import json
|
| 27 |
+
|
| 28 |
+
from render import feature_to_rgb, visualize_obj
|
| 29 |
+
from edit_object_removal import points_inside_convex_hull
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def mask_to_bbox(mask):
    """Return the tight bounding box (xmin, ymin, xmax, ymax) of a 2D mask.

    Coordinates are inclusive tensor indices. Assumes the mask contains at
    least one nonzero element.
    """
    # Indices of rows / columns that contain at least one set pixel.
    row_hits = torch.where(torch.any(mask, dim=1))[0]
    col_hits = torch.where(torch.any(mask, dim=0))[0]
    ymin, ymax = row_hits[0], row_hits[-1]
    xmin, xmax = col_hits[0], col_hits[-1]
    return xmin, ymin, xmax, ymax
|
| 40 |
+
|
| 41 |
+
def crop_using_bbox(image, bbox):
    """Crop a CHW image tensor to an inclusive (xmin, ymin, xmax, ymax) box."""
    x0, y0, x1, y1 = bbox
    # Bounds are inclusive, so extend each upper slice limit by one.
    return image[:, y0 : y1 + 1, x0 : x1 + 1]
|
| 44 |
+
|
| 45 |
+
# Split a batched image into a K x K grid of equally sized patches.
def divide_into_patches(image, K):
    """Return a (B, K*K, C, H//K, W//K) tensor of non-overlapping patches."""
    B, C, H, W = image.shape
    kernel = (H // K, W // K)
    # Non-overlapping extraction: stride equals the patch size.
    unfolded = torch.nn.functional.unfold(image, kernel, stride=kernel)
    stacked = unfolded.view(B, C, kernel[0], kernel[1], -1)
    # Move the patch index next to the batch dimension.
    return stacked.permute(0, 4, 1, 2, 3)
|
| 52 |
+
|
| 53 |
+
def finetune_inpaint(opt, model_path, iteration, views, gaussians, pipeline, background, classifier, selected_obj_ids, cameras_extent, removal_thresh, finetune_iteration):
    """Finetune 3D Gaussians in the removed-object region against 2D-inpainted pseudo labels.

    Args:
        opt: Optimization parameters (uses lambda_dssim, densify_grad_threshold).
        model_path: Output directory of the trained model.
        iteration: Loaded checkpoint iteration. NOTE(review): shadowed by the
            loop variable below; the saved folder name uses the loop's final value.
        views: Training cameras whose images are the 2D-inpainted pseudo labels.
        gaussians: GaussianModel to finetune.
        pipeline, background: Rendering settings.
        classifier: 1x1 conv mapping per-Gaussian identity features to class logits.
        selected_obj_ids: Object id(s) whose region is being inpainted.
        cameras_extent: Scene extent passed to densify_and_prune.
        removal_thresh: Softmax-probability threshold for selecting 3D Gaussians.
        finetune_iteration: Number of finetuning iterations.

    Returns:
        The finetuned GaussianModel (also saved under point_cloud_object_inpaint/).
    """
    # get 3d gaussians idx corresponding to select obj id
    with torch.no_grad():
        logits3d = classifier(gaussians._objects_dc.permute(2,0,1))
        prob_obj3d = torch.softmax(logits3d,dim=0)
        mask = prob_obj3d[selected_obj_ids, :, :] > removal_thresh
        mask3d = mask.any(dim=0).squeeze()

        # Also include points inside the convex hull of the selected Gaussians,
        # so the interior of the removed object is finetuned as well.
        mask3d_convex = points_inside_convex_hull(gaussians._xyz.detach(),mask3d,outlier_factor=1.0)
        mask3d = torch.logical_or(mask3d,mask3d_convex)
        mask3d = mask3d.float()[:,None,None]

    # fix some gaussians: only Gaussians inside mask3d remain trainable
    gaussians.inpaint_setup(opt,mask3d)
    iterations = finetune_iteration
    progress_bar = tqdm(range(iterations), desc="Finetuning progress")
    # Frozen VGG-based LPIPS network used as a perceptual loss.
    LPIPS = lpips.LPIPS(net='vgg')
    for param in LPIPS.parameters():
        param.requires_grad = False
    LPIPS.cuda()


    for iteration in range(iterations):
        # Sample a random training view each step.
        viewpoint_stack = views.copy()
        viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1))
        render_pkg = render(viewpoint_cam, gaussians, pipeline, background)
        image, viewspace_point_tensor, visibility_filter, radii, objects = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"], render_pkg["render_object"]

        # 2D inpaint mask (0/255 values); L1 loss is applied outside the mask.
        mask2d = viewpoint_cam.objects > 128
        gt_image = viewpoint_cam.original_image.cuda()
        Ll1 = masked_l1_loss(image, gt_image, ~mask2d)

        # LPIPS perceptual loss on a K x K patch grid over the mask's bounding
        # box, with images rescaled from [0,1] to [-1,1] as LPIPS expects.
        bbox = mask_to_bbox(mask2d)
        cropped_image = crop_using_bbox(image, bbox)
        cropped_gt_image = crop_using_bbox(gt_image, bbox)
        K = 2
        rendering_patches = divide_into_patches(cropped_image[None, ...], K)
        gt_patches = divide_into_patches(cropped_gt_image[None, ...], K)
        lpips_loss = LPIPS(rendering_patches.squeeze()*2-1,gt_patches.squeeze()*2-1).mean()

        loss = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * lpips_loss
        loss.backward()

        with torch.no_grad():
            # Densify/prune only during the first 5000 iterations.
            if iteration < 5000 :
                # Keep track of max radii in image-space for pruning
                gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
                gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter)

                if iteration % 300 == 0:
                    size_threshold = 20
                    gaussians.densify_and_prune(opt.densify_grad_threshold, 0.005, cameras_extent, size_threshold)

            gaussians.optimizer.step()
            gaussians.optimizer.zero_grad(set_to_none = True)

            if iteration % 10 == 0:
                progress_bar.set_postfix({"Loss": f"{loss:.{7}f}"})
                progress_bar.update(10)
    progress_bar.close()

    # save gaussians (iteration holds finetune_iteration-1 after the loop,
    # which matches the folder name the render step loads later)
    point_cloud_path = os.path.join(model_path, "point_cloud_object_inpaint/iteration_{}".format(iteration))
    gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply"))

    return gaussians
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def render_set(model_path, name, iteration, views, gaussians, pipeline, background, classifier):
    """Render a camera set and save RGB, identity-feature, and object-mask images.

    Outputs, under <model_path>/<name>/ours<iteration>/:
      renders/            rendered RGB images
      gt/                 ground-truth RGB images
      objects_feature16/  identity feature maps visualized as RGB
      gt_objects_color/   colorized ground-truth object masks
      objects_pred/       colorized argmax-predicted object masks
      concat/             5-wide side-by-side panels plus a result.mp4 video

    NOTE(review): assumes `views` is non-empty — `gt` from the last loop
    iteration is reused below to size the video writer.
    """
    render_path = os.path.join(model_path, name, "ours{}".format(iteration), "renders")
    gts_path = os.path.join(model_path, name, "ours{}".format(iteration), "gt")
    colormask_path = os.path.join(model_path, name, "ours{}".format(iteration), "objects_feature16")
    gt_colormask_path = os.path.join(model_path, name, "ours{}".format(iteration), "gt_objects_color")
    pred_obj_path = os.path.join(model_path, name, "ours{}".format(iteration), "objects_pred")
    makedirs(render_path, exist_ok=True)
    makedirs(gts_path, exist_ok=True)
    makedirs(colormask_path, exist_ok=True)
    makedirs(gt_colormask_path, exist_ok=True)
    makedirs(pred_obj_path, exist_ok=True)

    for idx, view in enumerate(tqdm(views, desc="Rendering progress")):
        results = render(view, gaussians, pipeline, background)
        rendering = results["render"]
        rendering_obj = results["render_object"]
        # Per-pixel object prediction from the rendered identity features.
        logits = classifier(rendering_obj)
        pred_obj = torch.argmax(logits,dim=0)
        pred_obj_mask = visualize_obj(pred_obj.cpu().numpy().astype(np.uint8))

        gt_objects = view.objects
        gt_rgb_mask = visualize_obj(gt_objects.cpu().numpy().astype(np.uint8))

        rgb_mask = feature_to_rgb(rendering_obj)
        Image.fromarray(rgb_mask).save(os.path.join(colormask_path, '{0:05d}'.format(idx) + ".png"))
        Image.fromarray(gt_rgb_mask).save(os.path.join(gt_colormask_path, '{0:05d}'.format(idx) + ".png"))
        Image.fromarray(pred_obj_mask).save(os.path.join(pred_obj_path, '{0:05d}'.format(idx) + ".png"))
        gt = view.original_image[0:3, :, :]
        torchvision.utils.save_image(rendering, os.path.join(render_path, '{0:05d}'.format(idx) + ".png"))
        torchvision.utils.save_image(gt, os.path.join(gts_path, '{0:05d}'.format(idx) + ".png"))

    # Assemble per-view comparison panels into a video; slicing 'renders' off
    # render_path gives the parent output folder.
    out_path = os.path.join(render_path[:-8],'concat')
    makedirs(out_path,exist_ok=True)
    fourcc = cv2.VideoWriter.fourcc(*'DIVX')
    size = (gt.shape[-1]*5,gt.shape[-2])
    fps = float(5) if 'train' in out_path else float(1)
    writer = cv2.VideoWriter(os.path.join(out_path,'result.mp4'), fourcc, fps, size)

    for file_name in sorted(os.listdir(gts_path)):
        gt = np.array(Image.open(os.path.join(gts_path,file_name)))
        rgb = np.array(Image.open(os.path.join(render_path,file_name)))
        gt_obj = np.array(Image.open(os.path.join(gt_colormask_path,file_name)))
        render_obj = np.array(Image.open(os.path.join(colormask_path,file_name)))
        pred_obj = np.array(Image.open(os.path.join(pred_obj_path,file_name)))

        result = np.hstack([gt,rgb,gt_obj,pred_obj,render_obj])
        result = result.astype('uint8')

        Image.fromarray(result).save(os.path.join(out_path,file_name))
        # PIL arrays are RGB; OpenCV expects BGR, hence the channel reversal.
        writer.write(result[:,:,::-1])

    writer.release()
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def inpaint(dataset : ModelParams, iteration : int, pipeline : PipelineParams, skip_train : bool, skip_test : bool, opt : OptimizationParams, select_obj_id : int, removal_thresh : float, finetune_iteration: int):
|
| 179 |
+
# 1. load gaussian checkpoint
|
| 180 |
+
gaussians = GaussianModel(dataset.sh_degree)
|
| 181 |
+
scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False)
|
| 182 |
+
num_classes = dataset.num_classes
|
| 183 |
+
print("Num classes: ",num_classes)
|
| 184 |
+
classifier = torch.nn.Conv2d(gaussians.num_objects, num_classes, kernel_size=1)
|
| 185 |
+
classifier.cuda()
|
| 186 |
+
classifier.load_state_dict(torch.load(os.path.join(dataset.model_path,"point_cloud","iteration_"+str(scene.loaded_iter),"classifier.pth")))
|
| 187 |
+
bg_color = [1,1,1] if dataset.white_background else [0, 0, 0]
|
| 188 |
+
background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
# 2. inpaint selected object
|
| 192 |
+
gaussians = finetune_inpaint(opt, dataset.model_path, scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background, classifier, select_obj_id, scene.cameras_extent, removal_thresh, finetune_iteration)
|
| 193 |
+
|
| 194 |
+
# 3. render new result
|
| 195 |
+
dataset.object_path = 'object_mask'
|
| 196 |
+
dataset.images = 'images'
|
| 197 |
+
scene = Scene(dataset, gaussians, load_iteration='_object_inpaint/iteration_'+str(finetune_iteration-1), shuffle=False)
|
| 198 |
+
with torch.no_grad():
|
| 199 |
+
if not skip_train:
|
| 200 |
+
render_set(dataset.model_path, "train", scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background, classifier)
|
| 201 |
+
|
| 202 |
+
if not skip_test:
|
| 203 |
+
render_set(dataset.model_path, "test", scene.loaded_iter, scene.getTestCameras(), gaussians, pipeline, background, classifier)
|
| 204 |
+
|
| 205 |
+
if __name__ == "__main__":
|
| 206 |
+
# Set up command line argument parser
|
| 207 |
+
parser = ArgumentParser(description="Testing script parameters")
|
| 208 |
+
model = ModelParams(parser, sentinel=True)
|
| 209 |
+
opt = OptimizationParams(parser)
|
| 210 |
+
pipeline = PipelineParams(parser)
|
| 211 |
+
parser.add_argument("--iteration", default=-1, type=int)
|
| 212 |
+
parser.add_argument("--skip_train", action="store_true")
|
| 213 |
+
parser.add_argument("--skip_test", action="store_true")
|
| 214 |
+
parser.add_argument("--quiet", action="store_true")
|
| 215 |
+
|
| 216 |
+
parser.add_argument("--config_file", type=str, default="config/object_removal/bear.json", help="Path to the configuration file")
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
args = get_combined_args(parser)
|
| 220 |
+
print("Rendering " + args.model_path)
|
| 221 |
+
|
| 222 |
+
# Read and parse the configuration file
|
| 223 |
+
try:
|
| 224 |
+
with open(args.config_file, 'r') as file:
|
| 225 |
+
config = json.load(file)
|
| 226 |
+
except FileNotFoundError:
|
| 227 |
+
print(f"Error: Configuration file '{args.config_file}' not found.")
|
| 228 |
+
exit(1)
|
| 229 |
+
except json.JSONDecodeError as e:
|
| 230 |
+
print(f"Error: Failed to parse the JSON configuration file: {e}")
|
| 231 |
+
exit(1)
|
| 232 |
+
|
| 233 |
+
args.num_classes = config.get("num_classes", 200)
|
| 234 |
+
args.removal_thresh = config.get("removal_thresh", 0.3)
|
| 235 |
+
args.select_obj_id = config.get("select_obj_id", [34])
|
| 236 |
+
args.images = config.get("images", "images")
|
| 237 |
+
args.object_path = config.get("object_path", "object_mask")
|
| 238 |
+
args.resolution = config.get("r", 1)
|
| 239 |
+
args.lambda_dssim = config.get("lambda_dlpips", 0.5)
|
| 240 |
+
args.finetune_iteration = config.get("finetune_iteration", 10_000)
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
# Initialize system state (RNG)
|
| 244 |
+
safe_state(args.quiet)
|
| 245 |
+
|
| 246 |
+
inpaint(model.extract(args), args.iteration, pipeline.extract(args), args.skip_train, args.skip_test, opt.extract(args), args.select_obj_id, args.removal_thresh, args.finetune_iteration)
|
gaussian-grouping/edit_object_removal.py
ADDED
|
@@ -0,0 +1,901 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2023, Gaussian-Grouping
|
| 2 |
+
# Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# ------------------------------------------------------------------------
|
| 6 |
+
# Modified from codes in Gaussian-Splatting
|
| 7 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from scene import Scene
|
| 11 |
+
import os
|
| 12 |
+
from tqdm import tqdm
|
| 13 |
+
from os import makedirs
|
| 14 |
+
from gaussian_renderer import render
|
| 15 |
+
import torchvision
|
| 16 |
+
from utils.general_utils import safe_state
|
| 17 |
+
from argparse import ArgumentParser
|
| 18 |
+
from arguments import ModelParams, PipelineParams, OptimizationParams, get_combined_args
|
| 19 |
+
from gaussian_renderer import GaussianModel
|
| 20 |
+
import numpy as np
|
| 21 |
+
from PIL import Image
|
| 22 |
+
import json
|
| 23 |
+
|
| 24 |
+
import cv2
|
| 25 |
+
|
| 26 |
+
from scipy.spatial import Delaunay
|
| 27 |
+
|
| 28 |
+
from sklearn.cluster import DBSCAN
|
| 29 |
+
from collections import Counter
|
| 30 |
+
|
| 31 |
+
def points_inside_convex_hull(point_cloud, mask, remove_outliers=True, outlier_factor=1.0):
|
| 32 |
+
|
| 33 |
+
masked_points = point_cloud[mask].cpu().numpy()
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
if remove_outliers:
|
| 37 |
+
Q1 = np.percentile(masked_points, 25, axis=0)
|
| 38 |
+
Q3 = np.percentile(masked_points, 75, axis=0)
|
| 39 |
+
IQR = Q3 - Q1
|
| 40 |
+
outlier_mask = (masked_points < (Q1 - outlier_factor * IQR)) | (masked_points > (Q3 + outlier_factor * IQR))
|
| 41 |
+
filtered_masked_points = masked_points[~np.any(outlier_mask, axis=1)]
|
| 42 |
+
else:
|
| 43 |
+
filtered_masked_points = masked_points
|
| 44 |
+
|
| 45 |
+
if filtered_masked_points.shape[0] < 5:
|
| 46 |
+
return mask
|
| 47 |
+
|
| 48 |
+
delaunay = Delaunay(filtered_masked_points)
|
| 49 |
+
|
| 50 |
+
points_inside_hull_mask = delaunay.find_simplex(point_cloud.cpu().numpy()) >= 0
|
| 51 |
+
|
| 52 |
+
inside_hull_tensor_mask = torch.tensor(points_inside_hull_mask, device='cuda')
|
| 53 |
+
|
| 54 |
+
return inside_hull_tensor_mask
|
| 55 |
+
|
| 56 |
+
def get_center(point_cloud, mask):
|
| 57 |
+
|
| 58 |
+
selected_points = point_cloud[mask.bool().squeeze()]
|
| 59 |
+
|
| 60 |
+
selected_points_np = selected_points.cpu().numpy()
|
| 61 |
+
Q1 = np.percentile(selected_points_np, 1, axis=0)
|
| 62 |
+
Q3 = np.percentile(selected_points_np, 99, axis=0)
|
| 63 |
+
outlier_mask = (selected_points_np < Q1) | (selected_points_np > Q3)
|
| 64 |
+
filtered_selected_points = torch.Tensor(selected_points_np[~np.any(outlier_mask, axis=1)]).cuda()
|
| 65 |
+
|
| 66 |
+
min_coor, _tmp = torch.min(filtered_selected_points, dim=0)
|
| 67 |
+
max_coor, _tmp = torch.max(filtered_selected_points, dim=0)
|
| 68 |
+
|
| 69 |
+
shrink_rate = 0.02
|
| 70 |
+
delta = (max_coor - min_coor) * shrink_rate
|
| 71 |
+
min_coor += delta
|
| 72 |
+
max_coor -= delta
|
| 73 |
+
|
| 74 |
+
center = (min_coor + max_coor) / 2
|
| 75 |
+
|
| 76 |
+
return center
|
| 77 |
+
|
| 78 |
+
def get_pixel_coord(viewpoint_camera, point):
|
| 79 |
+
point_coord = list(point)
|
| 80 |
+
point_coord.append(1)
|
| 81 |
+
quad_coord = np.array([point_coord], dtype=np.float32)
|
| 82 |
+
proj_coord = (np.dot(viewpoint_camera.full_proj_transform.cpu().numpy().T, quad_coord.T)).T
|
| 83 |
+
|
| 84 |
+
proj_coord[:, :2] /= proj_coord[:, 3, np.newaxis]
|
| 85 |
+
proj_coord = proj_coord[:, :2]
|
| 86 |
+
pixel_coord = ((proj_coord + 1) / 2 * np.array([viewpoint_camera.image_width, viewpoint_camera.image_height])).astype(np.int32)
|
| 87 |
+
return np.array(pixel_coord).squeeze()
|
| 88 |
+
|
| 89 |
+
def check_pos(mask, left_up, right_down):
|
| 90 |
+
l,u = left_up
|
| 91 |
+
r,d = right_down
|
| 92 |
+
rect = mask[u:d+1, l:r+1]
|
| 93 |
+
|
| 94 |
+
white_pixels = np.sum(rect == 255)
|
| 95 |
+
total_pixels = rect.size
|
| 96 |
+
|
| 97 |
+
ratio = white_pixels / total_pixels
|
| 98 |
+
if ratio < 0.8:
|
| 99 |
+
return 0
|
| 100 |
+
else:
|
| 101 |
+
rect[:] = 0
|
| 102 |
+
return 1
|
| 103 |
+
|
| 104 |
+
def crop_image_to_size(image, target_width, target_height):
|
| 105 |
+
original_width, original_height = image.size
|
| 106 |
+
crop_left = (original_width - target_width) // 2
|
| 107 |
+
crop_right = original_width - crop_left
|
| 108 |
+
crop_top = (original_height - target_height) // 2
|
| 109 |
+
crop_bottom = original_height - crop_top
|
| 110 |
+
cropped_image = image.crop((crop_left, crop_top, crop_right, crop_bottom))
|
| 111 |
+
return cropped_image
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def find_clusters(vertices: np.ndarray):
|
| 115 |
+
|
| 116 |
+
dbscan = DBSCAN(eps=0.05, min_samples=15)
|
| 117 |
+
labels = dbscan.fit_predict(vertices)
|
| 118 |
+
|
| 119 |
+
label_counts = Counter(labels)
|
| 120 |
+
|
| 121 |
+
most_common_label, most_common_count = label_counts.most_common(1)[0]
|
| 122 |
+
|
| 123 |
+
members = vertices[labels == most_common_label]
|
| 124 |
+
centroid = np.mean(members, axis=0)
|
| 125 |
+
|
| 126 |
+
sx = np.max(members[:, 0]) - np.min(members[:, 0])
|
| 127 |
+
sy = np.max(members[:, 1]) - np.min(members[:, 1])
|
| 128 |
+
sz = np.max(members[:, 2]) - np.min(members[:, 2])
|
| 129 |
+
|
| 130 |
+
extend = (sx, sy, sz)
|
| 131 |
+
|
| 132 |
+
return centroid, extend
|
| 133 |
+
|
| 134 |
+
def get_bb(point_cloud, mask, bb):
|
| 135 |
+
|
| 136 |
+
selected_points = point_cloud[mask.bool().squeeze()]
|
| 137 |
+
selected_points_np = selected_points.cpu().numpy()
|
| 138 |
+
|
| 139 |
+
centroid, extend = find_clusters(selected_points_np)
|
| 140 |
+
|
| 141 |
+
bb['size'] = [float(extend[i] / 2) for i in range(3)]
|
| 142 |
+
bb['centroid'] = [float(centroid[i]) for i in range(3)]
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def compute_camera_extrinsics(center, size, fov_x, fov_y, theta):
|
| 146 |
+
Cx, Cy, Cz = center
|
| 147 |
+
maxl = 2 * max(size)
|
| 148 |
+
|
| 149 |
+
r_x = 2 * maxl / (2 * np.tan(fov_x / 2))
|
| 150 |
+
r_y = 2 * maxl / (2 * np.tan(fov_y / 2))
|
| 151 |
+
r = max(r_x, r_y)
|
| 152 |
+
|
| 153 |
+
Px = Cx + r * np.cos(theta)
|
| 154 |
+
Py = Cy + r * np.sin(theta)
|
| 155 |
+
Pz = Cz + size[2] * 2
|
| 156 |
+
|
| 157 |
+
P_cam = np.array([Px, Py, Pz])
|
| 158 |
+
d = np.array([Cx - Px, Cy - Py, Cz - Pz])
|
| 159 |
+
d = d / np.linalg.norm(d)
|
| 160 |
+
|
| 161 |
+
up = np.array([0, 0, 1])
|
| 162 |
+
|
| 163 |
+
right = -np.cross(up, d)
|
| 164 |
+
right = right / np.linalg.norm(right)
|
| 165 |
+
|
| 166 |
+
up = np.cross(d, right)
|
| 167 |
+
|
| 168 |
+
R = np.vstack([right, up, d]).T
|
| 169 |
+
T = -np.linalg.inv(R) @ P_cam
|
| 170 |
+
|
| 171 |
+
return R, T, r
|
| 172 |
+
|
| 173 |
+
def cal_dis(point_cloud, T_init, threshold_xy = 0.1):
|
| 174 |
+
|
| 175 |
+
points = point_cloud.cpu().numpy()
|
| 176 |
+
filtered_points = points[
|
| 177 |
+
(np.abs(points[:, 0] - T_init[0]) < threshold_xy) &
|
| 178 |
+
(np.abs(points[:, 1] - T_init[1]) < threshold_xy) &
|
| 179 |
+
(points[:, 2] < T_init[2])
|
| 180 |
+
]
|
| 181 |
+
|
| 182 |
+
print(len(filtered_points), np.percentile(filtered_points, 85, axis=0)[2], np.percentile(filtered_points, 90, axis=0)[2], np.percentile(filtered_points, 95, axis=0)[2])
|
| 183 |
+
|
| 184 |
+
upper_center = np.percentile(filtered_points, 95, axis=0)
|
| 185 |
+
return T_init[2] - upper_center[2]
|
| 186 |
+
|
| 187 |
+
def generate_spiral_path(T_init, radius, num_frames=180):
|
| 188 |
+
|
| 189 |
+
theta = np.linspace(0, 2 * np.pi * 5, num_frames)
|
| 190 |
+
|
| 191 |
+
z = np.linspace(T_init[2], T_init[2] - radius / 2, num_frames)
|
| 192 |
+
|
| 193 |
+
r = np.sqrt(radius ** 2 - (z - (T_init[2] - radius)) ** 2)
|
| 194 |
+
r[0] += 1e-5
|
| 195 |
+
|
| 196 |
+
x = T_init[0] + r * np.cos(theta)
|
| 197 |
+
y = T_init[1] + r * np.sin(theta)
|
| 198 |
+
|
| 199 |
+
return np.vstack((x, y, z)).T
|
| 200 |
+
|
| 201 |
+
def generate_circle_path(T_init, T_obj, num_frames=60):
|
| 202 |
+
|
| 203 |
+
init_theta = np.arctan2(T_init[1]-T_obj[1], T_init[0]-T_obj[0])
|
| 204 |
+
theta = np.linspace(0, 2 * np.pi, num_frames) + init_theta
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
r = np.sqrt((T_init[0] - T_obj[0]) ** 2 + (T_init[1] - T_obj[1]) ** 2)
|
| 208 |
+
x = T_obj[0] + r * np.cos(theta)
|
| 209 |
+
y = T_obj[1] + r * np.sin(theta)
|
| 210 |
+
z = np.full(x.shape, T_init[2])
|
| 211 |
+
|
| 212 |
+
return np.vstack((x, y, z)).T
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def look_at(camera_position, target_position):
|
| 216 |
+
forward = target_position - camera_position
|
| 217 |
+
forward = forward / np.linalg.norm(forward)
|
| 218 |
+
|
| 219 |
+
up = np.array([0.0, 0.0, 1.0])
|
| 220 |
+
|
| 221 |
+
if np.allclose(up, forward):
|
| 222 |
+
return np.array([[1,0,0],[0,1,0],[0,0,1]])
|
| 223 |
+
if np.allclose(-up, forward):
|
| 224 |
+
return np.array([[1,0,0],[0,-1,0],[0,0,-1]])
|
| 225 |
+
|
| 226 |
+
right = np.cross(forward, up)
|
| 227 |
+
right = right / np.linalg.norm(right)
|
| 228 |
+
|
| 229 |
+
up = np.cross(right, forward)
|
| 230 |
+
|
| 231 |
+
rotation_matrix = np.vstack((right, -up, forward)).T
|
| 232 |
+
return rotation_matrix
|
| 233 |
+
|
| 234 |
+
def find_first_intersection(ray_origin, ray_direction, points, tolerance=0.1):
|
| 235 |
+
ray_direction = ray_direction / np.linalg.norm(ray_direction)
|
| 236 |
+
|
| 237 |
+
intersections = []
|
| 238 |
+
|
| 239 |
+
for point in points:
|
| 240 |
+
vec_to_point = point - ray_origin
|
| 241 |
+
projection_length = np.dot(vec_to_point, ray_direction)
|
| 242 |
+
|
| 243 |
+
if projection_length < 0:
|
| 244 |
+
continue
|
| 245 |
+
projection_point = ray_origin + projection_length * ray_direction
|
| 246 |
+
distance_to_ray = np.linalg.norm(point - projection_point)
|
| 247 |
+
if distance_to_ray < tolerance:
|
| 248 |
+
distance_from_origin = np.linalg.norm(point - ray_origin)
|
| 249 |
+
intersections.append((point, distance_from_origin))
|
| 250 |
+
|
| 251 |
+
if intersections:
|
| 252 |
+
intersections.sort(key=lambda x: x[1])
|
| 253 |
+
count_to_take = max(1, len(intersections) * 5 // 100)
|
| 254 |
+
return intersections[count_to_take - 1][0]
|
| 255 |
+
|
| 256 |
+
else:
|
| 257 |
+
print('find intersection failed!')
|
| 258 |
+
return find_first_intersection(ray_origin, ray_direction, points, tolerance=tolerance + 0.05)
|
| 259 |
+
raise
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def editing_setup(opt, model_path, iteration, views, gaussians, pipeline, background, cameras_extent, removal_thresh, args):
|
| 263 |
+
|
| 264 |
+
selected_obj_ids = torch.tensor(args.select_obj_id).cuda()
|
| 265 |
+
|
| 266 |
+
parameter = args.dst_center if args.operation == "translate" else args.euler_angle if args.operation == "rotate" else ""
|
| 267 |
+
print(f"Editing: {args.operation} {selected_obj_ids.item()} {parameter}")
|
| 268 |
+
|
| 269 |
+
if selected_obj_ids == -1:
|
| 270 |
+
mask3d = torch.ones(gaussians._xyz.shape[0])
|
| 271 |
+
masked_center = torch.zeros(3).cuda()
|
| 272 |
+
|
| 273 |
+
elif selected_obj_ids == -2:
|
| 274 |
+
mask3d = torch.zeros(gaussians._xyz.shape[0])
|
| 275 |
+
masked_center = torch.zeros(3).cuda()
|
| 276 |
+
|
| 277 |
+
else:
|
| 278 |
+
with torch.no_grad():
|
| 279 |
+
mask3d = gaussians._objects_dc[:,0,0] == selected_obj_ids
|
| 280 |
+
mask3d = mask3d.float()[:,None,None]
|
| 281 |
+
|
| 282 |
+
masked_center = get_center(gaussians._xyz.detach(), mask3d)
|
| 283 |
+
|
| 284 |
+
if args.operation == 'translate':
|
| 285 |
+
dst_center = np.array(args.dst_center, dtype=float)
|
| 286 |
+
|
| 287 |
+
if dst_center[2] > 98:
|
| 288 |
+
import pickle
|
| 289 |
+
with open('config/cam_info.pkl', 'rb') as f:
|
| 290 |
+
cam_info_0 = pickle.load(f)
|
| 291 |
+
fx_color = cam_info_0.FovX
|
| 292 |
+
fy_color = cam_info_0.FovY
|
| 293 |
+
|
| 294 |
+
points = gaussians._xyz.detach().cpu().numpy()
|
| 295 |
+
z_avg = np.average(points[:,2], axis=0)
|
| 296 |
+
|
| 297 |
+
if args.render_coord is None:
|
| 298 |
+
x_min = np.percentile(points[:,0], 1, axis=0)
|
| 299 |
+
x_max = np.percentile(points[:,0], 99, axis=0)
|
| 300 |
+
y_min = np.percentile(points[:,1], 1, axis=0)
|
| 301 |
+
y_max = np.percentile(points[:,1], 99, axis=0)
|
| 302 |
+
|
| 303 |
+
else:
|
| 304 |
+
x_min, x_max, y_min, y_max = args.render_coord
|
| 305 |
+
|
| 306 |
+
pixels_per_unit = 1200 / max((x_max - x_min), (y_max - y_min))
|
| 307 |
+
|
| 308 |
+
z_cam = z_avg + fx_color / pixels_per_unit
|
| 309 |
+
T_real = np.array([(x_min + x_max) / 2, (y_min + y_max) / 2, z_cam])
|
| 310 |
+
dst_center[2] = z_avg
|
| 311 |
+
|
| 312 |
+
if not mask3d.any():
|
| 313 |
+
|
| 314 |
+
new_dst_center = find_first_intersection(T_real, dst_center-T_real, points)
|
| 315 |
+
new_dst_center[2] += 0.1
|
| 316 |
+
print(dst_center)
|
| 317 |
+
|
| 318 |
+
dataset = args.model_path.split('/')[-1]
|
| 319 |
+
with open(f"data/{dataset}/coord", 'w') as file:
|
| 320 |
+
file.write(f"{new_dst_center[0]} {new_dst_center[1]} {new_dst_center[2]}\n")
|
| 321 |
+
|
| 322 |
+
point_cloud_path = os.path.join(model_path, "point_cloud"+"/iteration_{}".format(iteration))
|
| 323 |
+
gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply"))
|
| 324 |
+
|
| 325 |
+
return gaussians
|
| 326 |
+
|
| 327 |
+
else:
|
| 328 |
+
points = gaussians._xyz.detach()
|
| 329 |
+
obj_bb = {}
|
| 330 |
+
get_bb(points, mask3d, obj_bb)
|
| 331 |
+
other_points = points[~(mask3d.bool().squeeze())]
|
| 332 |
+
new_dst_center = find_first_intersection(T_real, dst_center-T_real, other_points.cpu().numpy())
|
| 333 |
+
new_dst_center[2] += obj_bb["size"][2] / 2
|
| 334 |
+
print(obj_bb["size"], ' | ', dst_center, ' | ', new_dst_center)
|
| 335 |
+
|
| 336 |
+
dataset = args.model_path.split('/')[-1]
|
| 337 |
+
with open(f"data/{dataset}/coord", 'w') as file:
|
| 338 |
+
file.write(f"{new_dst_center[0]} {new_dst_center[1]} {new_dst_center[2]}\n")
|
| 339 |
+
|
| 340 |
+
for i in range(3):
|
| 341 |
+
args.dst_center[i] = new_dst_center[i]
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
if args.operation == 'removal':
|
| 345 |
+
gaussians.removal_setup(opt, mask3d)
|
| 346 |
+
elif args.operation == 'translate':
|
| 347 |
+
|
| 348 |
+
dst_center = torch.FloatTensor(args.dst_center).cuda()
|
| 349 |
+
|
| 350 |
+
gaussians.translate_setup(opt, mask3d, masked_center, dst_center)
|
| 351 |
+
|
| 352 |
+
elif args.operation == 'rotate':
|
| 353 |
+
|
| 354 |
+
from scipy.spatial.transform import Rotation
|
| 355 |
+
rotate_matrix = Rotation.from_euler('xyz', args.euler_angle, degrees=True).as_matrix()
|
| 356 |
+
gaussians.rotate_setup(opt, mask3d, masked_center, rotate_matrix)
|
| 357 |
+
|
| 358 |
+
point_cloud_path = os.path.join(model_path, "point_cloud"+"/iteration_{}".format(iteration))
|
| 359 |
+
gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply"))
|
| 360 |
+
|
| 361 |
+
return gaussians
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def render_set(model_path, name, iteration, views, gaussians, pipeline, background, args):
|
| 365 |
+
|
| 366 |
+
render_path = os.path.join(model_path, name, "ours{}".format(iteration), "renders")
|
| 367 |
+
render_path_with_axis = os.path.join(model_path, name, "ours{}".format(iteration), "renders_with_axis")
|
| 368 |
+
render_path_with_labels = os.path.join(model_path, name, "ours{}".format(iteration), "renders_with_labels")
|
| 369 |
+
render_path_with_highlights = os.path.join(model_path, name, "ours{}".format(iteration), "renders_with_highlights")
|
| 370 |
+
makedirs(render_path, exist_ok=True)
|
| 371 |
+
makedirs(render_path_with_axis, exist_ok=True)
|
| 372 |
+
makedirs(render_path_with_labels, exist_ok=True)
|
| 373 |
+
makedirs(render_path_with_highlights, exist_ok=True)
|
| 374 |
+
|
| 375 |
+
for idx, view in enumerate(tqdm(views, desc="Rendering progress")):
|
| 376 |
+
results = render(view, gaussians, pipeline, background)
|
| 377 |
+
rendering = results["render"]
|
| 378 |
+
|
| 379 |
+
torchvision.utils.save_image(rendering, os.path.join(render_path, '{0:05d}'.format(idx) + ".png"))
|
| 380 |
+
|
| 381 |
+
if args.render_highlights:
|
| 382 |
+
|
| 383 |
+
rendering_with_highlights = rendering.permute(1,2,0).cpu().numpy()
|
| 384 |
+
rendering_with_highlights = np.clip(rendering_with_highlights * 255, 0, 255).astype(np.uint8)
|
| 385 |
+
# rendering_with_highlights = (rendering_with_highlights * 255).astype(np.uint8)
|
| 386 |
+
rendering_with_highlights = cv2.cvtColor(rendering_with_highlights.astype(np.uint8), cv2.COLOR_RGB2BGR)
|
| 387 |
+
|
| 388 |
+
color_list = [
|
| 389 |
+
("black", (0, 0, 0)),
|
| 390 |
+
("Red", (0, 0, 255)),
|
| 391 |
+
("Green", (0, 255, 0)),
|
| 392 |
+
("Blue", (255, 0, 0)),
|
| 393 |
+
("Yellow", (0, 255, 255)),
|
| 394 |
+
("Gray", (128, 128, 128)),
|
| 395 |
+
("Cyan", (255, 255, 0)),
|
| 396 |
+
("Orange", (0, 165, 255)),
|
| 397 |
+
("Purple", (128, 0, 128)),
|
| 398 |
+
("Pink", (203, 192, 255)),
|
| 399 |
+
("Brown", (42, 42, 165)),
|
| 400 |
+
]
|
| 401 |
+
dir_list = np.array([[1,1,1],[1,1,-1],[1,-1,1],[1,-1,-1],[-1,1,1],[-1,1,-1],[-1,-1,1],[-1,-1,-1]])
|
| 402 |
+
|
| 403 |
+
if args.render_highlights[0] == -1:
|
| 404 |
+
obj_list = range(1,256)
|
| 405 |
+
else:
|
| 406 |
+
obj_list = args.render_highlights
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
for obj_id in obj_list:
|
| 410 |
+
with torch.no_grad():
|
| 411 |
+
mask3d = gaussians._objects_dc[:,0,0] == obj_id
|
| 412 |
+
if torch.count_nonzero(mask3d) < 5:
|
| 413 |
+
break
|
| 414 |
+
mask3d = mask3d.float()[:,None,None]
|
| 415 |
+
|
| 416 |
+
bb = {}
|
| 417 |
+
get_bb(gaussians._xyz.detach(), mask3d, bb)
|
| 418 |
+
|
| 419 |
+
pixel_coord = []
|
| 420 |
+
for i in range(8):
|
| 421 |
+
point_coord = np.array(bb["centroid"]) + np.array(bb["size"]) * dir_list[i]
|
| 422 |
+
pixel_coord.append(get_pixel_coord(view, point_coord))
|
| 423 |
+
|
| 424 |
+
min_coord = tuple(np.min(pixel_coord, axis=0))
|
| 425 |
+
max_coord = tuple(np.max(pixel_coord, axis=0))
|
| 426 |
+
|
| 427 |
+
cv2.rectangle(rendering_with_highlights, min_coord, max_coord, color_list[obj_id][1], 2)
|
| 428 |
+
|
| 429 |
+
cv2.imwrite(os.path.join(render_path_with_highlights, '{0:05d}'.format(idx) + ".png"), rendering_with_highlights)
|
| 430 |
+
|
| 431 |
+
if args.get_bbox_2d:
|
| 432 |
+
dir_list = np.array([[1,1,1],[1,1,-1],[1,-1,1],[1,-1,-1],[-1,1,1],[-1,1,-1],[-1,-1,1],[-1,-1,-1]])
|
| 433 |
+
centroid = [(args.get_bbox_2d[0]+args.get_bbox_2d[1])/2, (args.get_bbox_2d[2]+args.get_bbox_2d[3])/2, (args.get_bbox_2d[4]+args.get_bbox_2d[5])/2]
|
| 434 |
+
size = [(args.get_bbox_2d[1]-args.get_bbox_2d[0])/2, (args.get_bbox_2d[3]-args.get_bbox_2d[2])/2, (args.get_bbox_2d[5]-args.get_bbox_2d[4])/2]
|
| 435 |
+
|
| 436 |
+
pixel_coord = []
|
| 437 |
+
for i in range(8):
|
| 438 |
+
point_coord = np.array(centroid) + np.array(size) * dir_list[i]
|
| 439 |
+
pixel_coord.append(get_pixel_coord(view, point_coord))
|
| 440 |
+
|
| 441 |
+
min_coord = tuple(np.min(pixel_coord, axis=0))
|
| 442 |
+
max_coord = tuple(np.max(pixel_coord, axis=0))
|
| 443 |
+
|
| 444 |
+
output = {}
|
| 445 |
+
output['x_min'] = min_coord[0].item()
|
| 446 |
+
output['x_max'] = max_coord[0].item()
|
| 447 |
+
output['y_min'] = min_coord[1].item()
|
| 448 |
+
output['y_max'] = max_coord[1].item()
|
| 449 |
+
|
| 450 |
+
with open(args.model_path + '/bbox_2d.json', 'w') as f:
|
| 451 |
+
json.dump(output, f, indent=4)
|
| 452 |
+
|
| 453 |
+
print("bbox_2d saved.")
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
def editing(args):
|
| 457 |
+
gaussians = GaussianModel(args.sh_degree)
|
| 458 |
+
scene = Scene(args, gaussians, load_iteration=args.iteration, shuffle=False, only_gaussians=args.scanrefer)
|
| 459 |
+
|
| 460 |
+
num_classes = args.num_classes
|
| 461 |
+
print("Num classes: ",num_classes)
|
| 462 |
+
bg_color = [1,1,1] if args.white_background else [0, 0, 0]
|
| 463 |
+
background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
|
| 464 |
+
|
| 465 |
+
if args.operation == "skip":
|
| 466 |
+
point_cloud_path = os.path.join(args.model_path, "point_cloud", "iteration_{}".format(scene.loaded_iter))
|
| 467 |
+
gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply"))
|
| 468 |
+
|
| 469 |
+
elif args.operation == 'crop':
|
| 470 |
+
|
| 471 |
+
objects_dc = gaussians._objects_dc.detach()
|
| 472 |
+
objects_dc[:,0,0] = 0
|
| 473 |
+
gaussians._objects_dc = objects_dc
|
| 474 |
+
|
| 475 |
+
if args.crop_coord is None:
|
| 476 |
+
points = gaussians._xyz.cpu().numpy()
|
| 477 |
+
x_min = np.percentile(points[:,0], 5, axis=0)
|
| 478 |
+
x_max = np.percentile(points[:,0], 95, axis=0)
|
| 479 |
+
y_min = np.percentile(points[:,1], 5, axis=0)
|
| 480 |
+
y_max = np.percentile(points[:,1], 95, axis=0)
|
| 481 |
+
z_min = np.percentile(points[:,2], 5, axis=0)
|
| 482 |
+
z_max = np.percentile(points[:,2], 95, axis=0)
|
| 483 |
+
else:
|
| 484 |
+
x_min, x_max, y_min, y_max, z_min, z_max = args.crop_coord
|
| 485 |
+
|
| 486 |
+
mask = (gaussians._xyz[:, 0] >= x_min) & (gaussians._xyz[:, 0] <= x_max) & (gaussians._xyz[:, 1] >= y_min) & (gaussians._xyz[:, 1] <= y_max) & (gaussians._xyz[:, 2] >= z_min) & (gaussians._xyz[:, 2] <= z_max)
|
| 487 |
+
|
| 488 |
+
gaussians._xyz = gaussians._xyz[mask].detach()
|
| 489 |
+
gaussians._features_dc = gaussians._features_dc[mask].detach()
|
| 490 |
+
gaussians._features_rest = gaussians._features_rest[mask].detach()
|
| 491 |
+
gaussians._opacity = gaussians._opacity[mask].detach()
|
| 492 |
+
gaussians._scaling = gaussians._scaling[mask].detach()
|
| 493 |
+
gaussians._rotation = gaussians._rotation[mask].detach()
|
| 494 |
+
gaussians._objects_dc = gaussians._objects_dc[mask].detach()
|
| 495 |
+
|
| 496 |
+
point_cloud_path = os.path.join(args.model_path, "point_cloud", "iteration_{}".format(scene.loaded_iter))
|
| 497 |
+
gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply"))
|
| 498 |
+
|
| 499 |
+
elif args.operation == 'multi-editing':
|
| 500 |
+
for i in range(len(args.operation_list)):
|
| 501 |
+
args.operation = args.operation_list[i]
|
| 502 |
+
args.select_obj_id = args.select_obj_id_list[i]
|
| 503 |
+
args.dst_center = args.parameter_list[i]
|
| 504 |
+
args.euler_angle = args.parameter_list[i]
|
| 505 |
+
gaussians = editing_setup(opt, args.model_path, scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background, scene.cameras_extent, args.removal_thresh, args)
|
| 506 |
+
|
| 507 |
+
else:
|
| 508 |
+
gaussians = editing_setup(opt, args.model_path, scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background, scene.cameras_extent, args.removal_thresh, args)
|
| 509 |
+
|
| 510 |
+
print(args.model_path)
|
| 511 |
+
scene = Scene(args, gaussians, load_iteration='/iteration_'+str(scene.loaded_iter), shuffle=False, only_gaussians=args.scanrefer)
|
| 512 |
+
if args.scanrefer:
|
| 513 |
+
scene.loaded_iter = f'/iteration_{scene.loaded_iter}'
|
| 514 |
+
|
| 515 |
+
with torch.no_grad():
|
| 516 |
+
|
| 517 |
+
if args.render_obj != 256:
|
| 518 |
+
|
| 519 |
+
mask3d = gaussians._objects_dc[:,0,0] == abs(args.render_obj)
|
| 520 |
+
if not mask3d.any():
|
| 521 |
+
raise ValueError("Non-existent object!")
|
| 522 |
+
|
| 523 |
+
if args.render_obj < 0:
|
| 524 |
+
mask3d = ~mask3d
|
| 525 |
+
|
| 526 |
+
mask3d = mask3d.bool().squeeze()
|
| 527 |
+
gaussians._xyz = gaussians._xyz[mask3d].detach()
|
| 528 |
+
gaussians._features_dc = gaussians._features_dc[mask3d].detach()
|
| 529 |
+
gaussians._features_rest = gaussians._features_rest[mask3d].detach()
|
| 530 |
+
gaussians._opacity = gaussians._opacity[mask3d].detach()
|
| 531 |
+
gaussians._scaling = gaussians._scaling[mask3d].detach()
|
| 532 |
+
gaussians._rotation = gaussians._rotation[mask3d].detach()
|
| 533 |
+
gaussians._objects_dc = gaussians._objects_dc[mask3d].detach()
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
if args.render_ori:
|
| 537 |
+
render_set(args.model_path, "train", scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background, args)
|
| 538 |
+
|
| 539 |
+
elif args.render_all:
|
| 540 |
+
|
| 541 |
+
if args.scanrefer:
|
| 542 |
+
scene_id = args.model_path.split('/')[1]
|
| 543 |
+
fovfile_path = f'/media/shared_space/data/scannet/scans/{scene_id}/{scene_id}.txt'
|
| 544 |
+
with open(fovfile_path, 'r') as file:
|
| 545 |
+
for line in file:
|
| 546 |
+
if line.startswith('fx_color'):
|
| 547 |
+
fx_color = float(line.split('=')[1].strip())
|
| 548 |
+
elif line.startswith('fy_color'):
|
| 549 |
+
fy_color = float(line.split('=')[1].strip())
|
| 550 |
+
|
| 551 |
+
else:
|
| 552 |
+
import pickle
|
| 553 |
+
with open('config/cam_info.pkl', 'rb') as f:
|
| 554 |
+
cam_info_0 = pickle.load(f)
|
| 555 |
+
fx_color = cam_info_0.FovX
|
| 556 |
+
fy_color = cam_info_0.FovY
|
| 557 |
+
|
| 558 |
+
R_list = [[[1,0,0],[0,0,-1],[0,1,0]], [[1,0,0],[0,0,1],[0,-1,0]], [[0,0,1],[0,1,0],[-1,0,0]], [[0,0,-1],[0,1,0],[1,0,0]], [[1,0,0],[0,1,0],[0,0,1]], [[1,0,0],[0,-1,0],[0,0,-1]]]
|
| 559 |
+
T_list = [[0,0,0], [0,0,0], [0,0,0], [0,0,0], [0,0,0], [0,0,6]]
|
| 560 |
+
|
| 561 |
+
from scene.dataset_readers import CameraInfo
|
| 562 |
+
from utils.camera_utils import cameraList_from_camInfos
|
| 563 |
+
from utils.graphics_utils import focal2fov
|
| 564 |
+
|
| 565 |
+
cam_infos = []
|
| 566 |
+
|
| 567 |
+
if args.render_angle:
|
| 568 |
+
|
| 569 |
+
points = gaussians._xyz.cpu().numpy()
|
| 570 |
+
x_min = np.percentile(points[:,0], 1, axis=0)
|
| 571 |
+
x_max = np.percentile(points[:,0], 99, axis=0)
|
| 572 |
+
y_min = np.percentile(points[:,1], 1, axis=0)
|
| 573 |
+
y_max = np.percentile(points[:,1], 99, axis=0)
|
| 574 |
+
z_min = np.percentile(points[:,2], 1, axis=0)
|
| 575 |
+
z_max = np.percentile(points[:,2], 99, axis=0)
|
| 576 |
+
|
| 577 |
+
view_point_init = np.array( [(x_min + x_max) / 2, (y_min + y_max) / 2, z_max + (z_max - z_min) * 0.1] )
|
| 578 |
+
view_point_shift = np.array( [args.render_angle[3], args.render_angle[4], args.render_angle[5]] )
|
| 579 |
+
view_point = view_point_init + view_point_shift
|
| 580 |
+
dst_point = view_point + np.array( [args.render_angle[0], args.render_angle[1], args.render_angle[2]] )
|
| 581 |
+
|
| 582 |
+
R = look_at(view_point, dst_point)
|
| 583 |
+
T = np.array(view_point)
|
| 584 |
+
T = -np.linalg.inv(R) @ T
|
| 585 |
+
|
| 586 |
+
new_height = 2400
|
| 587 |
+
new_width = 2400
|
| 588 |
+
|
| 589 |
+
new_fovy = focal2fov(fy_color, new_height)
|
| 590 |
+
new_fovx = focal2fov(fx_color, new_width)
|
| 591 |
+
tmp_img = Image.new('RGB', (int(new_width), int(new_height)), (0, 0, 0))
|
| 592 |
+
cam_info = CameraInfo(5, R, T, new_fovy, new_fovx, \
|
| 593 |
+
tmp_img, " ", " ", \
|
| 594 |
+
new_width, new_height, tmp_img)
|
| 595 |
+
cam_infos.append(cam_info)
|
| 596 |
+
|
| 597 |
+
|
| 598 |
+
else:
|
| 599 |
+
points = gaussians._xyz.cpu().numpy()
|
| 600 |
+
z_avg = np.average(points[:,2], axis=0)
|
| 601 |
+
|
| 602 |
+
if args.render_coord is None:
|
| 603 |
+
x_min = np.percentile(points[:,0], 1, axis=0)
|
| 604 |
+
x_max = np.percentile(points[:,0], 99, axis=0)
|
| 605 |
+
y_min = np.percentile(points[:,1], 1, axis=0)
|
| 606 |
+
y_max = np.percentile(points[:,1], 99, axis=0)
|
| 607 |
+
|
| 608 |
+
else:
|
| 609 |
+
x_min, x_max, y_min, y_max = args.render_coord
|
| 610 |
+
|
| 611 |
+
print(x_min, x_max, y_min, y_max, z_avg)
|
| 612 |
+
|
| 613 |
+
for i in [5]:
|
| 614 |
+
|
| 615 |
+
pixels_per_unit = 1200 / max((x_max - x_min), (y_max - y_min))
|
| 616 |
+
new_width = 1200
|
| 617 |
+
new_height = 1200
|
| 618 |
+
|
| 619 |
+
z_cam = z_avg + fx_color / pixels_per_unit
|
| 620 |
+
T_real = [(x_min + x_max) / 2, (y_min + y_max) / 2, z_cam]
|
| 621 |
+
|
| 622 |
+
R = np.array(R_list[i])
|
| 623 |
+
T = np.array(T_real)
|
| 624 |
+
T = -np.linalg.inv(R) @ T
|
| 625 |
+
|
| 626 |
+
print(T_real)
|
| 627 |
+
|
| 628 |
+
new_fovy = focal2fov(fy_color, new_height)
|
| 629 |
+
new_fovx = focal2fov(fx_color, new_width)
|
| 630 |
+
tmp_img = Image.new('RGB', (int(new_width), int(new_height)), (0, 0, 0))
|
| 631 |
+
cam_info = CameraInfo(i, R, T, new_fovy, new_fovx, \
|
| 632 |
+
tmp_img, " ", " ", \
|
| 633 |
+
new_width, new_height, tmp_img)
|
| 634 |
+
cam_infos.append(cam_info)
|
| 635 |
+
|
| 636 |
+
views = cameraList_from_camInfos(cam_infos, 1.0, args)
|
| 637 |
+
|
| 638 |
+
render_set(args.model_path, "train", scene.loaded_iter, views, gaussians, pipeline, background, args)
|
| 639 |
+
|
| 640 |
+
elif args.render_video:
|
| 641 |
+
|
| 642 |
+
import pickle
|
| 643 |
+
if args.scanrefer:
|
| 644 |
+
scene_id = args.model_path.split('/')[1]
|
| 645 |
+
fovfile_path = f'/media/shared_space/data/scannet/scans/{scene_id}/{scene_id}.txt'
|
| 646 |
+
with open(fovfile_path, 'r') as file:
|
| 647 |
+
for line in file:
|
| 648 |
+
if line.startswith('fx_color'):
|
| 649 |
+
fx_color = float(line.split('=')[1].strip())
|
| 650 |
+
elif line.startswith('fy_color'):
|
| 651 |
+
fy_color = float(line.split('=')[1].strip())
|
| 652 |
+
|
| 653 |
+
else:
|
| 654 |
+
with open('config/cam_info.pkl', 'rb') as f:
|
| 655 |
+
cam_info_0 = pickle.load(f)
|
| 656 |
+
fx_color = cam_info_0.FovX
|
| 657 |
+
fy_color = cam_info_0.FovY
|
| 658 |
+
|
| 659 |
+
from scene.dataset_readers import CameraInfo
|
| 660 |
+
from utils.camera_utils import cameraList_from_camInfos
|
| 661 |
+
from utils.graphics_utils import focal2fov
|
| 662 |
+
|
| 663 |
+
cam_infos = []
|
| 664 |
+
|
| 665 |
+
if args.render_coord is None and args.render_angle is None:
|
| 666 |
+
print("Invalid coord/angle!")
|
| 667 |
+
exit(0)
|
| 668 |
+
|
| 669 |
+
elif args.render_coord:
|
| 670 |
+
|
| 671 |
+
x_min, x_max, y_min, y_max = args.render_coord
|
| 672 |
+
|
| 673 |
+
points = gaussians._xyz.cpu().numpy()
|
| 674 |
+
z_avg = np.average(points[:,2], axis=0)
|
| 675 |
+
|
| 676 |
+
print(x_min, x_max, y_min, y_max, z_avg)
|
| 677 |
+
|
| 678 |
+
new_width = 1200
|
| 679 |
+
new_height = 1200
|
| 680 |
+
pixels_per_unit = min(new_width / (x_max - x_min), new_height / (y_max - y_min))
|
| 681 |
+
|
| 682 |
+
z_cam = z_avg + fx_color / pixels_per_unit
|
| 683 |
+
T_real = np.array([(x_min + x_max) / 2, (y_min + y_max) / 2, z_cam])
|
| 684 |
+
|
| 685 |
+
dis = cal_dis(gaussians._xyz, T_real)
|
| 686 |
+
obj_center = np.array([T_real[0], T_real[1], T_real[2] - dis])
|
| 687 |
+
|
| 688 |
+
camera_positions = generate_spiral_path(T_real, dis)
|
| 689 |
+
|
| 690 |
+
camera_orientations = []
|
| 691 |
+
|
| 692 |
+
for pos in camera_positions:
|
| 693 |
+
camera_orientations.append(look_at(pos, obj_center))
|
| 694 |
+
|
| 695 |
+
camera_orientations = np.array(camera_orientations)
|
| 696 |
+
|
| 697 |
+
elif args.render_angle:
|
| 698 |
+
|
| 699 |
+
new_width = 2400
|
| 700 |
+
new_height = 2400
|
| 701 |
+
|
| 702 |
+
points = gaussians._xyz.cpu().numpy()
|
| 703 |
+
x_min = np.percentile(points[:,0], 1, axis=0)
|
| 704 |
+
x_max = np.percentile(points[:,0], 99, axis=0)
|
| 705 |
+
y_min = np.percentile(points[:,1], 1, axis=0)
|
| 706 |
+
y_max = np.percentile(points[:,1], 99, axis=0)
|
| 707 |
+
z_min = np.percentile(points[:,2], 1, axis=0)
|
| 708 |
+
z_max = np.percentile(points[:,2], 99, axis=0)
|
| 709 |
+
|
| 710 |
+
view_point_init = np.array( [(x_min + x_max) / 2, (y_min + y_max) / 2, z_max + (z_max - z_min) * 0.1] )
|
| 711 |
+
view_point_shift = np.array( [args.render_angle[3], args.render_angle[4], args.render_angle[5]] )
|
| 712 |
+
view_point = view_point_init + view_point_shift
|
| 713 |
+
direction = np.array( [args.render_angle[0], args.render_angle[1], args.render_angle[2]] )
|
| 714 |
+
dst_point = view_point + direction
|
| 715 |
+
|
| 716 |
+
obj_center = find_first_intersection(view_point, direction, gaussians._xyz.detach().cpu().numpy())
|
| 717 |
+
|
| 718 |
+
camera_positions = generate_circle_path(view_point, obj_center)
|
| 719 |
+
|
| 720 |
+
camera_orientations = []
|
| 721 |
+
|
| 722 |
+
for pos in camera_positions:
|
| 723 |
+
camera_orientations.append(look_at(pos, obj_center))
|
| 724 |
+
camera_orientations[0] = look_at(view_point, dst_point) # nt
|
| 725 |
+
|
| 726 |
+
camera_orientations = np.array(camera_orientations)
|
| 727 |
+
|
| 728 |
+
for i in range(len(camera_positions)):
|
| 729 |
+
|
| 730 |
+
R = np.array(camera_orientations[i])
|
| 731 |
+
T = np.array(camera_positions[i])
|
| 732 |
+
T = -np.linalg.inv(R) @ T
|
| 733 |
+
|
| 734 |
+
new_fovy = focal2fov(fy_color, new_height)
|
| 735 |
+
new_fovx = focal2fov(fx_color, new_width)
|
| 736 |
+
tmp_img = Image.new('RGB', (int(new_width), int(new_height)), (0, 0, 0))
|
| 737 |
+
cam_info = CameraInfo(i, R, T, new_fovy, new_fovx, \
|
| 738 |
+
tmp_img, " ", " ", \
|
| 739 |
+
new_width, new_height, tmp_img)
|
| 740 |
+
cam_infos.append(cam_info)
|
| 741 |
+
|
| 742 |
+
camera_path = os.path.join(args.model_path, "train", "ours{}".format(scene.loaded_iter), 'cam_infos.pkl')
|
| 743 |
+
with open(camera_path, 'wb') as f:
|
| 744 |
+
pickle.dump(cam_infos, f)
|
| 745 |
+
|
| 746 |
+
views = cameraList_from_camInfos(cam_infos, 1.0, args)
|
| 747 |
+
|
| 748 |
+
render_set(args.model_path, "train", scene.loaded_iter, views, gaussians, pipeline, background, args)
|
| 749 |
+
|
| 750 |
+
elif args.render_obj_theta:
|
| 751 |
+
|
| 752 |
+
import pickle
|
| 753 |
+
with open('config/cam_info.pkl', 'rb') as f:
|
| 754 |
+
cam_info_0 = pickle.load(f)
|
| 755 |
+
|
| 756 |
+
from scene.dataset_readers import CameraInfo
|
| 757 |
+
from utils.camera_utils import cameraList_from_camInfos
|
| 758 |
+
from utils.graphics_utils import focal2fov
|
| 759 |
+
|
| 760 |
+
mask3d = torch.ones(gaussians._xyz.shape[0], dtype=torch.bool)
|
| 761 |
+
bb = {}
|
| 762 |
+
get_bb(gaussians._xyz.detach(), torch.tensor, bb)
|
| 763 |
+
|
| 764 |
+
|
| 765 |
+
new_width = cam_info_0.height + 60
|
| 766 |
+
new_height = cam_info_0.height + 40
|
| 767 |
+
new_fovy = focal2fov(cam_info_0.FovY, new_height)
|
| 768 |
+
new_fovx = focal2fov(cam_info_0.FovX, new_width)
|
| 769 |
+
|
| 770 |
+
R_list = []
|
| 771 |
+
T_list = []
|
| 772 |
+
|
| 773 |
+
for theta in args.render_obj_theta:
|
| 774 |
+
R, T, r = compute_camera_extrinsics(bb['centroid'], bb['size'], new_fovx, new_fovy, theta / 180 * np.pi)
|
| 775 |
+
R_list.append(R)
|
| 776 |
+
T_list.append(T)
|
| 777 |
+
|
| 778 |
+
cam_infos = []
|
| 779 |
+
|
| 780 |
+
for i in range(len(R_list)):
|
| 781 |
+
|
| 782 |
+
cam_info = CameraInfo(i, np.array(R_list[i]), np.array(T_list[i]), new_fovy, new_fovx, \
|
| 783 |
+
crop_image_to_size(cam_info_0.image, new_width, new_height), cam_info_0.image_path, cam_info_0.image_name+f'{i}', \
|
| 784 |
+
new_width, new_height, crop_image_to_size(cam_info_0.objects, new_width, new_height))
|
| 785 |
+
cam_infos.append(cam_info)
|
| 786 |
+
|
| 787 |
+
views = cameraList_from_camInfos(cam_infos, 1.0, args)
|
| 788 |
+
|
| 789 |
+
|
| 790 |
+
render_set(args.model_path, "train", scene.loaded_iter, views, gaussians, pipeline, background, args)
|
| 791 |
+
|
| 792 |
+
|
| 793 |
+
def get_bounding_box(args):
    """Export a 3D bounding box for every object id found in the scene.

    Loads the trained Gaussians, masks them per object id (stored in
    ``_objects_dc``), computes each object's bounding box via ``get_bb`` and
    writes the result to ``<model_path>/bounding_boxes.json``.
    """
    gaussians = GaussianModel(args.sh_degree)
    scene = Scene(args, gaussians, load_iteration=args.iteration, shuffle=False, only_gaussians=args.scanrefer)
    num_classes = args.num_classes
    print("Num classes: ", num_classes)
    bg_color = [1, 1, 1] if args.white_background else [0, 0, 0]
    background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")

    bb_list = []
    for obj_id in range(1, 256):
        with torch.no_grad():
            obj_mask = gaussians._objects_dc[:, 0, 0] == obj_id
            # Ids are assumed contiguous: stop at the first id with too few
            # Gaussians to form a meaningful box.
            if torch.count_nonzero(obj_mask) < 5:
                break
            obj_mask = obj_mask.float()[:, None, None]

            box = {"id": obj_id}
            get_bb(gaussians._xyz.detach(), obj_mask, box)
            bb_list.append(box)

    output = {"Scene configurations": bb_list}
    with open(args.model_path + '/bounding_boxes.json', 'w') as f:
        json.dump(output, f, indent=4)

    print("Bounding box saved.")
|
| 823 |
+
|
| 824 |
+
|
| 825 |
+
if __name__ == "__main__":
    # CLI entry point: parse arguments, then either export bounding boxes or
    # run the editing/rendering pipeline.
    parser = ArgumentParser(description="Testing script parameters")
    model = ModelParams(parser, sentinel=True)
    opt = OptimizationParams(parser)
    pipeline = PipelineParams(parser)
    parser.add_argument("--iteration", default=-1, type=int)
    parser.add_argument("--skip_train", action="store_true")
    parser.add_argument("--quiet", action="store_true")

    # Editing options.
    parser.add_argument("--config_file", type=str, help="Path to the configuration file")
    parser.add_argument("--operation", type=str, default="skip", help="removal/translate/rotate/skip/crop/multi-editing")
    parser.add_argument("--crop_coord", type=float, nargs='+')
    parser.add_argument("--select_obj_id", type=int, nargs='+')
    parser.add_argument("--dst_center", type=float, nargs='+')
    parser.add_argument("--euler_angle", type=float, nargs='+')

    # Rendering options.
    parser.add_argument("--render_obj", type=int, default=256)
    parser.add_argument("--render_ori", action="store_true")
    parser.add_argument("--render_all", action="store_true")
    parser.add_argument("--render_video", action="store_true")
    parser.add_argument("--render_coord", type=float, nargs='+', default=None)
    parser.add_argument("--render_angle", type=float, nargs='+', default=None)
    parser.add_argument("--render_obj_theta", type=float, nargs='+', default=None)
    parser.add_argument("--render_highlights", type=int, nargs='+')
    parser.add_argument("--render_labels", action="store_true")
    parser.add_argument("--get_bbox_2d", type=float, nargs='+', default=None)
    parser.add_argument("--get_bounding_box", action="store_true")
    parser.add_argument("--scanrefer", action="store_true")

    args = parser.parse_args()
    # print(args)

    if not args.scanrefer:
        args = get_combined_args(parser)
    else:
        # ScanRefer scenes are loaded with fixed, known settings instead of
        # the settings recorded in the model's cfg file.
        args.sh_degree = 0
        args.iteration = 1
        args.resolution = 1
        args.data_device = "cuda"

    print("Rendering " + args.model_path)

    args.num_classes = 256
    args.removal_thresh = 0.3

    if hasattr(args, 'select_obj_id') and args.select_obj_id is not None:
        # Downstream editing expects a list of id-groups, so wrap every id in
        # its own single-element list.
        args.select_obj_id = [[obj_id] for obj_id in args.select_obj_id]

    if args.operation == 'multi-editing':
        # Multi-editing reads its operation schedule from a JSON config file.
        try:
            with open(args.config_file, 'r') as file:
                config = json.load(file)
        except FileNotFoundError:
            print(f"Error: Configuration file '{args.config_file}' not found.")
            exit(1)
        except json.JSONDecodeError as e:
            print(f"Error: Failed to parse the JSON configuration file: {e}")
            exit(1)

        args.operation_list = config.get("operation_list", ["removal"])
        args.select_obj_id_list = config.get("select_obj_id_list", [1])
        args.parameter_list = config.get("parameter_list", [[]])

    safe_state(args.quiet)

    if args.get_bounding_box:
        get_bounding_box(args)
    else:
        editing(args)
|
| 900 |
+
|
| 901 |
+
|
gaussian-grouping/ext/grounded_sam.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
import copy
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
from PIL import Image
|
| 10 |
+
|
| 11 |
+
# Grounding DINO
|
| 12 |
+
import groundingdino.datasets.transforms as T
|
| 13 |
+
from groundingdino.models import build_model
|
| 14 |
+
from groundingdino.util import box_ops
|
| 15 |
+
from groundingdino.util.slconfig import SLConfig
|
| 16 |
+
from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
|
| 17 |
+
from groundingdino.util.inference import annotate, load_image, predict
|
| 18 |
+
|
| 19 |
+
# segment anything
|
| 20 |
+
from segment_anything import build_sam, SamPredictor
|
| 21 |
+
import cv2
|
| 22 |
+
import numpy as np
|
| 23 |
+
import matplotlib.pyplot as plt
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
from huggingface_hub import hf_hub_download
|
| 27 |
+
|
| 28 |
+
def load_model_hf(repo_id, filename, ckpt_config_filename, device='cpu'):
    """Download a GroundingDINO config + checkpoint from the HF Hub.

    Returns the model with weights loaded (non-strict) and set to eval mode.
    """
    config_path = hf_hub_download(repo_id=repo_id, filename=ckpt_config_filename)

    # NOTE: the SLConfig object doubles as the model "args" namespace.
    model_cfg = SLConfig.fromfile(config_path)
    model = build_model(model_cfg)
    model_cfg.device = device

    ckpt_path = hf_hub_download(repo_id=repo_id, filename=filename)
    checkpoint = torch.load(ckpt_path, map_location='cpu')
    log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
    print("Model loaded from {} \n => {}".format(ckpt_path, log))
    model.eval()
    return model
|
| 41 |
+
|
| 42 |
+
def show_mask(mask, image, random_color=True):
    """Alpha-composite a binary (H, W) mask onto an RGB image.

    Returns the composited frame as an RGBA numpy array.
    """
    if random_color:
        overlay_color = np.concatenate([np.random.random(3), np.array([0.8])], axis=0)
    else:
        # Fixed dodger-blue overlay.
        overlay_color = np.array([30/255, 144/255, 255/255, 0.6])

    h, w = mask.shape[-2:]
    colored_mask = mask.reshape(h, w, 1) * overlay_color.reshape(1, 1, -1)

    base = Image.fromarray(image).convert("RGBA")
    overlay = Image.fromarray((colored_mask * 255).astype(np.uint8)).convert("RGBA")
    return np.array(Image.alpha_composite(base, overlay))
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def grouned_sam_output(groundingdino_model, sam_predictor, TEXT_PROMPT, image, BOX_TRESHOLD = 0.3, TEXT_TRESHOLD = 0.45, device='cuda' ):
    """Detect a text prompt with GroundingDINO, then segment it with SAM.

    Returns a tuple of (merged boolean mask over all detections, annotated
    frame). NOTE(review): only the mask of the LAST detection is drawn onto
    the returned annotated frame — this matches the original behavior.
    """
    image_source = image
    transform = T.Compose(
        [
            T.RandomResize([800], max_size=1333),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    image, _ = transform(Image.fromarray(image_source), None)

    boxes, logits, phrases = predict(
        model=groundingdino_model,
        image=image,
        caption=TEXT_PROMPT,
        box_threshold=BOX_TRESHOLD,
        text_threshold=TEXT_TRESHOLD
    )
    annotated_frame = annotate(image_source=image_source, boxes=boxes, logits=logits, phrases=phrases)
    annotated_frame = annotated_frame[..., ::-1]  # BGR to RGB

    # Prompt SAM with the detected boxes.
    sam_predictor.set_image(image_source)
    H, W, _ = image_source.shape
    # Normalized cxcywh -> absolute xyxy.
    boxes_xyxy = box_ops.box_cxcywh_to_xyxy(boxes) * torch.Tensor([W, H, W, H])

    if len(boxes_xyxy) > 0:
        transformed_boxes = sam_predictor.transform.apply_boxes_torch(boxes_xyxy, image_source.shape[:2]).to(device)
        masks, _, _ = sam_predictor.predict_torch(
            point_coords=None,
            point_labels=None,
            boxes=transformed_boxes,
            multimask_output=False,
        )
    else:
        # No detection: fall back to a single all-zero mask.
        masks = torch.zeros((1, 1, H, W)).cuda()

    for i in range(len(masks)):
        annotated_frame_with_mask = show_mask(masks[i][0].cpu().numpy(), annotated_frame)

    return torch.sum(masks, dim=0).squeeze().bool(), annotated_frame_with_mask
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def select_obj_ioa(classification_maps, mask, ioa_thresh=0.7):
    """Select class ids whose intersection-over-area with `mask` is high.

    For each class id present in `classification_maps`, computes
    IoA = |class ∩ mask| / |class| and keeps ids above `ioa_thresh`.
    Returns the selected ids as a CUDA tensor.
    """
    selected_ids = []

    for class_id in classification_maps.unique():
        class_mask = (classification_maps == class_id).byte()
        class_area = class_mask.sum()
        overlap = torch.sum(class_mask * mask)
        # Guard against an empty class to avoid division by zero.
        ioa = overlap / class_area if class_area != 0 else 0
        if ioa > ioa_thresh:
            selected_ids.append(class_id)

    return torch.tensor(selected_ids).cuda()
|
| 116 |
+
|
| 117 |
+
|
gaussian-grouping/gaussian_renderer/__init__.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Copyright (C) 2023, Inria
|
| 3 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 4 |
+
# All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# This software is free for non-commercial, research and evaluation use
|
| 7 |
+
# under the terms of the LICENSE.md file.
|
| 8 |
+
#
|
| 9 |
+
# For inquiries contact george.drettakis@inria.fr
|
| 10 |
+
#
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
import math
|
| 14 |
+
from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
|
| 15 |
+
from scene.gaussian_model import GaussianModel
|
| 16 |
+
from utils.sh_utils import eval_sh
|
| 17 |
+
|
| 18 |
+
def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None):
    """
    Render the scene from `viewpoint_camera`.

    Background tensor (bg_color) must be on GPU!

    Returns a dict with the rendered RGB image, the screen-space points (kept
    around so PyTorch reports gradients of the 2D means for densification),
    the visibility filter and radii, the rendered per-pixel object features,
    and the per-pixel maximum opacity.
    """

    # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
    screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0
    try:
        screenspace_points.retain_grad()
    except Exception:
        # retain_grad is unavailable when grad tracking is disabled; rendering
        # still works, we just lose the densification gradient signal.
        pass

    # Set up rasterization configuration
    tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
    tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)

    raster_settings = GaussianRasterizationSettings(
        image_height=int(viewpoint_camera.image_height),
        image_width=int(viewpoint_camera.image_width),
        tanfovx=tanfovx,
        tanfovy=tanfovy,
        bg=bg_color,
        scale_modifier=scaling_modifier,
        viewmatrix=viewpoint_camera.world_view_transform,
        projmatrix=viewpoint_camera.full_proj_transform,
        sh_degree=pc.active_sh_degree,
        campos=viewpoint_camera.camera_center,
        prefiltered=False,
        debug=pipe.debug
    )

    rasterizer = GaussianRasterizer(raster_settings=raster_settings)

    means3D = pc.get_xyz
    means2D = screenspace_points
    opacity = pc.get_opacity

    # If precomputed 3d covariance is provided, use it. If not, then it will be computed from
    # scaling / rotation by the rasterizer.
    scales = None
    rotations = None
    cov3D_precomp = None
    if pipe.compute_cov3D_python:
        cov3D_precomp = pc.get_covariance(scaling_modifier)
    else:
        scales = pc.get_scaling
        rotations = pc.get_rotation

    # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
    # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
    shs = None
    colors_precomp = None
    # BUGFIX: sh_objs was previously assigned only in the "SH by rasterizer"
    # branch, causing a NameError at the rasterizer call whenever
    # pipe.convert_SHs_python was set or an override_color was supplied.
    # The object features are independent of the color path, so define them
    # unconditionally.
    sh_objs = pc.get_objects
    if override_color is None:
        if pipe.convert_SHs_python:
            shs_view = pc.get_features.transpose(1, 2).view(-1, 3, (pc.max_sh_degree+1)**2)
            dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features.shape[0], 1))
            dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True)
            sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized)
            colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)
        else:
            shs = pc.get_features
    else:
        colors_precomp = override_color

    # Per-view object mask supplied by the camera, used as the alpha input of
    # the grouping rasterizer.
    alpha_mask = viewpoint_camera.objects.cuda().float()

    # Rasterize visible Gaussians to image, obtain their radii (on screen).
    rendered_image, radii, rendered_objects, max_opacity = rasterizer(
        means3D = means3D,
        means2D = means2D,
        alpha = alpha_mask,
        shs = shs,
        sh_objs = sh_objs,
        colors_precomp = colors_precomp,
        opacities = opacity,
        scales = scales,
        rotations = rotations,
        cov3D_precomp = cov3D_precomp)

    # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
    # They will be excluded from value updates used in the splitting criteria.
    return {"render": rendered_image,
            "viewspace_points": screenspace_points,
            "visibility_filter" : radii > 0,
            "radii": radii,
            "render_object": rendered_objects,
            "max_opacity": max_opacity}
|
gaussian-grouping/gaussian_renderer/network_gui.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Copyright (C) 2023, Inria
|
| 3 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 4 |
+
# All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# This software is free for non-commercial, research and evaluation use
|
| 7 |
+
# under the terms of the LICENSE.md file.
|
| 8 |
+
#
|
| 9 |
+
# For inquiries contact george.drettakis@inria.fr
|
| 10 |
+
#
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
import traceback
|
| 14 |
+
import socket
|
| 15 |
+
import json
|
| 16 |
+
from scene.cameras import MiniCam
|
| 17 |
+
|
| 18 |
+
# Default endpoint for the SIBR network viewer.
host = "127.0.0.1"
port = 6009

# Active connection state; populated by try_connect().
conn = None
addr = None

# Listening socket shared by init()/try_connect().
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
| 25 |
+
|
| 26 |
+
def init(wish_host, wish_port):
    """Bind the shared listener socket to the requested host/port."""
    global host, port, listener
    host = wish_host
    port = wish_port
    listener.bind((host, port))
    listener.listen()
    # Zero timeout makes accept() non-blocking so try_connect() can poll.
    listener.settimeout(0)
|
| 33 |
+
|
| 34 |
+
def try_connect():
    """Poll the non-blocking listener for a pending viewer connection."""
    global conn, addr, listener
    try:
        conn, addr = listener.accept()
        print(f"\nConnected by {addr}")
        # Switch the accepted connection to blocking reads.
        conn.settimeout(None)
    except Exception:
        # No pending connection yet (non-blocking accept) — try again later.
        pass
|
| 42 |
+
|
| 43 |
+
def read():
    """Receive one length-prefixed JSON message from the viewer connection."""
    global conn
    # 4-byte little-endian length prefix, then the JSON payload.
    length_bytes = conn.recv(4)
    payload = conn.recv(int.from_bytes(length_bytes, 'little'))
    return json.loads(payload.decode("utf-8"))
|
| 49 |
+
|
| 50 |
+
def send(message_bytes, verify):
    """Send an optional payload, then a length-prefixed verification string."""
    global conn
    if message_bytes is not None:
        conn.sendall(message_bytes)
    conn.sendall(len(verify).to_bytes(4, 'little'))
    conn.sendall(bytes(verify, 'ascii'))
|
| 56 |
+
|
| 57 |
+
def receive():
    """Read one viewer message and build a MiniCam from it.

    Returns (camera, do_training, shs_python, rot_scale_python, keep_alive,
    scaling_modifier); all six are None when the viewer reports a zero
    resolution.
    """
    message = read()

    width = message["resolution_x"]
    height = message["resolution_y"]

    # Guard clause: a zero resolution means there is nothing to render.
    if width == 0 or height == 0:
        return None, None, None, None, None, None

    try:
        do_training = bool(message["train"])
        fovy = message["fov_y"]
        fovx = message["fov_x"]
        znear = message["z_near"]
        zfar = message["z_far"]
        do_shs_python = bool(message["shs_python"])
        do_rot_scale_python = bool(message["rot_scale_python"])
        keep_alive = bool(message["keep_alive"])
        scaling_modifier = message["scaling_modifier"]
        world_view_transform = torch.reshape(torch.tensor(message["view_matrix"]), (4, 4)).cuda()
        # Flip the Y and Z columns to convert the viewer's camera convention.
        world_view_transform[:, 1] = -world_view_transform[:, 1]
        world_view_transform[:, 2] = -world_view_transform[:, 2]
        full_proj_transform = torch.reshape(torch.tensor(message["view_projection_matrix"]), (4, 4)).cuda()
        full_proj_transform[:, 1] = -full_proj_transform[:, 1]
        custom_cam = MiniCam(width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform)
    except Exception as e:
        print("")
        traceback.print_exc()
        raise e
    return custom_cam, do_training, do_shs_python, do_rot_scale_python, keep_alive, scaling_modifier
|
gaussian-grouping/labelling.sh
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Pipeline: take the LLM searcher's coordinates, build SAM-2 object masks,
# retrain the Gaussian grouping model with them, and render before/after
# views of the segmented object for the LLM.
dataset='dining-table'
iteration=15000

# Get the parent directory of the script
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PARENT_FOLDER="$(dirname "$SCRIPT_DIR")"

data_path="${PARENT_FOLDER}/gaussian-grouping/data/${dataset}"
# output_path="${PARENT_FOLDER}/gaussian-grouping/output/${dataset}" # deprecated

# Zooming stage output: first line holds the localization coordinates.
zooming_file="${PARENT_FOLDER}/llm/tracking/searcher-zooming/output.txt"
loc_coord=$(head -n 1 "$zooming_file")

cd "${PARENT_FOLDER}/gaussian-grouping"

# Locating stage output: last line holds the object coordinates.
locating_file="${PARENT_FOLDER}/llm/tracking/searcher-locating/output.txt"
obj_coord=$(tail -n 1 "$locating_file")

# Echo a command (tagged with this script's name) before running it.
log_and_run() {
echo "[ $(basename "$0") ] -> $*"
"$@"
}

# echo "$(basename "$0")"; python edit_object_removal.py -m output/${dataset} --iteration ${iteration} --operation skip --render_video --render_coord ${obj_coord}

cd "${PARENT_FOLDER}/segment-anything-2/notebooks/"

# Generate per-view object masks with SAM-2.
python get_mask.py --dataset ${dataset}

# test
python render.py --dataset ${dataset}

# Install the new masks and camera infos into the dataset folder.
cp outputs/${dataset}/* ${data_path}/object_mask/
cp ${data_path}/train/ours/iteration_${iteration}/cam_infos.pkl ${data_path}/

# Retrain the grouping model with the new pseudo labels.
cd "${PARENT_FOLDER}/gaussian-grouping"
python train.py -s data/${dataset} -r 1 -m output/${dataset} --config_file config/gaussian_dataset/train.json --train_split --train_labels --iteration ${iteration}

# test
# Render the scene with only the object (render_obj 1) and without it
# (render_obj 0); hand both frames to the LLM tracking folder.
log_and_run python edit_object_removal.py -m output/${dataset} --iteration ${iteration} --operation skip --render_all --render_obj 1 --render_coord ${loc_coord}
cp ${data_path}/train/ours/iteration_${iteration}/renders/00000.png ${PARENT_FOLDER}/llm/tracking/find-to-seg-obj.png
log_and_run python edit_object_removal.py -m output/${dataset} --iteration ${iteration} --operation skip --render_all --render_obj 0 --render_coord ${loc_coord}
cp ${data_path}/train/ours/iteration_${iteration}/renders/00000.png ${PARENT_FOLDER}/llm/tracking/find-to-seg-woobj.png
|
gaussian-grouping/metrics.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Copyright (C) 2023, Inria
|
| 3 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 4 |
+
# All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# This software is free for non-commercial, research and evaluation use
|
| 7 |
+
# under the terms of the LICENSE.md file.
|
| 8 |
+
#
|
| 9 |
+
# For inquiries contact george.drettakis@inria.fr
|
| 10 |
+
#
|
| 11 |
+
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
import os
|
| 14 |
+
from PIL import Image
|
| 15 |
+
import torch
|
| 16 |
+
import torchvision.transforms.functional as tf
|
| 17 |
+
from utils.loss_utils import ssim
|
| 18 |
+
from lpipsPyTorch import lpips
|
| 19 |
+
import json
|
| 20 |
+
from tqdm import tqdm
|
| 21 |
+
from utils.image_utils import psnr
|
| 22 |
+
from argparse import ArgumentParser
|
| 23 |
+
|
| 24 |
+
def readImages(renders_dir, gt_dir):
    """Load matching render/ground-truth image pairs from two directories.

    Pairs are matched by identical filenames. Iteration is sorted so the
    pairing order is deterministic (raw ``os.listdir`` order is
    filesystem-dependent).

    :param renders_dir: ``Path`` to the directory of rendered images.
    :param gt_dir: ``Path`` to the directory of ground-truth images.
    :return: ``(renders, gts, image_names)`` where ``renders``/``gts`` are
        lists of 1x3xHxW float tensors on the GPU and ``image_names`` the
        corresponding filenames.
    """
    renders = []
    gts = []
    image_names = []
    # sorted() makes evaluation order reproducible across machines.
    for fname in sorted(os.listdir(renders_dir)):
        render = Image.open(renders_dir / fname)
        gt = Image.open(gt_dir / fname)
        # Keep only the RGB channels (drop alpha) and add a batch dimension.
        renders.append(tf.to_tensor(render).unsqueeze(0)[:, :3, :, :].cuda())
        gts.append(tf.to_tensor(gt).unsqueeze(0)[:, :3, :, :].cuda())
        image_names.append(fname)
    return renders, gts, image_names
|
| 35 |
+
|
| 36 |
+
def evaluate(model_paths):
    """Compute SSIM / PSNR / LPIPS for every method under each scene's
    ``test`` directory and write per-scene JSON summaries.

    For each ``<scene_dir>/test/<method>`` the ``renders`` and ``gt``
    folders are read, the three metrics are averaged and printed, and
    ``results.json`` (aggregates) plus ``per_view.json`` (per-image values)
    are written into ``<scene_dir>``.

    :param model_paths: Iterable of scene output directories to evaluate.
    """
    full_dict = {}
    per_view_dict = {}
    print("")

    for scene_dir in model_paths:
        try:
            print("Scene:", scene_dir)
            full_dict[scene_dir] = {}
            per_view_dict[scene_dir] = {}

            test_dir = Path(scene_dir) / "test"

            for method in os.listdir(test_dir):
                print("Method:", method)

                full_dict[scene_dir][method] = {}
                per_view_dict[scene_dir][method] = {}

                method_dir = test_dir / method
                gt_dir = method_dir / "gt"
                renders_dir = method_dir / "renders"
                renders, gts, image_names = readImages(renders_dir, gt_dir)

                ssims = []
                psnrs = []
                lpipss = []

                for idx in tqdm(range(len(renders)), desc="Metric evaluation progress"):
                    ssims.append(ssim(renders[idx], gts[idx]))
                    psnrs.append(psnr(renders[idx], gts[idx]))
                    lpipss.append(lpips(renders[idx], gts[idx], net_type='vgg'))

                # (The original passed a stray ".5" second argument to
                # format(), which was silently ignored.)
                print("  SSIM : {:>12.7f}".format(torch.tensor(ssims).mean()))
                print("  PSNR : {:>12.7f}".format(torch.tensor(psnrs).mean()))
                print("  LPIPS: {:>12.7f}".format(torch.tensor(lpipss).mean()))
                print("")

                full_dict[scene_dir][method].update({"SSIM": torch.tensor(ssims).mean().item(),
                                                     "PSNR": torch.tensor(psnrs).mean().item(),
                                                     "LPIPS": torch.tensor(lpipss).mean().item()})
                per_view_dict[scene_dir][method].update({"SSIM": {name: s for s, name in zip(torch.tensor(ssims).tolist(), image_names)},
                                                         "PSNR": {name: p for p, name in zip(torch.tensor(psnrs).tolist(), image_names)},
                                                         "LPIPS": {name: lp for lp, name in zip(torch.tensor(lpipss).tolist(), image_names)}})

            with open(scene_dir + "/results.json", 'w') as fp:
                json.dump(full_dict[scene_dir], fp, indent=True)
            with open(scene_dir + "/per_view.json", 'w') as fp:
                json.dump(per_view_dict[scene_dir], fp, indent=True)
        except Exception as e:
            # Report the actual failure instead of silently swallowing it
            # with a bare `except:` — keeps the best-effort behavior but
            # makes diagnosis possible.
            print("Unable to compute metrics for model", scene_dir, ":", e)
|
| 94 |
+
|
| 95 |
+
if __name__ == "__main__":
    # Metrics run on the GPU (readImages moves tensors to CUDA); pin
    # everything to the first device explicitly.
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Set up command line argument parser
    parser = ArgumentParser(description="Training script parameters")
    # One or more scene output directories produced by training/rendering.
    parser.add_argument('--model_paths', '-m', required=True, nargs="+", type=str, default=[])
    args = parser.parse_args()
    evaluate(args.model_paths)
|
gaussian-grouping/render.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2023, Gaussian-Grouping
|
| 2 |
+
# Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# ------------------------------------------------------------------------
|
| 6 |
+
# Modified from codes in Gaussian-Splatting
|
| 7 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from scene import Scene
|
| 11 |
+
import os
|
| 12 |
+
from tqdm import tqdm
|
| 13 |
+
from os import makedirs
|
| 14 |
+
from gaussian_renderer import render
|
| 15 |
+
import torchvision
|
| 16 |
+
from utils.general_utils import safe_state
|
| 17 |
+
from argparse import ArgumentParser
|
| 18 |
+
from arguments import ModelParams, PipelineParams, get_combined_args
|
| 19 |
+
from gaussian_renderer import GaussianModel
|
| 20 |
+
import numpy as np
|
| 21 |
+
from PIL import Image
|
| 22 |
+
import colorsys
|
| 23 |
+
import cv2
|
| 24 |
+
from sklearn.decomposition import PCA
|
| 25 |
+
|
| 26 |
+
def feature_to_rgb(features):
    """Project a multi-channel feature map to an RGB visualization via PCA.

    :param features: (C, H, W) tensor of per-pixel features (e.g. the
        16-dim identity-encoding rendering).
    :return: (H, W, 3) uint8 array: the first three principal components
        jointly normalized to [0, 255].
    """
    # Flatten spatial dims: one row per pixel, one column per channel.
    H, W = features.shape[1], features.shape[2]
    features_reshaped = features.view(features.shape[0], -1).T

    # Keep the three directions of largest variance.
    pca = PCA(n_components=3)
    pca_result = pca.fit_transform(features_reshaped.cpu().numpy())

    # Reshape back to image layout.
    pca_result = pca_result.reshape(H, W, 3)

    # Normalize to [0, 255]. Guard against a constant feature map
    # (max == min), which previously caused a division by zero / NaNs.
    value_range = pca_result.max() - pca_result.min()
    if value_range == 0:
        return np.zeros((H, W, 3), dtype='uint8')
    pca_normalized = 255 * (pca_result - pca_result.min()) / value_range

    return pca_normalized.astype('uint8')
|
| 46 |
+
|
| 47 |
+
def id2rgb(id, max_num_obj=256):
    """Map an object ID to a stable, well-separated RGB color.

    Hue follows the golden ratio so consecutive IDs get distant hues;
    saturation alternates between 0.5 and 1.0. ID 0 is reserved for the
    invalid/background region and maps to black.

    :param id: Integer object ID in ``range(0, max_num_obj)``.
    :param max_num_obj: Exclusive upper bound for valid IDs.
    :return: np.uint8 array of shape (3,), RGB values in [0, 255].
    :raises ValueError: If ``id`` lies outside ``range(0, max_num_obj)``.
    """
    # Exclusive upper bound, matching the error message. The original
    # check (`id <= max_num_obj`) accepted id == max_num_obj (off-by-one).
    if not 0 <= id < max_num_obj:
        raise ValueError("ID should be in range(0, max_num_obj)")

    # Convert the ID into a hue value, spread via the golden ratio.
    golden_ratio = 1.6180339887
    h = ((id * golden_ratio) % 1)  # hue in [0, 1)
    s = 0.5 + (id % 2) * 0.5       # alternate between 0.5 and 1.0
    l = 0.5

    rgb = np.zeros((3, ), dtype=np.uint8)
    if id == 0:  # invalid region stays black
        return rgb
    # colorsys takes HLS order (hue, lightness, saturation).
    r, g, b = colorsys.hls_to_rgb(h, l, s)
    rgb[0], rgb[1], rgb[2] = int(r*255), int(g*255), int(b*255)

    return rgb
|
| 66 |
+
|
| 67 |
+
def visualize_obj(objects):
    """Color-code an object-ID map: every distinct ID becomes one RGB color.

    :param objects: Integer array whose last two dims are (H, W) object IDs.
    :return: (H, W, 3) uint8 color image (ID 0 stays black via id2rgb).
    """
    height_width = objects.shape[-2:]
    rgb_mask = np.zeros((*height_width, 3), dtype=np.uint8)
    # Paint each region sharing an ID with that ID's color.
    for obj_id in np.unique(objects):
        rgb_mask[objects == obj_id] = id2rgb(obj_id)
    return rgb_mask
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def render_set(model_path, name, iteration, views, gaussians, pipeline, background, classifier):
    """Render every view of one split and save images plus object-feature
    visualizations.

    Writes four folders under ``<model_path>/<name>/ours_<iteration>/``:
    ``renders`` (RGB renders), ``gt`` (ground-truth images),
    ``objects_feature16`` (PCA visualization of the rendered identity
    features) and ``gt_objects_color`` (color-coded GT object masks).

    :param model_path: Output root of the trained model.
    :param name: Split name, e.g. "train" or "test".
    :param iteration: Iteration number embedded in the output folder name.
    :param views: List of camera objects to render.
    :param gaussians: Loaded GaussianModel.
    :param pipeline: Pipeline parameters forwarded to ``render``.
    :param background: Background color tensor.
    :param classifier: Object classifier head. NOTE(review): unused here —
        the classification code below is commented out; kept for interface
        parity with the other render scripts.
    """
    render_path = os.path.join(model_path, name, "ours_{}".format(iteration), "renders")
    gts_path = os.path.join(model_path, name, "ours_{}".format(iteration), "gt")
    colormask_path = os.path.join(model_path, name, "ours_{}".format(iteration), "objects_feature16")
    gt_colormask_path = os.path.join(model_path, name, "ours_{}".format(iteration), "gt_objects_color")
    # pred_obj_path = os.path.join(model_path, name, "ours_{}".format(iteration), "objects_pred")
    makedirs(render_path, exist_ok=True)
    makedirs(gts_path, exist_ok=True)
    makedirs(colormask_path, exist_ok=True)
    makedirs(gt_colormask_path, exist_ok=True)
    # makedirs(pred_obj_path, exist_ok=True)

    for idx, view in enumerate(tqdm(views, desc="Rendering progress")):
        # if idx < 540 or idx > 640:
        #     continue
        results = render(view, gaussians, pipeline, background)
        rendering = results["render"]
        rendering_obj = results["render_object"]

        # Disabled: per-pixel object classification of the rendered features.
        # logits = classifier(rendering_obj)
        # pred_obj = torch.argmax(logits,dim=0)
        # pred_obj_mask = visualize_obj(pred_obj.cpu().numpy().astype(np.uint8))

        # Ground-truth object-ID map carried on the camera object.
        gt_objects = view.objects
        gt_rgb_mask = visualize_obj(gt_objects.cpu().numpy().astype(np.uint8))

        # PCA-based color visualization of the rendered identity features.
        rgb_mask = feature_to_rgb(rendering_obj)
        Image.fromarray(rgb_mask).save(os.path.join(colormask_path, '{0:05d}'.format(idx) + ".png"))
        Image.fromarray(gt_rgb_mask).save(os.path.join(gt_colormask_path, '{0:05d}'.format(idx) + ".png"))
        # Image.fromarray(pred_obj_mask).save(os.path.join(pred_obj_path, '{0:05d}'.format(idx) + ".png"))
        gt = view.original_image[0:3, :, :]
        torchvision.utils.save_image(rendering, os.path.join(render_path, '{0:05d}'.format(idx) + ".png"))
        torchvision.utils.save_image(gt, os.path.join(gts_path, '{0:05d}'.format(idx) + ".png"))

    # Disabled video-concat export kept for reference (note: references
    # `pred_obj`, which is also disabled above — would need both re-enabled).
    '''
    out_path = os.path.join(render_path[:-8],'concat')
    makedirs(out_path,exist_ok=True)
    fourcc = cv2.VideoWriter.fourcc(*'mp4v')
    # fourcc = cv2.VideoWriter.fourcc(*'DIVX')
    size = (gt.shape[-1]*5,gt.shape[-2])
    fps = float(5) if 'train' in out_path else float(1)
    writer = cv2.VideoWriter(os.path.join(out_path,'result.mp4'), fourcc, fps, size)

    for file_name in sorted(os.listdir(gts_path)):
        gt = np.array(Image.open(os.path.join(gts_path,file_name)))
        rgb = np.array(Image.open(os.path.join(render_path,file_name)))
        gt_obj = np.array(Image.open(os.path.join(gt_colormask_path,file_name)))
        render_obj = np.array(Image.open(os.path.join(colormask_path,file_name)))
        # pred_obj = np.array(Image.open(os.path.join(pred_obj_path,file_name)))

        result = np.hstack([gt,rgb,gt_obj,pred_obj,render_obj])
        result = result.astype('uint8')

        Image.fromarray(result).save(os.path.join(out_path,file_name))
        writer.write(result[:,:,::-1])
    writer.release()
    '''
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def render_sets(dataset : ModelParams, iteration : int, pipeline : PipelineParams, skip_train : bool, skip_test : bool):
    """Load the scene at the given iteration and render train/test splits.

    :param dataset: Model parameters (paths, sh_degree, num_classes, ...).
    :param iteration: Checkpoint iteration to load (-1 = latest).
    :param pipeline: Renderer pipeline parameters.
    :param skip_train: If True, do not render the training cameras.
    :param skip_test: If True, do not render the test cameras.
    """
    with torch.no_grad():
        gaussians = GaussianModel(dataset.sh_degree)
        scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False)

        num_classes = dataset.num_classes
        print("Num classes: ",num_classes)

        # NOTE(review): the trained classifier weights load is commented out
        # below, so this classifier runs with random initialization. It is
        # only passed through to render_set, which does not use it — but
        # confirm this is intended before re-enabling classification.
        classifier = torch.nn.Conv2d(gaussians.num_objects, num_classes, kernel_size=1)
        classifier.cuda()
        # classifier.load_state_dict(torch.load(os.path.join(dataset.model_path,"point_cloud","iteration_"+str(scene.loaded_iter),"classifier.pth")))

        bg_color = [1,1,1] if dataset.white_background else [0, 0, 0]
        background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")

        if not skip_train:
            render_set(dataset.model_path, "train", scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background, classifier)

        # Only render the test split when it is non-empty.
        if (not skip_test) and (len(scene.getTestCameras()) > 0):
            render_set(dataset.model_path, "test", scene.loaded_iter, scene.getTestCameras(), gaussians, pipeline, background, classifier)
|
| 156 |
+
|
| 157 |
+
if __name__ == "__main__":
    # Set up command line argument parser
    parser = ArgumentParser(description="Testing script parameters")
    # sentinel=True: model params read stored values from the trained
    # model's cfg via get_combined_args below.
    model = ModelParams(parser, sentinel=True)
    pipeline = PipelineParams(parser)
    parser.add_argument("--iteration", default=-1, type=int)  # -1 = latest checkpoint
    parser.add_argument("--skip_train", action="store_true")
    parser.add_argument("--skip_test", action="store_true")
    parser.add_argument("--quiet", action="store_true")
    args = get_combined_args(parser)
    print("Rendering " + args.model_path)

    # Initialize system state (RNG)
    safe_state(args.quiet)

    render_sets(model.extract(args), args.iteration, pipeline.extract(args), args.skip_train, args.skip_test)
|
gaussian-grouping/render_lerf_mask.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2023, Gaussian-Grouping
|
| 2 |
+
# Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# ------------------------------------------------------------------------
|
| 6 |
+
# Modified from codes in Gaussian-Splatting
|
| 7 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from scene import Scene
|
| 11 |
+
import os
|
| 12 |
+
from tqdm import tqdm
|
| 13 |
+
from os import makedirs
|
| 14 |
+
from gaussian_renderer import render
|
| 15 |
+
import torchvision
|
| 16 |
+
from utils.general_utils import safe_state
|
| 17 |
+
from argparse import ArgumentParser
|
| 18 |
+
from arguments import ModelParams, PipelineParams, get_combined_args
|
| 19 |
+
from gaussian_renderer import GaussianModel
|
| 20 |
+
import numpy as np
|
| 21 |
+
from PIL import Image
|
| 22 |
+
import cv2
|
| 23 |
+
|
| 24 |
+
from ext.grounded_sam import grouned_sam_output, load_model_hf, select_obj_ioa
|
| 25 |
+
from segment_anything import sam_model_registry, SamPredictor
|
| 26 |
+
|
| 27 |
+
from render import feature_to_rgb, visualize_obj
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def render_set(model_path, name, iteration, views, gaussians, pipeline, background, classifier, groundingdino_model, sam_predictor, TEXT_PROMPT, threshold=0.2):
    """Render a split and produce text-prompted object masks.

    Grounded-SAM segments TEXT_PROMPT in the first rendered frame; the
    object IDs overlapping that mask are then thresholded in every frame's
    classifier probabilities to produce per-view binary masks, saved under
    ``ours_<iteration>_text/test_mask/<idx>/<TEXT_PROMPT>.png``.

    :param model_path: Output root of the trained model.
    :param name: Split name ("train"/"test").
    :param iteration: Checkpoint iteration (used in the folder name).
    :param views: Camera list to render.
    :param gaussians: Loaded GaussianModel.
    :param pipeline: Renderer pipeline parameters.
    :param background: Background color tensor.
    :param classifier: Trained per-pixel object classifier head.
    :param groundingdino_model: Grounding DINO model for text grounding.
    :param sam_predictor: SAM predictor used by grounded_sam.
    :param TEXT_PROMPT: Text query to segment.
    :param threshold: Probability threshold for selecting object pixels.
    """
    render_path = os.path.join(model_path, name, "ours_{}_text".format(iteration), "renders")
    gts_path = os.path.join(model_path, name, "ours_{}_text".format(iteration), "gt")
    colormask_path = os.path.join(model_path, name, "ours_{}_text".format(iteration), "objects_feature16")
    pred_obj_path = os.path.join(model_path, name, "ours_{}_text".format(iteration), "test_mask")
    makedirs(render_path, exist_ok=True)
    makedirs(gts_path, exist_ok=True)
    makedirs(colormask_path, exist_ok=True)
    makedirs(pred_obj_path, exist_ok=True)

    # Use Grounded-SAM on the first frame
    results0 = render(views[0], gaussians, pipeline, background)
    rendering0 = results0["render"]
    rendering_obj0 = results0["render_object"]
    logits = classifier(rendering_obj0)
    pred_obj = torch.argmax(logits,dim=0)

    # Convert the CHW float render to an HWC uint8 image for Grounded-SAM.
    image = (rendering0.permute(1,2,0) * 255).cpu().numpy().astype('uint8')
    text_mask, annotated_frame_with_mask = grouned_sam_output(groundingdino_model, sam_predictor, TEXT_PROMPT, image)
    Image.fromarray(annotated_frame_with_mask).save(os.path.join(render_path[:-8],'grounded-sam---'+TEXT_PROMPT+'.png'))
    # Object IDs whose predicted region overlaps the text mask (IoA-based).
    selected_obj_ids = select_obj_ioa(pred_obj, text_mask)

    for idx, view in enumerate(tqdm(views, desc="Rendering progress")):
        # One mask folder per view index; file is named after the prompt.
        pred_obj_img_path = os.path.join(pred_obj_path,str(idx))
        makedirs(pred_obj_img_path, exist_ok=True)

        results = render(view, gaussians, pipeline, background)
        rendering = results["render"]
        rendering_obj = results["render_object"]
        logits = classifier(rendering_obj)

        if len(selected_obj_ids) > 0:
            prob = torch.softmax(logits,dim=0)

            # A pixel belongs to the prompt if ANY selected ID exceeds the
            # probability threshold there.
            pred_obj_mask = prob[selected_obj_ids, :, :] > threshold
            pred_obj_mask = pred_obj_mask.any(dim=0)
            pred_obj_mask = (pred_obj_mask.squeeze().cpu().numpy() * 255).astype(np.uint8)
        else:
            # No matching object: emit an all-zero mask.
            pred_obj_mask = torch.zeros_like(view.objects).cpu().numpy()

        gt_objects = view.objects
        gt_rgb_mask = visualize_obj(gt_objects.cpu().numpy().astype(np.uint8))

        rgb_mask = feature_to_rgb(rendering_obj)
        Image.fromarray(rgb_mask).save(os.path.join(colormask_path, '{0:05d}'.format(idx) + ".png"))
        Image.fromarray(pred_obj_mask).save(os.path.join(pred_obj_img_path, TEXT_PROMPT + ".png"))
        print(os.path.join(pred_obj_img_path, TEXT_PROMPT + ".png"))
        gt = view.original_image[0:3, :, :]
        torchvision.utils.save_image(rendering, os.path.join(render_path, '{0:05d}'.format(idx) + ".png"))
        torchvision.utils.save_image(gt, os.path.join(gts_path, '{0:05d}'.format(idx) + ".png"))
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def render_sets(dataset : ModelParams, iteration : int, pipeline : PipelineParams, skip_train : bool, skip_test : bool):
    """Load the scene plus Grounded-SAM models and render text-prompted
    masks for every prompt of the recognized LERF scene.

    :param dataset: Model parameters; ``eval`` is forced to True so a test
        split exists.
    :param iteration: Checkpoint iteration to load (-1 = latest).
    :param pipeline: Renderer pipeline parameters.
    :param skip_train: If True, skip the train split.
    :param skip_test: If True, skip the test split.
    :raises NotImplementedError: If the model path matches none of the
        known LERF scenes (figurines/ramen/teatime).
    """
    with torch.no_grad():
        dataset.eval = True
        gaussians = GaussianModel(dataset.sh_degree)
        scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False)

        num_classes = dataset.num_classes
        print("Num classes: ",num_classes)

        # Trained classifier weights are loaded here (unlike render.py).
        classifier = torch.nn.Conv2d(gaussians.num_objects, num_classes, kernel_size=1)
        classifier.cuda()
        classifier.load_state_dict(torch.load(os.path.join(dataset.model_path,"point_cloud","iteration_"+str(scene.loaded_iter),"classifier.pth")))

        bg_color = [1,1,1] if dataset.white_background else [0, 0, 0]
        background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")

        # grounding-dino
        # Use this command for evaluate the Grounding DINO model
        # Or you can download the model by yourself
        # NOTE: 'ckpt_filenmae' is a typo of 'ckpt_filename' (kept as-is).
        ckpt_repo_id = "ShilongLiu/GroundingDINO"
        ckpt_filenmae = "groundingdino_swinb_cogcoor.pth"
        ckpt_config_filename = "GroundingDINO_SwinB.cfg.py"
        groundingdino_model = load_model_hf(ckpt_repo_id, ckpt_filenmae, ckpt_config_filename)

        # sam-hq: checkpoint is expected at a path relative to the CWD.
        sam_checkpoint = 'Tracking-Anything-with-DEVA/saves/sam_vit_h_4b8939.pth'
        sam = sam_model_registry["vit_h"](checkpoint=sam_checkpoint)
        sam.to(device='cuda')
        sam_predictor = SamPredictor(sam)

        # Text prompt: hard-coded per LERF scene, selected by model path.
        if 'figurines' in dataset.model_path:
            positive_input = "green apple;green toy chair;old camera;porcelain hand;red apple;red toy chair;rubber duck with red hat"
        elif 'ramen' in dataset.model_path:
            positive_input = "chopsticks;egg;glass of water;pork belly;wavy noodles in bowl;yellow bowl"
        elif 'teatime' in dataset.model_path:
            positive_input = "apple;bag of cookies;coffee mug;cookies on a plate;paper napkin;plate;sheep;spoon handle;stuffed bear;tea in a glass"
        else:
            raise NotImplementedError # You can provide your text prompt here

        positives = positive_input.split(";")
        print("Text prompts: ", positives)

        # Render each prompt independently over the requested splits.
        for TEXT_PROMPT in positives:
            if not skip_train:
                render_set(dataset.model_path, "train", scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background, classifier, groundingdino_model, sam_predictor, TEXT_PROMPT)
            if not skip_test:
                render_set(dataset.model_path, "test", scene.loaded_iter, scene.getTestCameras(), gaussians, pipeline, background, classifier, groundingdino_model, sam_predictor, TEXT_PROMPT)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
if __name__ == "__main__":
    # Set up command line argument parser
    parser = ArgumentParser(description="Testing script parameters")
    # sentinel=True: model params fall back to the trained model's stored
    # config via get_combined_args below.
    model = ModelParams(parser, sentinel=True)
    pipeline = PipelineParams(parser)
    parser.add_argument("--iteration", default=-1, type=int)  # -1 = latest checkpoint
    parser.add_argument("--skip_train", action="store_true")
    parser.add_argument("--skip_test", action="store_true")
    parser.add_argument("--quiet", action="store_true")
    args = get_combined_args(parser)
    print("Rendering " + args.model_path)

    # Initialize system state (RNG)
    safe_state(args.quiet)

    render_sets(model.extract(args), args.iteration, pipeline.extract(args), args.skip_train, args.skip_test)
|
gaussian-grouping/run.sh
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Generate five CLEVR scenes and run the Gaussian-Grouping pipeline on each:
# scene N is rendered via clevr-dataset-gen/gen.sh, then processed as
# "clevr-N" via gaussian-grouping/cal.sh.

# Fail fast: abort on any command failure or unset variable. In particular,
# a failed `cd` (missing directory) now stops the run instead of silently
# executing the next command from the wrong directory.
set -eu

# Get the parent directory of the script
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PARENT_FOLDER="$(dirname "$SCRIPT_DIR")"

for num in {1..5}
do
    cd "${PARENT_FOLDER}/clevr-dataset-gen/image_generation/"
    bash gen.sh "${num}"
    cd "${PARENT_FOLDER}/gaussian-grouping/"
    bash cal.sh "clevr-${num}"
done
|
gaussian-grouping/scene/__init__.py
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Copyright (C) 2023, Inria
|
| 3 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 4 |
+
# All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# This software is free for non-commercial, research and evaluation use
|
| 7 |
+
# under the terms of the LICENSE.md file.
|
| 8 |
+
#
|
| 9 |
+
# For inquiries contact george.drettakis@inria.fr
|
| 10 |
+
#
|
| 11 |
+
|
| 12 |
+
import os
|
| 13 |
+
import random
|
| 14 |
+
import json
|
| 15 |
+
from utils.system_utils import searchForMaxIteration
|
| 16 |
+
from scene.dataset_readers import sceneLoadTypeCallbacks
|
| 17 |
+
from scene.gaussian_model import GaussianModel
|
| 18 |
+
from arguments import ModelParams
|
| 19 |
+
from utils.camera_utils import cameraList_from_camInfos, camera_to_JSON
|
| 20 |
+
from PIL import Image
|
| 21 |
+
import numpy as np
|
| 22 |
+
from scene.dataset_readers import CameraInfo, getNerfppNorm
|
| 23 |
+
from utils.graphics_utils import focal2fov
|
| 24 |
+
|
| 25 |
+
class Scene:
|
| 26 |
+
|
| 27 |
+
gaussians : GaussianModel
|
| 28 |
+
|
| 29 |
+
def __init__(self, args : ModelParams, gaussians : GaussianModel, load_iteration=None, shuffle=True, resolution_scales=[1.0], only_gaussians=False):
|
| 30 |
+
"""b
|
| 31 |
+
:param path: Path to colmap scene main folder.
|
| 32 |
+
"""
|
| 33 |
+
self.model_path = args.model_path
|
| 34 |
+
self.loaded_iter = None
|
| 35 |
+
self.gaussians = gaussians
|
| 36 |
+
|
| 37 |
+
# --- mtd ---
|
| 38 |
+
only_gaussians = True # 在我的实验中,我们不从头生成,而是使用预生成的 gaussian 模型,来进行 editing
|
| 39 |
+
# --- mtd ---
|
| 40 |
+
|
| 41 |
+
if only_gaussians:
|
| 42 |
+
|
| 43 |
+
self.loaded_iter = 1
|
| 44 |
+
# self.loaded_iter = load_iteration
|
| 45 |
+
# --- mtd ---
|
| 46 |
+
self.loaded_iter = load_iteration # 我们这里用 load_iteration
|
| 47 |
+
|
| 48 |
+
# import pdb; pdb.set_trace()
|
| 49 |
+
# --- mtd ---
|
| 50 |
+
if isinstance(self.loaded_iter,str):
|
| 51 |
+
ply_path = os.path.join(self.model_path, "point_cloud" + self.loaded_iter, "point_cloud.ply")
|
| 52 |
+
else:
|
| 53 |
+
ply_path = os.path.join(self.model_path, "point_cloud", "iteration_" + str(self.loaded_iter), "point_cloud.ply")
|
| 54 |
+
|
| 55 |
+
# print(self.model_path)
|
| 56 |
+
# print(self.loaded_iter)
|
| 57 |
+
# print(isinstance(self.loaded_iter,str))
|
| 58 |
+
# if isinstance(self.loaded_iter,str):
|
| 59 |
+
# print(os.path.join(self.model_path, "point_cloud", self.loaded_iter, "point_cloud.ply"))
|
| 60 |
+
# print(ply_path)
|
| 61 |
+
|
| 62 |
+
# if os.path.exists(ply_path) and False:
|
| 63 |
+
if os.path.exists(ply_path):
|
| 64 |
+
# print(123456)
|
| 65 |
+
self.gaussians.load_ply(ply_path)
|
| 66 |
+
else:
|
| 67 |
+
import open3d as o3d
|
| 68 |
+
scene_id = self.model_path.split('/')[1]
|
| 69 |
+
file_path = f'/media/shared_space/data/scannet/scans/{scene_id}/{scene_id}_vh_clean.ply' # DO NOT UST NT xxx_clean_2.ply !
|
| 70 |
+
pcd = o3d.io.read_point_cloud(file_path)
|
| 71 |
+
|
| 72 |
+
self.gaussians.create_from_pcd(pcd, 1.0)
|
| 73 |
+
|
| 74 |
+
# --- mtd ---
|
| 75 |
+
# ------------------ 新增:尝试读取摄像机信息 ------------------
|
| 76 |
+
self.train_cameras = {}
|
| 77 |
+
self.test_cameras = {}
|
| 78 |
+
|
| 79 |
+
cam_infos = []
|
| 80 |
+
cam_json_path = os.path.join(self.model_path, "cameras.json")
|
| 81 |
+
if os.path.exists(cam_json_path):
|
| 82 |
+
with open(cam_json_path, 'r') as f:
|
| 83 |
+
json_cams = json.load(f)
|
| 84 |
+
for entry in json_cams:
|
| 85 |
+
try:
|
| 86 |
+
width = entry.get('width', 800)
|
| 87 |
+
height = entry.get('height', 600)
|
| 88 |
+
rot = np.array(entry['rotation']) # shape (3,3)
|
| 89 |
+
pos = np.array(entry['position']) # shape (3,)
|
| 90 |
+
# 将 camera->world 转为 world->camera
|
| 91 |
+
R_wc = rot.T
|
| 92 |
+
T_wc = -R_wc @ pos
|
| 93 |
+
fy = entry.get('fy', 500.0)
|
| 94 |
+
fx = entry.get('fx', 500.0)
|
| 95 |
+
FovY = focal2fov(fy, height)
|
| 96 |
+
FovX = focal2fov(fx, width)
|
| 97 |
+
blank_img = Image.new('RGB', (width, height))
|
| 98 |
+
dummy_objects = np.empty((0,), dtype=np.float32)
|
| 99 |
+
cam_infos.append(CameraInfo(uid=entry['id'], R=R_wc, T=T_wc,
|
| 100 |
+
FovY=FovY, FovX=FovX, image=blank_img,
|
| 101 |
+
image_path='', image_name=str(entry['id']),
|
| 102 |
+
width=width, height=height, objects=dummy_objects))
|
| 103 |
+
except Exception as e:
|
| 104 |
+
print(f"[Scene] Warning: skip camera entry due to {e}")
|
| 105 |
+
else:
|
| 106 |
+
# fallback:基于 correct.json 生成一个顶视虚拟相机
|
| 107 |
+
correct_json_path = os.path.join(self.model_path, "correct.json")
|
| 108 |
+
if os.path.exists(correct_json_path):
|
| 109 |
+
width, height = 800, 800
|
| 110 |
+
# 顶视方向:Z 轴朝相机,Y 轴向下
|
| 111 |
+
R_cw = np.array([[1, 0, 0],
|
| 112 |
+
[0, 0, -1],
|
| 113 |
+
[0, 1, 0]]) # camera->world
|
| 114 |
+
pos = np.array([0.0, 0.0, 10.0])
|
| 115 |
+
R_wc = R_cw.T
|
| 116 |
+
T_wc = -R_wc @ pos
|
| 117 |
+
FovX = FovY = np.radians(60.0)
|
| 118 |
+
blank_img = Image.new('RGB', (width, height))
|
| 119 |
+
dummy_objects = np.empty((0,), dtype=np.float32)
|
| 120 |
+
cam_infos.append(CameraInfo(uid=0, R=R_wc, T=T_wc, FovY=FovY,
|
| 121 |
+
FovX=FovX, image=blank_img, image_path='',
|
| 122 |
+
image_name='virtual_top', width=width,
|
| 123 |
+
height=height, objects=dummy_objects))
|
| 124 |
+
|
| 125 |
+
# 若仍然没有可用相机,则继续但给出提示
|
| 126 |
+
if not cam_infos:
|
| 127 |
+
print("[Scene] Warning: no camera information found; train/test camera lists will be empty.")
|
| 128 |
+
self.cameras_extent = 1.0
|
| 129 |
+
for resolution_scale in resolution_scales:
|
| 130 |
+
self.train_cameras[resolution_scale] = []
|
| 131 |
+
self.test_cameras[resolution_scale] = []
|
| 132 |
+
else:
|
| 133 |
+
# 计算 nerf 归一化半径
|
| 134 |
+
nerf_norm = getNerfppNorm(cam_infos)
|
| 135 |
+
self.cameras_extent = nerf_norm["radius"]
|
| 136 |
+
for resolution_scale in resolution_scales:
|
| 137 |
+
self.train_cameras[resolution_scale] = cameraList_from_camInfos(cam_infos, resolution_scale, args)
|
| 138 |
+
self.test_cameras[resolution_scale] = []
|
| 139 |
+
# ------------------ 新增结束 ------------------
|
| 140 |
+
# --- mtd ---
|
| 141 |
+
|
| 142 |
+
# 保证高斯模型已加载
|
| 143 |
+
if os.path.exists(ply_path):
|
| 144 |
+
pass # 前面已 load_ply
|
| 145 |
+
else:
|
| 146 |
+
# 若之前 create_from_pcd 已进行,这里不重复
|
| 147 |
+
pass
|
| 148 |
+
|
| 149 |
+
# 注意:不要提前 return,后续代码可能还需使用 self.train_cameras
|
| 150 |
+
return
|
| 151 |
+
|
| 152 |
+
if load_iteration:
|
| 153 |
+
if load_iteration == -1:
|
| 154 |
+
self.loaded_iter = searchForMaxIteration(os.path.join(self.model_path, "point_cloud"))
|
| 155 |
+
else:
|
| 156 |
+
self.loaded_iter = load_iteration
|
| 157 |
+
print("Loading trained model at iteration {}".format(self.loaded_iter))
|
| 158 |
+
|
| 159 |
+
self.train_cameras = {}
|
| 160 |
+
self.test_cameras = {}
|
| 161 |
+
|
| 162 |
+
# print(os.path.join(args.source_path, "sparse"))
|
| 163 |
+
|
| 164 |
+
if os.path.exists(os.path.join(args.source_path, "sparse")):
|
| 165 |
+
scene_info = sceneLoadTypeCallbacks["Colmap"](args.source_path, args.images, args.eval, args.object_path, n_views=args.n_views, random_init=args.random_init, train_split=args.train_split)
|
| 166 |
+
elif os.path.exists(os.path.join(args.source_path, "transforms_train.json")):
|
| 167 |
+
print("Found transforms_train.json file, assuming Blender data set!")
|
| 168 |
+
scene_info = sceneLoadTypeCallbacks["Blender"](args.source_path, args.white_background, args.eval)
|
| 169 |
+
else:
|
| 170 |
+
assert False, "Could not recognize scene type!"
|
| 171 |
+
|
| 172 |
+
if not self.loaded_iter:
|
| 173 |
+
with open(scene_info.ply_path, 'rb') as src_file, open(os.path.join(self.model_path, "input.ply") , 'wb') as dest_file:
|
| 174 |
+
dest_file.write(src_file.read())
|
| 175 |
+
json_cams = []
|
| 176 |
+
camlist = []
|
| 177 |
+
if scene_info.test_cameras:
|
| 178 |
+
camlist.extend(scene_info.test_cameras)
|
| 179 |
+
if scene_info.train_cameras:
|
| 180 |
+
camlist.extend(scene_info.train_cameras)
|
| 181 |
+
for id, cam in enumerate(camlist):
|
| 182 |
+
json_cams.append(camera_to_JSON(id, cam))
|
| 183 |
+
with open(os.path.join(self.model_path, "cameras.json"), 'w') as file:
|
| 184 |
+
json.dump(json_cams, file)
|
| 185 |
+
|
| 186 |
+
if shuffle:
|
| 187 |
+
random.shuffle(scene_info.train_cameras) # Multi-res consistent random shuffling
|
| 188 |
+
random.shuffle(scene_info.test_cameras) # Multi-res consistent random shuffling
|
| 189 |
+
|
| 190 |
+
self.cameras_extent = scene_info.nerf_normalization["radius"]
|
| 191 |
+
|
| 192 |
+
for resolution_scale in resolution_scales:
|
| 193 |
+
print("Loading Training Cameras")
|
| 194 |
+
self.train_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.train_cameras, resolution_scale, args)
|
| 195 |
+
print("Loading Test Cameras")
|
| 196 |
+
self.test_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.test_cameras, resolution_scale, args)
|
| 197 |
+
|
| 198 |
+
if self.loaded_iter:
|
| 199 |
+
if isinstance(self.loaded_iter,str):
|
| 200 |
+
print("edit load path", self.loaded_iter)
|
| 201 |
+
self.gaussians.load_ply(os.path.join(self.model_path,
|
| 202 |
+
"point_cloud"+self.loaded_iter,
|
| 203 |
+
"point_cloud.ply"))
|
| 204 |
+
else:
|
| 205 |
+
self.gaussians.load_ply(os.path.join(self.model_path,
|
| 206 |
+
"point_cloud",
|
| 207 |
+
"iteration_" + str(self.loaded_iter),
|
| 208 |
+
"point_cloud.ply"))
|
| 209 |
+
else:
|
| 210 |
+
self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent)
|
| 211 |
+
|
| 212 |
+
def save(self, iteration):
    """Write the current Gaussian point cloud to
    <model_path>/point_cloud/iteration_<iteration>/point_cloud.ply."""
    snapshot_dir = os.path.join(self.model_path, f"point_cloud/iteration_{iteration}")
    self.gaussians.save_ply(os.path.join(snapshot_dir, "point_cloud.ply"))
|
| 215 |
+
|
| 216 |
+
def getTrainCameras(self, scale=1.0):
    """Return the training cameras loaded at the given resolution scale."""
    cameras_by_scale = self.train_cameras
    return cameras_by_scale[scale]
|
| 218 |
+
|
| 219 |
+
def getTestCameras(self, scale=1.0):
    """Return the held-out test cameras loaded at the given resolution scale."""
    cameras_by_scale = self.test_cameras
    return cameras_by_scale[scale]
|
gaussian-grouping/scene/cameras.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Copyright (C) 2023, Inria
|
| 3 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 4 |
+
# All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# This software is free for non-commercial, research and evaluation use
|
| 7 |
+
# under the terms of the LICENSE.md file.
|
| 8 |
+
#
|
| 9 |
+
# For inquiries contact george.drettakis@inria.fr
|
| 10 |
+
#
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
from torch import nn
|
| 14 |
+
import numpy as np
|
| 15 |
+
from utils.graphics_utils import getWorld2View2, getProjectionMatrix
|
| 16 |
+
from scipy.spatial.transform import Rotation as R
|
| 17 |
+
|
| 18 |
+
class Camera(nn.Module):
    """Per-view training camera.

    Holds the COLMAP pose (R, T), field-of-view intrinsics, the ground-truth
    image (optionally alpha-masked), an optional per-pixel object-ID map, and
    the precomputed world-to-view / full projection matrices.

    NOTE(review): the image and object map are placed on `data_device`, but the
    transform matrices are always created with .cuda() — confirm this split is
    intentional for configurations where data_device != "cuda".
    """
    def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
                 image_name, uid,
                 trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda", objects=None, style_transfer=False
                 ):
        super(Camera, self).__init__()

        self.uid = uid              # index within the loaded camera list
        self.colmap_id = colmap_id  # image id from the COLMAP reconstruction
        self.R = R                  # 3x3 rotation, fed to getWorld2View2
        self.T = T                  # translation vector, fed to getWorld2View2
        self.FoVx = FoVx            # horizontal field of view (radians)
        self.FoVy = FoVy            # vertical field of view (radians)
        self.image_name = image_name

        # Fall back to the default CUDA device if the requested one is invalid.
        try:
            self.data_device = torch.device(data_device)
        except Exception as e:
            print(e)
            print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" )
            self.data_device = torch.device("cuda")

        # Ground-truth image clamped to [0, 1]; indexed below as (C, H, W).
        self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
        self.image_width = self.original_image.shape[2]
        self.image_height = self.original_image.shape[1]

        # Apply the alpha mask when given; otherwise multiply by ones, a
        # no-op that keeps both code paths shaped identically.
        if gt_alpha_mask is not None:
            self.original_image *= gt_alpha_mask.to(self.data_device)
        else:
            self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)

        # Near/far clipping planes used by getProjectionMatrix below.
        self.zfar = 100.0
        self.znear = 0.01

        self.trans = trans   # extra scene translation passed to getWorld2View2
        self.scale = scale   # extra scene scale passed to getWorld2View2

        # Matrices are transposed (row-vector convention) before upload.
        self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
        self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda()
        self.full_proj_transform = (self.world_view_transform.unsqueeze(0).bmm(self.projection_matrix.unsqueeze(0))).squeeze(0)
        # Camera position in world space: last row of inv(W2V) in this
        # transposed convention.
        self.camera_center = self.world_view_transform.inverse()[3, :3]

        # Optional per-pixel object-ID map (grouping supervision), if provided.
        if objects is not None:
            self.objects = objects.to(self.data_device)
        else:
            self.objects = None

        # Editable copy of the GT image kept for style-transfer experiments.
        if style_transfer:
            self.transfer_image = self.original_image.clone()
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class MiniCam:
    """Minimal camera container (e.g. for the network viewer): stores only the
    fields the renderer reads, with matrices supplied ready-made."""
    def __init__(self, width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform):
        self.image_width = width
        self.image_height = height
        self.FoVy = fovy
        self.FoVx = fovx
        self.znear = znear
        self.zfar = zfar
        self.world_view_transform = world_view_transform
        self.full_proj_transform = full_proj_transform
        # World-space camera position: last row (transposed convention) of the
        # inverted world-to-view matrix.
        inverse_view = torch.inverse(self.world_view_transform)
        self.camera_center = inverse_view[3][:3]
|
| 81 |
+
|
gaussian-grouping/scene/colmap_loader.py
ADDED
|
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Copyright (C) 2023, Inria
|
| 3 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 4 |
+
# All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# This software is free for non-commercial, research and evaluation use
|
| 7 |
+
# under the terms of the LICENSE.md file.
|
| 8 |
+
#
|
| 9 |
+
# For inquiries contact george.drettakis@inria.fr
|
| 10 |
+
#
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
import collections
|
| 14 |
+
import struct
|
| 15 |
+
|
| 16 |
+
# Lightweight records mirroring COLMAP's C++ reconstruction data model.
CameraModel = collections.namedtuple(
    "CameraModel", ["model_id", "model_name", "num_params"])
Camera = collections.namedtuple(
    "Camera", ["id", "model", "width", "height", "params"])
BaseImage = collections.namedtuple(
    "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
Point3D = collections.namedtuple(
    "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])
# Every camera model COLMAP can emit. Note: this is a *set* literal, used only
# to build the two lookup dicts below.
CAMERA_MODELS = {
    CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3),
    CameraModel(model_id=1, model_name="PINHOLE", num_params=4),
    CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4),
    CameraModel(model_id=3, model_name="RADIAL", num_params=5),
    CameraModel(model_id=4, model_name="OPENCV", num_params=8),
    CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8),
    CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12),
    CameraModel(model_id=7, model_name="FOV", num_params=5),
    CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
    CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
    CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12)
}
# Lookup tables: numeric model id -> record, and model name -> record.
CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model)
                         for camera_model in CAMERA_MODELS])
CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model)
                           for camera_model in CAMERA_MODELS])
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def qvec2rotmat(qvec):
    """Convert a COLMAP quaternion (w, x, y, z) to a 3x3 rotation matrix."""
    w, x, y, z = qvec[0], qvec[1], qvec[2], qvec[3]
    return np.array([
        [1 - 2 * y * y - 2 * z * z,
         2 * x * y - 2 * w * z,
         2 * z * x + 2 * w * y],
        [2 * x * y + 2 * w * z,
         1 - 2 * x * x - 2 * z * z,
         2 * y * z - 2 * w * x],
        [2 * z * x - 2 * w * y,
         2 * y * z + 2 * w * x,
         1 - 2 * x * x - 2 * y * y]])
|
| 54 |
+
|
| 55 |
+
def rotmat2qvec(R):
    """Convert a 3x3 rotation matrix to a COLMAP quaternion (w, x, y, z).

    Uses the eigenvector method: the quaternion is the eigenvector of the
    symmetric 4x4 matrix K that belongs to its largest eigenvalue; the sign is
    normalized so the scalar part is non-negative.
    """
    m00, m10, m20, m01, m11, m21, m02, m12, m22 = R.flat
    K = np.array([
        [m00 - m11 - m22, 0.0, 0.0, 0.0],
        [m10 + m01, m11 - m00 - m22, 0.0, 0.0],
        [m20 + m02, m21 + m12, m22 - m00 - m11, 0.0],
        [m12 - m21, m20 - m02, m01 - m10, m00 + m11 + m22]]) / 3.0
    vals, vecs = np.linalg.eigh(K)
    best = vecs[[3, 0, 1, 2], np.argmax(vals)]
    return best if best[0] >= 0 else -best
|
| 67 |
+
|
| 68 |
+
class Image(BaseImage):
    """COLMAP image record; adds a converter from its stored quaternion."""
    def qvec2rotmat(self):
        # Delegate to the module-level converter using this image's (w, x, y, z) qvec.
        return qvec2rotmat(self.qvec)
|
| 71 |
+
|
| 72 |
+
def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
    """Read and unpack the next bytes from a binary file.
    :param fid: open binary file object
    :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
    :param endian_character: Any of {@, =, <, >, !}
    :return: Tuple of read and unpacked values.
    """
    raw = fid.read(num_bytes)
    return struct.unpack(endian_character + format_char_sequence, raw)
|
| 82 |
+
|
| 83 |
+
def read_points3D_text(path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DText(const std::string& path)
        void Reconstruction::WritePoints3DText(const std::string& path)

    Parses a COLMAP points3D.txt file in a single pass. (The previous
    implementation read the whole file twice: once just to count points,
    then again to parse them.)

    :param path: path to points3D.txt
    :return: (xyzs (N,3) float64, rgbs (N,3) float64, errors (N,1) float64)
    """
    xyz_rows = []
    rgb_rows = []
    err_rows = []
    with open(path, "r") as fid:
        for raw_line in fid:
            line = raw_line.strip()
            # Skip blank lines and the '#'-prefixed header comments.
            if len(line) == 0 or line[0] == "#":
                continue
            elems = line.split()
            # Line layout: POINT3D_ID X Y Z R G B ERROR TRACK[] ...
            xyz_rows.append(tuple(map(float, elems[1:4])))
            rgb_rows.append(tuple(map(int, elems[4:7])))
            err_rows.append(float(elems[7]))

    num_points = len(xyz_rows)
    # reshape keeps the documented (N, 3)/(N, 1) shapes even when N == 0,
    # matching the original preallocated-array behavior.
    xyzs = np.array(xyz_rows, dtype=np.float64).reshape(num_points, 3)
    rgbs = np.array(rgb_rows, dtype=np.float64).reshape(num_points, 3)
    errors = np.array(err_rows, dtype=np.float64).reshape(num_points, 1)
    return xyzs, rgbs, errors
|
| 124 |
+
|
| 125 |
+
def read_points3D_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DBinary(const std::string& path)
        void Reconstruction::WritePoints3DBinary(const std::string& path)
    """
    with open(path_to_model_file, "rb") as fid:
        num_points = read_next_bytes(fid, 8, "Q")[0]

        xyzs = np.empty((num_points, 3))
        rgbs = np.empty((num_points, 3))
        errors = np.empty((num_points, 1))

        for point_idx in range(num_points):
            # Fixed part: id (Q), xyz (3d), rgb (3B), reprojection error (d) = 43 bytes.
            fixed = read_next_bytes(
                fid, num_bytes=43, format_char_sequence="QdddBBBd")
            xyzs[point_idx] = fixed[1:4]
            rgbs[point_idx] = fixed[4:7]
            errors[point_idx] = fixed[7]
            # Variable part: the observation track — consumed to advance the
            # stream but not returned.
            track_length = read_next_bytes(
                fid, num_bytes=8, format_char_sequence="Q")[0]
            read_next_bytes(
                fid, num_bytes=8 * track_length,
                format_char_sequence="ii" * track_length)
    return xyzs, rgbs, errors
|
| 155 |
+
|
| 156 |
+
def read_intrinsics_text(path):
    """
    Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
    """
    cameras = {}
    with open(path, "r") as fid:
        for raw_line in fid:
            stripped = raw_line.strip()
            # Skip blank lines and '#'-prefixed header comments.
            if not stripped or stripped[0] == "#":
                continue
            fields = stripped.split()
            cam_id = int(fields[0])
            model = fields[1]
            assert model == "PINHOLE", "While the loader support other types, the rest of the code assumes PINHOLE"
            cameras[cam_id] = Camera(id=cam_id, model=model,
                                     width=int(fields[2]), height=int(fields[3]),
                                     params=np.array(tuple(map(float, fields[4:]))))
    return cameras
|
| 179 |
+
|
| 180 |
+
def read_extrinsics_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesBinary(const std::string& path)
        void Reconstruction::WriteImagesBinary(const std::string& path)
    """
    images = {}
    with open(path_to_model_file, "rb") as fid:
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_reg_images):
            # Fixed header: image_id (i), qvec (4d), tvec (3d), camera_id (i) = 64 bytes.
            binary_image_properties = read_next_bytes(
                fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            # Image name is stored as a NUL-terminated byte string.
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":   # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            num_points2D = read_next_bytes(fid, num_bytes=8,
                                           format_char_sequence="Q")[0]
            # Each 2D observation is (x: d, y: d, point3D_id: q) = 24 bytes.
            x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
                                       format_char_sequence="ddq"*num_points2D)
            # De-interleave the flat tuple: xys -> (N, 2), ids -> (N,).
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
                                   tuple(map(float, x_y_id_s[1::3]))])
            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
            images[image_id] = Image(
                id=image_id, qvec=qvec, tvec=tvec,
                camera_id=camera_id, name=image_name,
                xys=xys, point3D_ids=point3D_ids)
    return images
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def read_intrinsics_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasBinary(const std::string& path)
        void Reconstruction::ReadCamerasBinary(const std::string& path)
    """
    cameras = {}
    with open(path_to_model_file, "rb") as fid:
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_cameras):
            # Fixed header: camera_id (i), model_id (i), width (Q), height (Q) = 24 bytes.
            camera_properties = read_next_bytes(
                fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
            width = camera_properties[2]
            height = camera_properties[3]
            # Parameter count is model-dependent (see CAMERA_MODELS table).
            num_params = CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(fid, num_bytes=8*num_params,
                                     format_char_sequence="d"*num_params)
            cameras[camera_id] = Camera(id=camera_id,
                                        model=model_name,
                                        width=width,
                                        height=height,
                                        params=np.array(params))
        # Sanity check: ids must be unique or entries would have collided.
        assert len(cameras) == num_cameras
    return cameras
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def read_extrinsics_text(path):
    """
    Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
    """
    images = {}
    with open(path, "r") as fid:
        while True:
            header_line = fid.readline()
            if not header_line:
                break
            header_line = header_line.strip()
            # Skip blank lines and '#'-prefixed header comments.
            if not header_line or header_line[0] == "#":
                continue
            fields = header_line.split()
            img_id = int(fields[0])
            quat = np.array(tuple(map(float, fields[1:5])))
            trans = np.array(tuple(map(float, fields[5:8])))
            cam_id = int(fields[8])
            name = fields[9]
            # Next line holds the 2D observations as x, y, point3D_id triplets.
            obs = fid.readline().split()
            xy = np.column_stack([tuple(map(float, obs[0::3])),
                                  tuple(map(float, obs[1::3]))])
            pt_ids = np.array(tuple(map(int, obs[2::3])))
            images[img_id] = Image(
                id=img_id, qvec=quat, tvec=trans,
                camera_id=cam_id, name=name,
                xys=xy, point3D_ids=pt_ids)
    return images
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def read_colmap_bin_array(path):
    """
    Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_dense.py

    :param path: path to the colmap binary file.
    :return: nd array with the floating point values in the value
    """
    with open(path, "rb") as fid:
        # Header is an ASCII "width&height&channels&" prefix; parse the ints.
        width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1,
                                                usecols=(0, 1, 2), dtype=int)
        fid.seek(0)
        # Re-scan byte-by-byte to position the stream just past the third '&',
        # where the raw float32 payload begins.
        num_delimiter = 0
        byte = fid.read(1)
        while True:
            if byte == b"&":
                num_delimiter += 1
                if num_delimiter >= 3:
                    break
            byte = fid.read(1)
        array = np.fromfile(fid, np.float32)
    # Payload is column-major (Fortran order); transpose to (height, width, channels).
    array = array.reshape((width, height, channels), order="F")
    return np.transpose(array, (1, 0, 2)).squeeze()
|
gaussian-grouping/scene/dataset_readers.py
ADDED
|
@@ -0,0 +1,388 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2023, Gaussian-Grouping
|
| 2 |
+
# Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# ------------------------------------------------------------------------
|
| 6 |
+
# Modified from codes in Gaussian-Splatting
|
| 7 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 8 |
+
|
| 9 |
+
import os
|
| 10 |
+
import sys
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from typing import NamedTuple
|
| 13 |
+
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
|
| 14 |
+
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
|
| 15 |
+
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
|
| 16 |
+
import numpy as np
|
| 17 |
+
import json
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
from plyfile import PlyData, PlyElement
|
| 20 |
+
from utils.sh_utils import SH2RGB
|
| 21 |
+
from scene.gaussian_model import BasicPointCloud
|
| 22 |
+
|
| 23 |
+
class CameraInfo(NamedTuple):
    """Immutable per-image record produced by the COLMAP scene readers."""
    uid: int                # camera intrinsics id
    R: np.ndarray           # 3x3 rotation (after config-driven re-orientation)
    T: np.ndarray           # 3-vector translation
    FovY: np.ndarray        # vertical field of view
    FovX: np.ndarray        # horizontal field of view
    image: np.ndarray       # PIL image, or None if the file is missing
    image_path: str         # absolute/relative path the image was read from
    image_name: str         # basename without extension
    width: int              # sensor width in pixels
    height: int             # sensor height in pixels
    objects: np.ndarray     # per-pixel object-ID mask image, or None
|
| 35 |
+
|
| 36 |
+
class SceneInfo(NamedTuple):
    """Bundle describing a loaded scene: geometry, camera splits, and metadata."""
    point_cloud: BasicPointCloud   # initial point cloud for Gaussian creation
    train_cameras: list            # list of CameraInfo used for training
    test_cameras: list             # list of CameraInfo held out for evaluation
    nerf_normalization: dict       # {"translate": ..., "radius": ...} from getNerfppNorm
    ply_path: str                  # path of the source .ply (copied to model dir)
|
| 42 |
+
|
| 43 |
+
def getNerfppNorm(cam_info):
    """Compute the NeRF++-style scene normalization from the camera poses.

    Returns a dict with "translate" (negated mean camera center) and "radius"
    (1.1 x the largest camera distance from that center).
    """
    centers = []
    for cam in cam_info:
        # World-space camera center = translation column of inv(world-to-view).
        w2c = getWorld2View2(cam.R, cam.T)
        c2w = np.linalg.inv(w2c)
        centers.append(c2w[:3, 3:4])

    stacked = np.hstack(centers)
    mean_center = np.mean(stacked, axis=1, keepdims=True)
    distances = np.linalg.norm(stacked - mean_center, axis=0, keepdims=True)
    diagonal = np.max(distances)

    radius = diagonal * 1.1
    translate = -mean_center.flatten()

    return {"translate": translate, "radius": radius}
|
| 65 |
+
|
| 66 |
+
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder, objects_folder):
    """Build a CameraInfo per registered COLMAP image.

    Beyond the stock loader, each pose is re-oriented using
    config/camera_rotation.json (Euler rotation, translation, then a rotation
    about world Z), mirroring the point-cloud adjustment in fetchPly.

    Side effects: prints a progress line to stdout, and pickles the first
    camera's record to config/cam_info.pkl.
    NOTE(review): the JSON config is re-read on every loop iteration — it could
    be loaded once before the loop; confirm nothing mutates it mid-run.
    """
    cam_infos = []
    for idx, key in enumerate(cam_extrinsics):
        sys.stdout.write('\r')
        # Progress indicator rewritten in place on a single line.
        sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
        sys.stdout.flush()

        extr = cam_extrinsics[key]
        intr = cam_intrinsics[extr.camera_id]
        height = intr.height
        width = intr.width

        uid = intr.id
        # COLMAP stores world-to-camera; transpose gives the convention the
        # rest of the pipeline expects.
        R = np.transpose(qvec2rotmat(extr.qvec))
        T = np.array(extr.tvec)

        # print(R)
        # print(T)

        from scipy.spatial.transform import Rotation
        import math

        file_path = f'config/camera_rotation.json'
        with open(file_path, 'r') as file:
            camera_config = json.load(file)
        # 1) Global re-orientation by the configured Euler angles (degrees).
        euler_angles = camera_config['gvec_rotate_euler']
        R_euler = Rotation.from_euler('xyz', euler_angles, degrees=True).as_matrix()
        R = R_euler @ R

        # 2) Translate the camera center: convert T to a world-space center,
        # shift it, then convert back.
        transform = np.array(camera_config['transform'])
        T = -R @ T
        T += transform
        T = -np.linalg.inv(R) @ T

        # 3) Final rotation about the world Z axis by 'rotation' degrees.
        rotation = camera_config['rotation']
        rotation_radians = np.radians(rotation)
        R_z = np.array([
            [np.cos(rotation_radians), -np.sin(rotation_radians), 0],
            [np.sin(rotation_radians), np.cos(rotation_radians), 0],
            [0, 0, 1]
        ])
        R = R_z @ R

        # T = -R @ T
        # print(f"{T[0]} {T[1]} {T[2]}")

        # if idx == 0:
        #     print(R_euler)

        # print(R_rotated)
        # print(T_rotated)

        # Convert focal lengths to fields of view; only undistorted pinhole
        # models are supported downstream.
        if intr.model=="SIMPLE_PINHOLE":
            focal_length_x = intr.params[0]
            FovY = focal2fov(focal_length_x, height)
            FovX = focal2fov(focal_length_x, width)
        elif intr.model=="PINHOLE":
            focal_length_x = intr.params[0]
            focal_length_y = intr.params[1]
            # print(focal_length_x, focal_length_y)
            FovY = focal2fov(focal_length_y, height)
            FovX = focal2fov(focal_length_x, width)
        else:
            assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"

        # Both the RGB image and the object mask are optional on disk.
        image_path = os.path.join(images_folder, os.path.basename(extr.name))
        image_name = os.path.basename(image_path).split(".")[0]
        image = Image.open(image_path) if os.path.exists(image_path) else None
        object_path = os.path.join(objects_folder, image_name + '.png')
        objects = Image.open(object_path) if os.path.exists(object_path) else None

        cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                              image_path=image_path, image_name=image_name, width=width, height=height, objects=objects)
        cam_infos.append(cam_info)

        # Persist the first camera with raw focal lengths stored in the
        # FovY/FovX slots. NOTE(review): presumably a downstream tool expects
        # focals rather than FoVs here — confirm against its consumer.
        if idx == 0:
            new_cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=intr.params[0], FovX=intr.params[1], image=image,
                                      image_path=image_path, image_name=image_name, width=width, height=height, objects=objects)
            import pickle
            with open('config/cam_info.pkl', 'wb') as f:
                pickle.dump(new_cam_info, f)

    sys.stdout.write('\n')
    return cam_infos
|
| 152 |
+
|
| 153 |
+
def fetchPly(path): ###Rotate
    """Load a PLY point cloud and re-orient it via config/camera_rotation.json.

    Applies, in order: the 'gvec_rotate_euler' Euler rotation, the 'transform'
    translation, and a rotation about world Z by 'rotation' degrees — the same
    adjustment readColmapCameras applies to the camera poses.
    NOTE(review): the hard-coded config path assumes the process CWD is the
    project root — confirm.
    """
    plydata = PlyData.read(path)
    vertices = plydata['vertex']
    positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
    colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
    normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T

    from scipy.spatial.transform import Rotation
    import math  # NOTE(review): unused import

    file_path = f'config/camera_rotation.json'
    with open(file_path, 'r') as file:
        camera_config = json.load(file)
    euler_angles = camera_config['gvec_rotate_euler']
    R_euler = Rotation.from_euler('xyz', euler_angles, degrees=True).as_matrix()
    # Rotate every point: (3,3) @ (N,3,1) broadcasts to (N,3,1).
    positions = R_euler @ np.expand_dims(positions, -1)
    positions = positions.squeeze(-1)


    transform = np.array(camera_config['transform'])
    positions += transform

    rotation = camera_config['rotation']
    rotation_radians = np.radians(rotation)
    R_z = np.array([
        [np.cos(rotation_radians), -np.sin(rotation_radians), 0],
        [np.sin(rotation_radians), np.cos(rotation_radians), 0],
        [0, 0, 1]
    ])
    positions = R_z @ np.expand_dims(positions, -1)
    positions = positions.squeeze(-1)


    return BasicPointCloud(points=positions, colors=colors, normals=normals)
|
| 187 |
+
|
| 188 |
+
def storePly(path, xyz, rgb):
    """Write an (N,3) position + (N,3) color array to a binary PLY file,
    padding normals with zeros."""
    vertex_dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
                    ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
                    ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]

    zero_normals = np.zeros_like(xyz)
    packed = np.concatenate((xyz, zero_normals, rgb), axis=1)

    vertices = np.empty(xyz.shape[0], dtype=vertex_dtype)
    vertices[:] = [tuple(row) for row in packed]

    # Wrap in a 'vertex' element and write the PLY container.
    vertex_element = PlyElement.describe(vertices, 'vertex')
    PlyData([vertex_element]).write(path)
|
| 204 |
+
|
| 205 |
+
def readColmapSceneInfo(path, images, eval, object_path, llffhold=8, n_views=100, random_init=False, train_split=False):
    """Build a SceneInfo from a COLMAP reconstruction under `path`.

    Args:
        path: scene root containing sparse/0 with COLMAP output.
        images: image subdirectory name (defaults to "images").
        eval: if True, hold out test views (every llffhold-th, or via images_train).
        object_path: object-mask subdirectory name (defaults to "object_mask").
        llffhold: hold-out stride for the LLFF-style eval split.
        n_views: training-view budget; 100 means "all", 50 means "50% of views",
            any other int is an absolute count of evenly spaced views.
        random_init: start from a random point cloud instead of COLMAP points.
        train_split: derive the train/test split from an images_train directory.
    """
    # Prefer the binary COLMAP output; fall back to the text export.
    try:
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
        cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
    except Exception:
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
        cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)

    reading_dir = "images" if images is None else images
    object_dir = 'object_mask' if object_path is None else object_path
    cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics,
                                           images_folder=os.path.join(path, reading_dir),
                                           objects_folder=os.path.join(path, object_dir))
    cam_infos = sorted(cam_infos_unsorted.copy(), key=lambda x: x.image_name)

    if eval:
        if train_split:
            # Split by membership in the images_train directory (by file stem).
            train_dir = os.path.join(path, "images_train")
            train_names = sorted(os.listdir(train_dir))
            train_names = [train_name.split('.')[0] for train_name in train_names]
            train_cam_infos = []
            test_cam_infos = []
            for cam_info in cam_infos:
                if cam_info.image_name in train_names:
                    train_cam_infos.append(cam_info)
                else:
                    test_cam_infos.append(cam_info)
        else:
            # LLFF-style split: every llffhold-th view becomes a test view.
            train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
            test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]

        # Optionally subsample the training views (evenly spaced).
        if n_views == 100:
            pass  # sentinel: keep all training views
        elif isinstance(n_views, int):
            # 50 is a sentinel meaning "50% of the views"; any other int is an
            # absolute count. (Debug print of the full list removed.)
            count = round(len(train_cam_infos) * 0.5) if n_views == 50 else n_views
            idx_sub = [round(i) for i in np.linspace(0, len(train_cam_infos) - 1, count)]
            train_cam_infos = [train_cam_infos[i_sub] for i_sub in idx_sub]
        else:
            raise NotImplementedError
        print("Training images: ", len(train_cam_infos))
        print("Testing images: ", len(test_cam_infos))

    else:
        if train_split:
            train_dir = os.path.join(path, "images_train")
            train_names = sorted(os.listdir(train_dir))
            train_names = [train_name.split('.')[0] for train_name in train_names]
            train_cam_infos = [c for c in cam_infos if c.image_name in train_names]
        else:
            train_cam_infos = cam_infos
        # No held-out views when eval is disabled (always bound, even when
        # train_split filters out every camera).
        test_cam_infos = []

    nerf_normalization = getNerfppNorm(train_cam_infos)

    if random_init:
        # Ignore the COLMAP points and start with random points
        num_pts = 100_000
        print(f"Generating random point cloud ({num_pts})...")

        # We create random points inside the bounds of the synthetic Blender scenes
        xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
        shs = np.random.random((num_pts, 3)) / 255.0
        pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))

        ply_path = os.path.join(path, "sparse/0/points3D_randinit.ply")
        storePly(ply_path, xyz, SH2RGB(shs) * 255)

    else:
        ply_path = os.path.join(path, "sparse/0/points3D.ply")
        bin_path = os.path.join(path, "sparse/0/points3D.bin")
        txt_path = os.path.join(path, "sparse/0/points3D.txt")
        if not os.path.exists(ply_path):
            print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
            try:
                xyz, rgb, _ = read_points3D_binary(bin_path)
            except Exception:
                xyz, rgb, _ = read_points3D_text(txt_path)
            storePly(ply_path, xyz, rgb)
        try:
            pcd = fetchPly(ply_path)
        except Exception:
            pcd = None

    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_cam_infos,
                           test_cameras=test_cam_infos,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path)
    return scene_info
| 307 |
+
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
    """Load CameraInfo entries from a Blender/NeRF transforms JSON.

    Each frame's RGBA image is alpha-composited onto a white or black
    background and its camera-to-world pose converted to COLMAP-style R, T.
    """
    cam_infos = []

    with open(os.path.join(path, transformsfile)) as json_file:
        contents = json.load(json_file)
        fovx = contents["camera_angle_x"]

        frames = contents["frames"]
        for idx, frame in enumerate(frames):
            cam_name = os.path.join(path, frame["file_path"] + extension)

            # NeRF 'transform_matrix' is a camera-to-world transform
            c2w = np.array(frame["transform_matrix"])
            # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
            c2w[:3, 1:3] *= -1

            # get the world-to-camera transform and set R, T
            w2c = np.linalg.inv(c2w)
            R = np.transpose(w2c[:3, :3])  # R is stored transposed due to 'glm' in CUDA code
            T = w2c[:3, 3]

            image_path = os.path.join(path, cam_name)
            image_name = Path(cam_name).stem
            image = Image.open(image_path)

            im_data = np.array(image.convert("RGBA"))

            bg = np.array([1, 1, 1]) if white_background else np.array([0, 0, 0])

            # Alpha-composite the RGB channels onto the background color.
            norm_data = im_data / 255.0
            arr = norm_data[:, :, :3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
            # uint8, not np.byte: np.byte is signed int8, which overflows at
            # 255; PIL's "RGB" mode expects unsigned 8-bit data.
            image = Image.fromarray(np.array(arr * 255.0, dtype=np.uint8), "RGB")

            # Derive vertical FoV from the horizontal one via the focal length.
            fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1])
            FovY = fovy
            FovX = fovx

            cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                                        image_path=image_path, image_name=image_name,
                                        width=image.size[0], height=image.size[1]))

    return cam_infos
| 349 |
+
def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
    # Builds a SceneInfo for a Blender-synthetic (NeRF) dataset: loads the
    # train/test camera transforms and initializes the point cloud randomly,
    # since synthetic scenes ship no COLMAP reconstruction.
    print("Reading Training Transforms")
    train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
    print("Reading Test Transforms")
    test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)

    if not eval:
        # No eval split requested: train on every available view.
        train_cam_infos.extend(test_cam_infos)
        test_cam_infos = []

    nerf_normalization = getNerfppNorm(train_cam_infos)

    ply_path = os.path.join(path, "points3d.ply")
    if not os.path.exists(ply_path):
        # Since this data set has no colmap data, we start with random points
        num_pts = 100_000
        print(f"Generating random point cloud ({num_pts})...")

        # We create random points inside the bounds of the synthetic Blender scenes
        xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
        shs = np.random.random((num_pts, 3)) / 255.0
        pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))

        storePly(ply_path, xyz, SH2RGB(shs) * 255)
    try:
        # Re-read from disk so repeated runs see the same cloud; fall back to
        # None if the PLY cannot be read.
        pcd = fetchPly(ply_path)
    except:
        pcd = None

    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_cam_infos,
                           test_cameras=test_cam_infos,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path)
    return scene_info
| 385 |
+
# Dispatch table mapping dataset type -> SceneInfo loader.
sceneLoadTypeCallbacks = {
    "Colmap": readColmapSceneInfo,
    "Blender" : readNerfSyntheticInfo
}
|
gaussian-grouping/scene/gaussian_model.py
ADDED
|
@@ -0,0 +1,663 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2023, Gaussian-Grouping
|
| 2 |
+
# Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# ------------------------------------------------------------------------
|
| 6 |
+
# Modified from codes in Gaussian-Splatting
|
| 7 |
+
# GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import numpy as np
|
| 11 |
+
from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation
|
| 12 |
+
from torch import nn
|
| 13 |
+
import os
|
| 14 |
+
from utils.system_utils import mkdir_p
|
| 15 |
+
from plyfile import PlyData, PlyElement
|
| 16 |
+
from utils.sh_utils import RGB2SH
|
| 17 |
+
from simple_knn._C import distCUDA2
|
| 18 |
+
from utils.graphics_utils import BasicPointCloud
|
| 19 |
+
from utils.general_utils import strip_symmetric, build_scaling_rotation
|
| 20 |
+
from scipy.spatial import KDTree
|
| 21 |
+
|
| 22 |
+
class GaussianModel:
|
| 23 |
+
|
| 24 |
+
    def setup_functions(self):
        # Install the activation functions mapping raw (unconstrained)
        # parameters to their physical quantities, plus their inverses.
        def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
            # Covariance = L @ L^T with L = R @ diag(scale), returned in the
            # compact symmetric form produced by strip_symmetric.
            L = build_scaling_rotation(scaling_modifier * scaling, rotation)
            actual_covariance = L @ L.transpose(1, 2)
            symm = strip_symmetric(actual_covariance)
            return symm

        # vars def here

        # Scales stored as logs -> exp keeps them positive.
        self.scaling_activation = torch.exp
        self.scaling_inverse_activation = torch.log

        self.covariance_activation = build_covariance_from_scaling_rotation

        # Opacity stored pre-sigmoid -> sigmoid keeps it in (0, 1).
        self.opacity_activation = torch.sigmoid
        self.inverse_opacity_activation = inverse_sigmoid

        # Quaternions are re-normalized on read.
        self.rotation_activation = torch.nn.functional.normalize
| 44 |
+
    def __init__(self, sh_degree : int):
        # Maximum spherical-harmonics degree; the active degree starts at 0
        # and is grown during training via oneupSHdegree().
        self.active_sh_degree = 0
        self.max_sh_degree = sh_degree
        # Per-Gaussian learnable tensors (populated by create_from_pcd / restore).
        self._xyz = torch.empty(0)
        self._features_dc = torch.empty(0)    # SH DC (base color) coefficients
        self._features_rest = torch.empty(0)  # higher-order SH coefficients
        self._scaling = torch.empty(0)        # log-scales (see setup_functions)
        self._rotation = torch.empty(0)       # orientation quaternions
        self._opacity = torch.empty(0)        # pre-sigmoid opacities
        self._objects_dc = torch.empty(0)     # per-Gaussian object-identity features
        self.num_objects = 16                 # dimensionality of the identity encoding
        # Densification / pruning bookkeeping.
        self.max_radii2D = torch.empty(0)
        self.xyz_gradient_accum = torch.empty(0)
        self.denom = torch.empty(0)
        self.optimizer = None
        self.percent_dense = 0
        self.spatial_lr_scale = 0
        self.setup_functions()
| 63 |
+
    def capture(self):
        # Snapshot of the full model state (parameter tensors, densification
        # accumulators, optimizer state) for checkpointing; consumed by restore().
        return (
            self.active_sh_degree,
            self._xyz,
            self._features_dc,
            self._features_rest,
            self._scaling,
            self._rotation,
            self._opacity,
            self._objects_dc,
            self.max_radii2D,
            self.xyz_gradient_accum,
            self.denom,
            self.optimizer.state_dict(),
            self.spatial_lr_scale,
        )
| 80 |
+
    def restore(self, model_args, training_args):
        # Inverse of capture(): unpack a checkpoint tuple and rebuild the
        # optimizer via training_setup. Gradient accumulators and optimizer
        # state are deliberately re-created fresh rather than restored (the
        # commented lines below would restore them verbatim).
        (self.active_sh_degree,
        self._xyz,
        self._features_dc,
        self._features_rest,
        self._scaling,
        self._rotation,
        self._opacity,
        self._objects_dc,
        self.max_radii2D,
        xyz_gradient_accum,
        denom,
        opt_dict,
        self.spatial_lr_scale) = model_args
        self.training_setup(training_args)
        # self.xyz_gradient_accum = xyz_gradient_accum
        # self.denom = denom
        # self.optimizer.load_state_dict(opt_dict)
| 99 |
+
    @property
    def get_scaling(self):
        # Positive per-axis scales (exp of the stored log-scales).
        return self.scaling_activation(self._scaling)
| 103 |
+
    @property
    def get_rotation(self):
        # Unit-normalized orientation quaternions.
        return self.rotation_activation(self._rotation)
| 107 |
+
    @property
    def get_xyz(self):
        # Raw Gaussian center positions (no activation applied).
        return self._xyz
| 111 |
+
    @property
    def get_features(self):
        # Full SH color features: DC term concatenated with the higher-order
        # coefficients along dim 1.
        features_dc = self._features_dc
        features_rest = self._features_rest
        return torch.cat((features_dc, features_rest), dim=1)
| 117 |
+
    @property
    def get_objects(self):
        # Raw per-Gaussian object-identity features (no activation applied).
        return self._objects_dc
| 121 |
+
    @property
    def get_opacity(self):
        # Opacities in (0, 1) (sigmoid of the stored pre-activation values).
        return self.opacity_activation(self._opacity)
| 125 |
+
    def get_covariance(self, scaling_modifier = 1):
        # Per-Gaussian 3D covariance in compact symmetric form, with scales
        # optionally multiplied by scaling_modifier.
        return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
| 128 |
+
def oneupSHdegree(self):
|
| 129 |
+
if self.active_sh_degree < self.max_sh_degree:
|
| 130 |
+
self.active_sh_degree += 1
|
| 131 |
+
|
| 132 |
+
    def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float): # use this plz!
        # Initialize one Gaussian per input point: point color -> SH DC term,
        # scale from nearest-neighbour distances, identity rotation, near-opaque
        # opacity, and random object-identity features.
        self.spatial_lr_scale = spatial_lr_scale
        fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda()
        fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors)).float().cuda())
        # SH coefficient layout: (N, 3 channels, (deg+1)^2 coeffs); only the
        # DC coefficient is initialized from the point color.
        features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda()
        features[:, :3, 0 ] = fused_color
        features[:, 3:, 1:] = 0.0

        # random init obj_id now
        fused_objects = RGB2SH(torch.rand((fused_point_cloud.shape[0],self.num_objects), device="cuda")) # ?
        fused_objects = fused_objects[:,:,None]

        print("Number of points at initialisation : ", fused_point_cloud.shape[0])

        # Squared nearest-neighbour distances (via simple_knn), clamped away
        # from zero so the log below stays finite.
        dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001)
        # dist2 = dist2 / 0.05
        # scales = torch.log(dist2)[...,None].repeat(1, 3)
        scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3)
        # Identity quaternion (w=1, x=y=z=0) for every Gaussian.
        rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda")
        rots[:, 0] = 1

        # Start almost fully opaque (0.99 pre-sigmoid-inverted).
        opacities = inverse_sigmoid(0.99 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda"))

        self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))
        self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True))
        self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True))
        self._scaling = nn.Parameter(scales.requires_grad_(True)) # save as log
        self._rotation = nn.Parameter(rots.requires_grad_(True)) # save as inv_sigmoid
        self._opacity = nn.Parameter(opacities.requires_grad_(True))
        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
        self._objects_dc = nn.Parameter(fused_objects.transpose(1, 2).contiguous().requires_grad_(True))
| 164 |
+
    def training_setup(self, training_args):
        # Build the Adam optimizer (one param group per tensor, each with its
        # own learning rate) and the exponential-decay schedule for positions.
        self.percent_dense = training_args.percent_dense
        self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
        self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")

        l = [
            {'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
            {'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
            {'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"},
            {'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
            {'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
            {'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"},
            {'params': [self._objects_dc], 'lr': training_args.feature_lr, "name": "obj_dc"},
        ]

        # lr=0.0: the per-group learning rates above are authoritative.
        self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
        self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
                                                    lr_final=training_args.position_lr_final*self.spatial_lr_scale,
                                                    lr_delay_mult=training_args.position_lr_delay_mult,
                                                    max_steps=training_args.position_lr_max_steps)
| 185 |
+
    def finetune_setup(self, training_args, mask3d):
        # Restrict finetuning to the Gaussians selected by mask3d: gradient
        # hooks zero the updates everywhere else, and the object-identity
        # features are frozen entirely.
        # Define a function that applies the mask to the gradients
        def mask_hook(grad):
            return grad * mask3d
        def mask_hook2(grad):
            # Variant used for tensors whose gradient lacks the trailing dim.
            return grad * mask3d.squeeze(-1)


        # Register the hook to the parameter (only once!)
        # NOTE(review): the returned handles are not kept on self, so these
        # hooks cannot be removed later — confirm that is intended.
        hook_xyz = self._xyz.register_hook(mask_hook2)
        hook_dc = self._features_dc.register_hook(mask_hook)
        hook_rest = self._features_rest.register_hook(mask_hook)
        hook_opacity = self._opacity.register_hook(mask_hook2)
        hook_scaling = self._scaling.register_hook(mask_hook2)
        hook_rotation = self._rotation.register_hook(mask_hook2)

        # Object identity is fixed during finetuning.
        self._objects_dc.requires_grad = False

        # Reset densification bookkeeping for the finetuning run.
        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
        self.percent_dense = training_args.percent_dense
        self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
        self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")

        l = [
            {'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
            {'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
            {'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"},
            {'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
            {'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
            {'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"},
            {'params': [self._objects_dc], 'lr': training_args.feature_lr, "name": "obj_dc"},
        ]

        self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
| 220 |
+
    def removal_setup(self, training_args, mask3d): #
        # Permanently delete the Gaussians selected by mask3d: keep only the
        # complement and freeze every remaining parameter (no finetuning).
        # training_args is unused; kept for a uniform *_setup signature.

        mask3d = ~mask3d.bool().squeeze()
        # print(mask3d.shape)
        # print(self._xyz.shape)
        # print(mask3d[0])

        # Extracting subsets using the mask
        xyz_sub = self._xyz[mask3d].detach()
        features_dc_sub = self._features_dc[mask3d].detach()
        features_rest_sub = self._features_rest[mask3d].detach()
        opacity_sub = self._opacity[mask3d].detach()
        scaling_sub = self._scaling[mask3d].detach()
        rotation_sub = self._rotation[mask3d].detach()
        objects_dc_sub = self._objects_dc[mask3d].detach()

        # print(xyz_sub.shape)
        # print(rotation_sub.shape)

        def set_requires_grad(tensor, requires_grad):
            """Returns a new tensor with the specified requires_grad setting."""
            return tensor.detach().clone().requires_grad_(requires_grad)

        # Construct nn.Parameters with specified gradients
        self._xyz = nn.Parameter(set_requires_grad(xyz_sub, False))
        self._features_dc = nn.Parameter(set_requires_grad(features_dc_sub, False))
        self._features_rest = nn.Parameter(set_requires_grad(features_rest_sub, False))
        self._opacity = nn.Parameter(set_requires_grad(opacity_sub, False))
        self._scaling = nn.Parameter(set_requires_grad(scaling_sub, False))
        self._rotation = nn.Parameter(set_requires_grad(rotation_sub, False))
        self._objects_dc = nn.Parameter(set_requires_grad(objects_dc_sub, False))
| 252 |
+
    def translate_setup(self, training_args, mask3d, ori_center, dst_center):
        # Rigidly translate the masked Gaussians by (dst_center - ori_center),
        # without tracking gradients. training_args is unused; kept for a
        # uniform *_setup signature.

        mask3d = mask3d.bool().squeeze()

        with torch.no_grad():
            xyz_sub = self._xyz[mask3d].detach()
            xyz_sub += dst_center - ori_center
            self._xyz[mask3d] = xyz_sub

        '''
        xyz_sub = self._xyz[mask3d].detach()

        def set_requires_grad(tensor, requires_grad):
            """Returns a new tensor with the specified requires_grad setting."""
            return tensor.detach().clone().requires_grad_(requires_grad)

        self._xyz = nn.Parameter(set_requires_grad(xyz_sub, False))
        '''
| 272 |
+
def rotate_setup(self, training_args, mask3d, ori_center, rotate_matrix):
|
| 273 |
+
|
| 274 |
+
mask3d = mask3d.bool().squeeze()
|
| 275 |
+
coor_ori = torch.zeros(3).cuda()
|
| 276 |
+
|
| 277 |
+
self.translate_setup(self, mask3d, ori_center, coor_ori)
|
| 278 |
+
|
| 279 |
+
with torch.no_grad():
|
| 280 |
+
xyz_sub = self._xyz[mask3d]
|
| 281 |
+
xyz_sub = (rotate_matrix @ xyz_sub.unsqueeze(-1)).squeeze(-1)
|
| 282 |
+
self._xyz[mask3d] = xyz_sub
|
| 283 |
+
|
| 284 |
+
self.translate_setup(self, mask3d, coor_ori, ori_center)
|
| 285 |
+
|
| 286 |
+
from scipy.spatial.transform import Rotation as R
|
| 287 |
+
rotation = R.from_matrix(rotate_matrix.cpu())
|
| 288 |
+
quat_last = torch.FloatTensor(rotation.as_quat()).cuda()
|
| 289 |
+
idx = torch.IntTensor([3,0,1,2]).cuda()
|
| 290 |
+
quat = quat_last.index_select(dim=-1, index=idx)
|
| 291 |
+
|
| 292 |
+
with torch.no_grad():
|
| 293 |
+
rot_sub = self._rotation[mask3d]
|
| 294 |
+
|
| 295 |
+
w1, x1, y1, z1 = quat.split(1,dim=-1)
|
| 296 |
+
w2, x2, y2, z2 = rot_sub.split(1,dim=-1)
|
| 297 |
+
|
| 298 |
+
w = w1*w2 - x1*x2 - y1*y2 - z1*z2
|
| 299 |
+
x = w1*x2 + x1*w2 + y1*z2 - z1*y2
|
| 300 |
+
y = w1*y2 + y1*w2 + z1*x2 - x1*z2
|
| 301 |
+
z = w1*z2 + z1*w2 + x1*y2 - y1*x2
|
| 302 |
+
|
| 303 |
+
rot_sub = torch.cat((w,x,y,z),dim=1)
|
| 304 |
+
self._rotation[mask3d] = rot_sub
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
def inpaint_setup(self, training_args, mask3d):
|
| 308 |
+
|
| 309 |
+
def initialize_new_features(features, num_new_points, mask_xyz_values, distance_threshold=0.25, max_distance_threshold=1, k=5):
|
| 310 |
+
"""Initialize new points for multiple features based on neighbouring points in the remaining area."""
|
| 311 |
+
new_features = {}
|
| 312 |
+
|
| 313 |
+
if num_new_points == 0:
|
| 314 |
+
for key in features:
|
| 315 |
+
new_features[key] = torch.empty((0, *features[key].shape[1:]), device=features[key].device)
|
| 316 |
+
return new_features
|
| 317 |
+
|
| 318 |
+
# Get remaining points from features
|
| 319 |
+
remaining_xyz_values = features["xyz"]
|
| 320 |
+
remaining_xyz_values_np = remaining_xyz_values.cpu().numpy()
|
| 321 |
+
|
| 322 |
+
# Build a KD-Tree for fast nearest-neighbor lookup
|
| 323 |
+
kdtree = KDTree(remaining_xyz_values_np)
|
| 324 |
+
|
| 325 |
+
# Sample random points from mask_xyz_values as query points
|
| 326 |
+
mask_xyz_values_np = mask_xyz_values.cpu().numpy()
|
| 327 |
+
query_points = mask_xyz_values_np
|
| 328 |
+
|
| 329 |
+
# Find the k nearest neighbors in the remaining points for each query point
|
| 330 |
+
distances, indices = kdtree.query(query_points, k=k)
|
| 331 |
+
selected_indices = indices
|
| 332 |
+
|
| 333 |
+
# Initialize new points for each feature
|
| 334 |
+
for key, feature in features.items():
|
| 335 |
+
# Convert feature to numpy array
|
| 336 |
+
feature_np = feature.cpu().numpy()
|
| 337 |
+
|
| 338 |
+
# If we have valid neighbors, calculate the mean of neighbor points
|
| 339 |
+
if feature_np.ndim == 2:
|
| 340 |
+
neighbor_points = feature_np[selected_indices]
|
| 341 |
+
elif feature_np.ndim == 3:
|
| 342 |
+
neighbor_points = feature_np[selected_indices, :, :]
|
| 343 |
+
else:
|
| 344 |
+
raise ValueError(f"Unsupported feature dimension: {feature_np.ndim}")
|
| 345 |
+
new_points_np = np.mean(neighbor_points, axis=1)
|
| 346 |
+
|
| 347 |
+
# Convert back to tensor
|
| 348 |
+
new_features[key] = torch.tensor(new_points_np, device=feature.device, dtype=feature.dtype)
|
| 349 |
+
|
| 350 |
+
return new_features['xyz'], new_features['features_dc'], new_features['scaling'], new_features['objects_dc'], new_features['features_rest'], new_features['opacity'], new_features['rotation']
|
| 351 |
+
|
| 352 |
+
mask3d = ~mask3d.bool().squeeze()
|
| 353 |
+
mask_xyz_values = self._xyz[~mask3d]
|
| 354 |
+
|
| 355 |
+
# Extracting subsets using the mask
|
| 356 |
+
xyz_sub = self._xyz[mask3d].detach()
|
| 357 |
+
features_dc_sub = self._features_dc[mask3d].detach()
|
| 358 |
+
features_rest_sub = self._features_rest[mask3d].detach()
|
| 359 |
+
opacity_sub = self._opacity[mask3d].detach()
|
| 360 |
+
scaling_sub = self._scaling[mask3d].detach()
|
| 361 |
+
rotation_sub = self._rotation[mask3d].detach()
|
| 362 |
+
objects_dc_sub = self._objects_dc[mask3d].detach()
|
| 363 |
+
|
| 364 |
+
# Add new points with random initialization
|
| 365 |
+
sub_features = {
|
| 366 |
+
'xyz': xyz_sub,
|
| 367 |
+
'features_dc': features_dc_sub,
|
| 368 |
+
'scaling': scaling_sub,
|
| 369 |
+
'objects_dc': objects_dc_sub,
|
| 370 |
+
'features_rest': features_rest_sub,
|
| 371 |
+
'opacity': opacity_sub,
|
| 372 |
+
'rotation': rotation_sub,
|
| 373 |
+
}
|
| 374 |
+
|
| 375 |
+
num_new_points = len(mask_xyz_values)
|
| 376 |
+
with torch.no_grad():
|
| 377 |
+
new_xyz, new_features_dc, new_scaling, new_objects_dc, new_features_rest, new_opacity, new_rotation = initialize_new_features(sub_features, num_new_points, mask_xyz_values)
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
def set_requires_grad(tensor, requires_grad):
|
| 381 |
+
"""Returns a new tensor with the specified requires_grad setting."""
|
| 382 |
+
return tensor.detach().clone().requires_grad_(requires_grad)
|
| 383 |
+
|
| 384 |
+
# Construct nn.Parameters with specified gradients
|
| 385 |
+
self._xyz = nn.Parameter(torch.cat([set_requires_grad(xyz_sub, False), set_requires_grad(new_xyz, True)]))
|
| 386 |
+
self._features_dc = nn.Parameter(torch.cat([set_requires_grad(features_dc_sub, False), set_requires_grad(new_features_dc, True)]))
|
| 387 |
+
self._features_rest = nn.Parameter(torch.cat([set_requires_grad(features_rest_sub, False), set_requires_grad(new_features_rest, True)]))
|
| 388 |
+
self._opacity = nn.Parameter(torch.cat([set_requires_grad(opacity_sub, False), set_requires_grad(new_opacity, True)]))
|
| 389 |
+
self._scaling = nn.Parameter(torch.cat([set_requires_grad(scaling_sub, False), set_requires_grad(new_scaling, True)]))
|
| 390 |
+
self._rotation = nn.Parameter(torch.cat([set_requires_grad(rotation_sub, False), set_requires_grad(new_rotation, True)]))
|
| 391 |
+
self._objects_dc = nn.Parameter(torch.cat([set_requires_grad(objects_dc_sub, False), set_requires_grad(new_objects_dc, True)]))
|
| 392 |
+
|
| 393 |
+
# for optimize
|
| 394 |
+
self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
|
| 395 |
+
self.percent_dense = training_args.percent_dense
|
| 396 |
+
self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
|
| 397 |
+
self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
|
| 398 |
+
|
| 399 |
+
# Setup optimizer. Only the new points will have gradients.
|
| 400 |
+
l = [
|
| 401 |
+
{'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
|
| 402 |
+
{'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
|
| 403 |
+
{'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"},
|
| 404 |
+
{'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
|
| 405 |
+
{'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
|
| 406 |
+
{'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"},
|
| 407 |
+
{'params': [self._objects_dc], 'lr': training_args.feature_lr, "name": "obj_dc"} # Assuming there's a learning rate for objects_dc in training_args
|
| 408 |
+
]
|
| 409 |
+
|
| 410 |
+
self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
|
| 411 |
+
|
| 412 |
+
def update_learning_rate(self, iteration):
    """Per-step LR schedule: update only the "xyz" group and return its new rate.

    Groups other than "xyz" keep their fixed learning rates. Returns None if
    no "xyz" group exists in the optimizer.
    """
    for group in self.optimizer.param_groups:
        if group["name"] != "xyz":
            continue
        new_lr = self.xyz_scheduler_args(iteration)
        group['lr'] = new_lr
        return new_lr
|
| 419 |
+
|
| 420 |
+
def construct_list_of_attributes(self):
    """Return the ordered PLY attribute names matching save_ply's column layout."""
    names = ['x', 'y', 'z', 'nx', 'ny', 'nz']
    # All colour channels except the 3 DC terms follow the DC terms.
    names += ['f_dc_{}'.format(i) for i in range(self._features_dc.shape[1] * self._features_dc.shape[2])]
    names += ['f_rest_{}'.format(i) for i in range(self._features_rest.shape[1] * self._features_rest.shape[2])]
    names.append('opacity')
    names += ['scale_{}'.format(i) for i in range(self._scaling.shape[1])]
    names += ['rot_{}'.format(i) for i in range(self._rotation.shape[1])]
    names += ['obj_dc_{}'.format(i) for i in range(self._objects_dc.shape[1] * self._objects_dc.shape[2])]
    return names
|
| 435 |
+
|
| 436 |
+
def save_ply(self, path):
    # Serialize every per-Gaussian attribute into one binary PLY 'vertex'
    # element at `path`. Column order must match construct_list_of_attributes().
    mkdir_p(os.path.dirname(path))

    xyz = self._xyz.detach().cpu().numpy()
    # Normals are not modelled; zeros keep a conventional PLY vertex layout.
    normals = np.zeros_like(xyz)
    # SH features are stored (N, coeffs, channels); transpose + flatten so each
    # point becomes one flat row matching the f_dc_* / f_rest_* attribute names.
    f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
    f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
    opacities = self._opacity.detach().cpu().numpy()
    scale = self._scaling.detach().cpu().numpy()
    rotation = self._rotation.detach().cpu().numpy()
    # Object-identity features, flattened the same way as the colour SH.
    obj_dc = self._objects_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()

    # Every attribute is written as float32 ('f4').
    dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]

    elements = np.empty(xyz.shape[0], dtype=dtype_full)
    # Concatenation order must mirror the attribute-name order above.
    attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation, obj_dc), axis=1)
    elements[:] = list(map(tuple, attributes))
    el = PlyElement.describe(elements, 'vertex')
    PlyData([el]).write(path)
|
| 455 |
+
|
| 456 |
+
def reset_opacity(self):
    """Clamp all opacities to at most 0.01 (pre-activation) and swap the new
    tensor into the optimizer, resetting its Adam moments."""
    ceiling = torch.ones_like(self.get_opacity) * 0.01
    clamped = inverse_sigmoid(torch.min(self.get_opacity, ceiling))
    replaced = self.replace_tensor_to_optimizer(clamped, "opacity")
    self._opacity = replaced["opacity"]
|
| 460 |
+
|
| 461 |
+
def load_ply(self, path):
    # Restore every per-Gaussian parameter from a PLY file written by save_ply.
    plydata = PlyData.read(path)

    xyz = np.stack((np.asarray(plydata.elements[0]["x"]),
                    np.asarray(plydata.elements[0]["y"]),
                    np.asarray(plydata.elements[0]["z"])),  axis=1)
    opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis]

    # DC colour term: one SH coefficient per RGB channel, stored (N, 3, 1).
    features_dc = np.zeros((xyz.shape[0], 3, 1))
    features_dc[:, 0, 0] = np.asarray(plydata.elements[0]["f_dc_0"])
    features_dc[:, 1, 0] = np.asarray(plydata.elements[0]["f_dc_1"])
    features_dc[:, 2, 0] = np.asarray(plydata.elements[0]["f_dc_2"])

    # Higher-order SH coefficients; sort numerically so f_rest_2 < f_rest_10.
    extra_f_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("f_rest_")]
    extra_f_names = sorted(extra_f_names, key = lambda x: int(x.split('_')[-1]))
    # 3 channels * ((deg+1)^2 - 1) non-DC coefficients must all be present.
    assert len(extra_f_names)==3*(self.max_sh_degree + 1) ** 2 - 3
    features_extra = np.zeros((xyz.shape[0], len(extra_f_names)))
    for idx, attr_name in enumerate(extra_f_names):
        features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name])
    # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC)
    features_extra = features_extra.reshape((features_extra.shape[0], 3, (self.max_sh_degree + 1) ** 2 - 1))

    scale_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("scale_")]
    scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1]))
    scales = np.zeros((xyz.shape[0], len(scale_names)))
    for idx, attr_name in enumerate(scale_names):
        scales[:, idx] = np.asarray(plydata.elements[0][attr_name])

    rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")]
    rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1]))
    rots = np.zeros((xyz.shape[0], len(rot_names)))
    for idx, attr_name in enumerate(rot_names):
        rots[:, idx] = np.asarray(plydata.elements[0][attr_name])

    # Object-identity features: exactly self.num_objects channels per point.
    objects_dc = np.zeros((xyz.shape[0], self.num_objects, 1))
    for idx in range(self.num_objects):
        objects_dc[:,idx,0] = np.asarray(plydata.elements[0]["obj_dc_"+str(idx)])

    # Rebuild trainable parameters on the GPU; transpose(1, 2) inverts the
    # per-channel flattening that save_ply applied to the feature tensors.
    self._xyz = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True))
    self._features_dc = nn.Parameter(torch.tensor(features_dc, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
    self._features_rest = nn.Parameter(torch.tensor(features_extra, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
    self._opacity = nn.Parameter(torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(True))
    self._scaling = nn.Parameter(torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True))
    self._rotation = nn.Parameter(torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True))
    self._objects_dc = nn.Parameter(torch.tensor(objects_dc, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))

    # Loaded checkpoints are fully trained: activate the maximum SH degree.
    self.active_sh_degree = self.max_sh_degree
|
| 508 |
+
|
| 509 |
+
def replace_tensor_to_optimizer(self, tensor, name):
    """Replace the optimizer parameter in the group named `name` with `tensor`.

    The parameter's Adam moments (exp_avg / exp_avg_sq) are reset to zero so
    the replaced values start optimizing from a clean state.

    :param tensor: replacement values (same layout as the current parameter).
    :param name: the optimizer param-group name to replace (e.g. "opacity").
    :return: dict mapping the group name to the new nn.Parameter.
    """
    optimizable_tensors = {}
    for group in self.optimizer.param_groups:
        if group["name"] == name:
            stored_state = self.optimizer.state.get(group['params'][0], None)
            new_param = nn.Parameter(tensor.requires_grad_(True))
            if stored_state is not None:
                # Zero the Adam moments for the replaced values.
                stored_state["exp_avg"] = torch.zeros_like(tensor)
                stored_state["exp_avg_sq"] = torch.zeros_like(tensor)
                del self.optimizer.state[group['params'][0]]
                group["params"][0] = new_param
                self.optimizer.state[group['params'][0]] = stored_state
            else:
                # No optimizer step has been taken yet for this param, so there
                # is no state to migrate (guard mirrors _prune_optimizer /
                # cat_tensors_to_optimizer; the old code crashed here).
                group["params"][0] = new_param

            optimizable_tensors[group["name"]] = group["params"][0]
    return optimizable_tensors
|
| 523 |
+
|
| 524 |
+
def _prune_optimizer(self, mask):
|
| 525 |
+
optimizable_tensors = {}
|
| 526 |
+
for group in self.optimizer.param_groups:
|
| 527 |
+
stored_state = self.optimizer.state.get(group['params'][0], None)
|
| 528 |
+
if stored_state is not None:
|
| 529 |
+
stored_state["exp_avg"] = stored_state["exp_avg"][mask]
|
| 530 |
+
stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask]
|
| 531 |
+
|
| 532 |
+
del self.optimizer.state[group['params'][0]]
|
| 533 |
+
group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True)))
|
| 534 |
+
self.optimizer.state[group['params'][0]] = stored_state
|
| 535 |
+
|
| 536 |
+
optimizable_tensors[group["name"]] = group["params"][0]
|
| 537 |
+
else:
|
| 538 |
+
group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True))
|
| 539 |
+
optimizable_tensors[group["name"]] = group["params"][0]
|
| 540 |
+
return optimizable_tensors
|
| 541 |
+
|
| 542 |
+
def prune_points(self, mask):
    """Delete the Gaussians flagged True in `mask` from both the optimizer
    parameters and the densification bookkeeping buffers."""
    keep = ~mask
    pruned = self._prune_optimizer(keep)

    self._xyz = pruned["xyz"]
    self._features_dc = pruned["f_dc"]
    self._features_rest = pruned["f_rest"]
    self._opacity = pruned["opacity"]
    self._scaling = pruned["scaling"]
    self._rotation = pruned["rotation"]
    self._objects_dc = pruned["obj_dc"]

    # Statistics buffers must stay row-aligned with the surviving points.
    self.xyz_gradient_accum = self.xyz_gradient_accum[keep]
    self.denom = self.denom[keep]
    self.max_radii2D = self.max_radii2D[keep]
|
| 558 |
+
|
| 559 |
+
def cat_tensors_to_optimizer(self, tensors_dict):
    """Append new rows (keyed by optimizer group name in `tensors_dict`) to
    every optimizer parameter, extending the stored Adam moments with zeros."""
    optimizable_tensors = {}
    for group in self.optimizer.param_groups:
        assert len(group["params"]) == 1
        extension_tensor = tensors_dict[group["name"]]
        old_param = group['params'][0]
        stored_state = self.optimizer.state.get(old_param, None)
        merged = nn.Parameter(torch.cat((old_param, extension_tensor), dim=0).requires_grad_(True))

        if stored_state is not None:
            # New rows begin with zeroed first/second moments.
            stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0)
            stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0)

            del self.optimizer.state[old_param]
            group["params"][0] = merged
            self.optimizer.state[merged] = stored_state
        else:
            group["params"][0] = merged

        optimizable_tensors[group["name"]] = merged

    return optimizable_tensors
|
| 580 |
+
|
| 581 |
+
def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation, new_objects_dc):
    """Register freshly created Gaussians with the optimizer and reset the
    densification statistics for the enlarged point set."""
    extensions = {
        "xyz": new_xyz,
        "f_dc": new_features_dc,
        "f_rest": new_features_rest,
        "opacity": new_opacities,
        "scaling": new_scaling,
        "rotation": new_rotation,
        "obj_dc": new_objects_dc,
    }

    merged = self.cat_tensors_to_optimizer(extensions)
    self._xyz = merged["xyz"]
    self._features_dc = merged["f_dc"]
    self._features_rest = merged["f_rest"]
    self._opacity = merged["opacity"]
    self._scaling = merged["scaling"]
    self._rotation = merged["rotation"]
    self._objects_dc = merged["obj_dc"]

    # All accumulators restart from zero, sized for the new point count.
    n_points = self.get_xyz.shape[0]
    self.xyz_gradient_accum = torch.zeros((n_points, 1), device="cuda")
    self.denom = torch.zeros((n_points, 1), device="cuda")
    self.max_radii2D = torch.zeros((n_points), device="cuda")
|
| 602 |
+
|
| 603 |
+
def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
    # Split large, high-gradient Gaussians into N smaller child Gaussians each.
    n_init_points = self.get_xyz.shape[0]
    # Extract points that satisfy the gradient condition.
    # grads may be shorter than the current point count (points appended this
    # round have no stats yet), so pad with zeros before thresholding.
    padded_grad = torch.zeros((n_init_points), device="cuda")
    padded_grad[:grads.shape[0]] = grads.squeeze()
    selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)
    # Only split points whose largest scale exceeds the dense-percent threshold
    # (small ones are handled by densify_and_clone instead).
    selected_pts_mask = torch.logical_and(selected_pts_mask,
                                          torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent)

    # Sample N offsets per selected point from a zero-mean Gaussian whose std
    # is the point's own scale, rotated into world space via the quaternion.
    stds = self.get_scaling[selected_pts_mask].repeat(N,1)
    means =torch.zeros((stds.size(0), 3),device="cuda")
    samples = torch.normal(mean=means, std=stds)
    rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1)
    new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1)
    # Shrink the children: divide the activated scales by 0.8*N, then map back
    # through the inverse activation for storage.
    new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N,1) / (0.8*N))
    new_rotation = self._rotation[selected_pts_mask].repeat(N,1)
    new_features_dc = self._features_dc[selected_pts_mask].repeat(N,1,1)
    new_features_rest = self._features_rest[selected_pts_mask].repeat(N,1,1)
    new_opacity = self._opacity[selected_pts_mask].repeat(N,1)
    new_objects_dc = self._objects_dc[selected_pts_mask].repeat(N,1,1)

    self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacity, new_scaling, new_rotation, new_objects_dc)

    # Remove the original (parent) points; the N*sum children appended at the
    # end are kept (their mask entries are zeros).
    prune_filter = torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool)))
    self.prune_points(prune_filter)
|
| 628 |
+
|
| 629 |
+
def densify_and_clone(self, grads, grad_threshold, scene_extent):
    """Clone small, high-gradient Gaussians (one extra copy each)."""
    # A point qualifies when its view-space gradient is large but its largest
    # scale stays below the dense-percent threshold.
    high_grad = torch.norm(grads, dim=-1) >= grad_threshold
    small_enough = torch.max(self.get_scaling, dim=1).values <= self.percent_dense * scene_extent
    selected = torch.logical_and(high_grad, small_enough)

    self.densification_postfix(
        self._xyz[selected],
        self._features_dc[selected],
        self._features_rest[selected],
        self._opacity[selected],
        self._scaling[selected],
        self._rotation[selected],
        self._objects_dc[selected],
    )
|
| 644 |
+
|
| 645 |
+
def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
    """One densification round: clone + split high-gradient points, then prune
    nearly-transparent and oversized ones."""
    grads = self.xyz_gradient_accum / self.denom
    grads[grads.isnan()] = 0.0  # denom == 0 for points never seen by any view

    self.densify_and_clone(grads, max_grad, extent)
    self.densify_and_split(grads, max_grad, extent)

    prune_mask = (self.get_opacity < min_opacity).squeeze()
    if max_screen_size:
        # Also drop points that grew too large on screen or in world space.
        too_big_screen = self.max_radii2D > max_screen_size
        too_big_world = self.get_scaling.max(dim=1).values > 0.1 * extent
        prune_mask = torch.logical_or(torch.logical_or(prune_mask, too_big_screen), too_big_world)
    self.prune_points(prune_mask)

    torch.cuda.empty_cache()
|
| 660 |
+
|
| 661 |
+
def add_densification_stats(self, viewspace_point_tensor, update_filter):
    """Accumulate 2D position-gradient magnitudes and visit counts for the
    points selected by `update_filter`."""
    grad_2d = viewspace_point_tensor.grad[update_filter, :2]
    self.xyz_gradient_accum[update_filter] += torch.norm(grad_2d, dim=-1, keepdim=True)
    self.denom[update_filter] += 1
|
gaussian-grouping/script/edit_object_inpaint.sh
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Run object inpainting on a trained Gaussian Grouping model.
# Usage: ./edit_object_inpaint.sh <output_folder> <config_file>

# Check if the user provided both arguments.
if [ "$#" -ne 2 ]; then
    echo "Usage: $0 <output_folder> <config_file>"
    exit 1
fi

output_folder="$1"
config_file="$2"

if [ ! -d "$output_folder" ]; then
    echo "Error: Folder '$output_folder' does not exist."
    exit 2
fi

# Inpaint the selected object (paths quoted so spaces are handled safely).
python edit_object_inpaint.py -m "${output_folder}" --config_file "${config_file}"
|
| 22 |
+
|
gaussian-grouping/script/edit_object_removal.sh
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Remove the selected object from a trained Gaussian Grouping model.
# Usage: ./edit_object_removal.sh <output_folder> <config_file>

# Check if the user provided both arguments.
if [ "$#" -ne 2 ]; then
    echo "Usage: $0 <output_folder> <config_file>"
    exit 1
fi

output_folder="$1"
config_file="$2"

if [ ! -d "$output_folder" ]; then
    echo "Error: Folder '$output_folder' does not exist."
    exit 2
fi

# Remove the selected object (paths quoted so spaces are handled safely).
python edit_object_removal.py -m "${output_folder}" --config_file "${config_file}" --iteration 15000 --skip_test
|
gaussian-grouping/script/eval_lerf_mask.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import numpy as np
|
| 3 |
+
from PIL import Image
|
| 4 |
+
import cv2
|
| 5 |
+
import sys
|
| 6 |
+
|
| 7 |
+
# Name of the dataset to evaluate (e.g. passed as the first CLI argument);
# used to locate both the ground-truth and predicted mask folders.
dataset_name = sys.argv[1]

gt_folder_path = os.path.join('data/lerf_mask',dataset_name,'test_mask')
# You can change pred_folder_path to your output
pred_folder_path = os.path.join('result/lerf_mask',dataset_name)
|
| 12 |
+
|
| 13 |
+
# General util function to get the boundary of a binary mask.
|
| 14 |
+
# https://gist.github.com/bowenc0221/71f7a02afee92646ca05efeeb14d687d
|
| 15 |
+
def mask_to_boundary(mask, dilation_ratio=0.02):
    """
    Convert a binary mask to its boundary mask.
    :param mask (numpy array, uint8): binary mask
    :param dilation_ratio (float): dilation = dilation_ratio * image diagonal
    :return: boundary mask (numpy array)
    """
    h, w = mask.shape
    diagonal = np.sqrt(h ** 2 + w ** 2)
    dilation = max(1, int(round(dilation_ratio * diagonal)))
    # Pad by one pixel so regions touching the image border count as boundary.
    padded = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)
    kernel = np.ones((3, 3), dtype=np.uint8)
    eroded = cv2.erode(padded, kernel, iterations=dilation)[1:h + 1, 1:w + 1]
    # Boundary = mask minus its eroded interior (G_d intersects G in the paper).
    return mask - eroded
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def boundary_iou(gt, dt, dilation_ratio=0.02):
    """
    Compute boundary IoU between two grayscale masks (thresholded at >128).
    :param gt (numpy array, uint8): ground-truth mask
    :param dt (numpy array, uint8): predicted mask
    :param dilation_ratio (float): dilation = dilation_ratio * image diagonal
    :return: boundary iou (float); 1.0 when both boundaries are empty
    """
    dt = (dt>128).astype('uint8')
    gt = (gt>128).astype('uint8')

    gt_boundary = mask_to_boundary(gt, dilation_ratio)
    dt_boundary = mask_to_boundary(dt, dilation_ratio)
    intersection = ((gt_boundary * dt_boundary) > 0).sum()
    union = ((gt_boundary + dt_boundary) > 0).sum()
    if union == 0:
        # Both boundaries empty (e.g. both masks blank): the original 0/0
        # produced NaN and poisoned the per-class averages; treat as a match.
        return 1.0
    return intersection / union
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def load_mask(mask_path):
    """Load a mask image as a grayscale numpy array, or None if missing."""
    if not os.path.exists(mask_path):
        return None
    return np.array(Image.open(mask_path).convert('L'))
|
| 61 |
+
|
| 62 |
+
def resize_mask(mask, target_shape):
    """Nearest-neighbour resize of `mask` to (rows, cols) = target_shape."""
    target_size = (target_shape[1], target_shape[0])  # PIL expects (width, height)
    resized = Image.fromarray(mask).resize(target_size, resample=Image.NEAREST)
    return np.array(resized)
|
| 65 |
+
|
| 66 |
+
def calculate_iou(mask1, mask2):
    """IoU between two grayscale masks, thresholded at >128.

    Returns 1.0 when both masks are empty (the original 0/0 produced NaN and
    poisoned the per-class averages).
    """
    mask1_bool = mask1 > 128
    mask2_bool = mask2 > 128
    intersection = np.logical_and(mask1_bool, mask2_bool)
    union = np.logical_or(mask1_bool, mask2_bool)
    union_count = np.sum(union)
    if union_count == 0:
        return 1.0
    return np.sum(intersection) / union_count
|
| 74 |
+
|
| 75 |
+
iou_scores = {} # Store IoU scores for each class
biou_scores = {}  # Boundary IoU scores, keyed the same way
class_counts = {} # Count the number of times each class appears

# Iterate over each image and category in the GT dataset.
# Expected layout: <gt_folder>/<image_name>/<cat_id>.png, mirrored under
# pred_folder_path for the predictions.
for image_name in os.listdir(gt_folder_path):
    gt_image_path = os.path.join(gt_folder_path, image_name)
    pred_image_path = os.path.join(pred_folder_path, image_name)

    if os.path.isdir(gt_image_path):
        for cat_file in os.listdir(gt_image_path):
            cat_id = cat_file.split('.')[0] # Assuming cat_file format is "cat_id.png"
            gt_mask_path = os.path.join(gt_image_path, cat_file)
            pred_mask_path = os.path.join(pred_image_path, cat_file)

            gt_mask = load_mask(gt_mask_path)
            pred_mask = load_mask(pred_mask_path)
            print("GT: ",gt_mask_path)
            print("Pred: ",pred_mask_path)

            # Pairs where either mask file is missing are skipped silently.
            if gt_mask is not None and pred_mask is not None:
                # Resize prediction mask to match GT mask shape if they are different
                if pred_mask.shape != gt_mask.shape:
                    pred_mask = resize_mask(pred_mask, gt_mask.shape)

                iou = calculate_iou(gt_mask, pred_mask)
                biou = boundary_iou(gt_mask, pred_mask)
                print("IoU: ",iou," BIoU: ",biou)
                if cat_id not in iou_scores:
                    iou_scores[cat_id] = []
                    biou_scores[cat_id] = []
                iou_scores[cat_id].append(iou)
                biou_scores[cat_id].append(biou)
                class_counts[cat_id] = class_counts.get(cat_id, 0) + 1

# Calculate mean IoU for each class
mean_iou_per_class = {cat_id: np.mean(iou_scores[cat_id]) for cat_id in iou_scores}
mean_biou_per_class = {cat_id: np.mean(biou_scores[cat_id]) for cat_id in biou_scores}

# Calculate overall mean IoU (macro average: every class weighted equally)
overall_mean_iou = np.mean(list(mean_iou_per_class.values()))
overall_mean_biou = np.mean(list(mean_biou_per_class.values()))

print("Mean IoU per class:", mean_iou_per_class)
print("Mean Boundary IoU per class:", mean_biou_per_class)
print("Overall Mean IoU:", overall_mean_iou)
print("Overall Boundary Mean IoU:", overall_mean_biou)
|
gaussian-grouping/script/prepare_pseudo_label.sh
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Generate pseudo ground-truth object masks for a dataset with DEVA + SAM.
# Usage: ./prepare_pseudo_label.sh <dataset_name> <image_scale>

# Check if the user provided both arguments (the old usage string omitted
# the required <image_scale> argument).
if [ "$#" -ne 2 ]; then
    echo "Usage: $0 <dataset_name> <image_scale>"
    exit 1
fi

dataset_name="$1"
scale="$2"
dataset_folder="data/$dataset_name"

if [ ! -d "$dataset_folder" ]; then
    echo "Error: Folder '$dataset_folder' does not exist."
    exit 2
fi

# 1. DEVA anything mask
cd Tracking-Anything-with-DEVA/ || exit 3

# Scale 1 uses the raw images; other scales use the downsampled images_<scale>.
if [ "$scale" = "1" ]; then
    img_path="../data/${dataset_name}/images"
else
    img_path="../data/${dataset_name}/images_${scale}"
fi

# colored mask for visualization check
# ori: --size 480
# SAM_NUM_POINTS_PER_BATCH=1, chunk_size=1, SAM_NUM_POINTS_PER_SIDE=32
python demo/demo_automatic.py \
    --chunk_size 4 \
    --img_path "$img_path" \
    --amp \
    --temporal_setting semionline \
    --SAM_NUM_POINTS_PER_BATCH 4 \
    --size 480 \
    --output "./example/output_gaussian_dataset/${dataset_name}" \
    --suppress_small_objects \
    --SAM_PRED_IOU_THRESHOLD 0.7

mv "./example/output_gaussian_dataset/${dataset_name}/Annotations" "./example/output_gaussian_dataset/${dataset_name}/Annotations_color"

# gray mask for training (re-run with short ids so pixel values are object ids)
python demo/demo_automatic.py \
    --chunk_size 4 \
    --img_path "$img_path" \
    --amp \
    --temporal_setting semionline \
    --SAM_NUM_POINTS_PER_BATCH 4 \
    --size 480 \
    --output "./example/output_gaussian_dataset/${dataset_name}" \
    --use_short_id \
    --suppress_small_objects \
    --SAM_PRED_IOU_THRESHOLD 0.7

# 2. copy gray mask to the corresponding data path
cp -r "./example/output_gaussian_dataset/${dataset_name}/Annotations" "../data/${dataset_name}/object_mask"
cd ..
|
gaussian-grouping/script/prepare_pseudo_label_with_text.sh
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Generate text-prompted pseudo object masks with DEVA + Grounding-DINO + SAM.
# Usage: ./prepare_pseudo_label_with_text.sh <dataset_name> <prompt>

# Check if the user provided both arguments (the old usage string omitted
# the required <prompt> argument).
if [ "$#" -ne 2 ]; then
    echo "Usage: $0 <dataset_name> <prompt>"
    exit 1
fi

dataset_name="$1"
prompt="$2"

scale=1
dataset_folder="data/$dataset_name"

if [ ! -d "$dataset_folder" ]; then
    echo "Error: Folder '$dataset_folder' does not exist."
    exit 2
fi

# 1. DEVA anything mask
cd Tracking-Anything-with-DEVA/ || exit 3

if [ "$scale" = "1" ]; then
    img_path="../data/${dataset_name}/images"
else
    img_path="../data/${dataset_name}/images_${scale}"
fi

# colored mask for visualization check
# ori: --size 480
# SAM_NUM_POINTS_PER_BATCH=1, chunk_size=1, SAM_NUM_POINTS_PER_SIDE=32
python demo/demo_with_text.py \
    --chunk_size 4 \
    --img_path "$img_path" \
    --amp \
    --temporal_setting semionline \
    --SAM_NUM_POINTS_PER_BATCH 4 \
    --size 480 \
    --output "./example/output_gaussian_dataset/${dataset_name}" \
    --DINO_THRESHOLD 0.40 \
    --max_missed_detection_count 0 \
    --prompt "${prompt}"

# 2. copy gray mask to the corresponding data path
# -f: don't fail on the first run when object_mask does not exist yet.
rm -rf "./example/output_gaussian_dataset/${dataset_name}/object_mask"

cp -r "./example/output_gaussian_dataset/${dataset_name}/Annotations" "./example/output_gaussian_dataset/${dataset_name}/object_mask"

python get_gray_image.py --path "./example/output_gaussian_dataset/${dataset_name}" --prompt "${prompt}"

cp -r "./example/output_gaussian_dataset/${dataset_name}/object_mask" "../data/${dataset_name}/"
cd ..
|
gaussian-grouping/script/train.sh
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Train Gaussian Grouping on a dataset, then render its segmentation output.
# Usage: ./train.sh <dataset_name> <resolution_scale>

# Check if the user provided both arguments (the old usage string omitted
# the required <resolution_scale> argument).
if [ "$#" -ne 2 ]; then
    echo "Usage: $0 <dataset_name> <resolution_scale>"
    exit 1
fi

dataset_name="$1"
scale="$2"
dataset_folder="data/$dataset_name"

if [ ! -d "$dataset_folder" ]; then
    echo "Error: Folder '$dataset_folder' does not exist."
    exit 2
fi

# Gaussian Grouping training
python train.py -s "$dataset_folder" -r "${scale}" -m "output/${dataset_name}" --config_file config/gaussian_dataset/train.json

# Segmentation rendering using trained model
python render.py -m "output/${dataset_name}" --num_classes 256
|
gaussian-grouping/script/train_lerf.sh
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
# Check if the user provided an argument
|
| 4 |
+
if [ "$#" -ne 2 ]; then
|
| 5 |
+
echo "Usage: $0 <dataset_name>"
|
| 6 |
+
exit 1
|
| 7 |
+
fi
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
dataset_name="$1"
|
| 11 |
+
scale="$2"
|
| 12 |
+
dataset_folder="data/$dataset_name"
|
| 13 |
+
|
| 14 |
+
if [ ! -d "$dataset_folder" ]; then
|
| 15 |
+
echo "Error: Folder '$dataset_folder' does not exist."
|
| 16 |
+
exit 2
|
| 17 |
+
fi
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Gaussian Grouping training
|
| 21 |
+
python train.py -s $dataset_folder -r ${scale} -m output/${dataset_name} --config_file config/gaussian_dataset/train.json --train_split
|
| 22 |
+
|
| 23 |
+
# Segmentation rendering using trained model
|
| 24 |
+
python render.py -m output/${dataset_name} --num_classes 256 --images images
|
gaussian-grouping/script/train_lerf_randominit.sh
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Train Gaussian Grouping on a LERF dataset with random point initialization,
# then render its segmentation output.
# Usage: ./train_lerf_randominit.sh <dataset_name> <resolution_scale>

# Check if the user provided both arguments (the old usage string omitted
# the required <resolution_scale> argument).
if [ "$#" -ne 2 ]; then
    echo "Usage: $0 <dataset_name> <resolution_scale>"
    exit 1
fi

dataset_name="$1"
scale="$2"
dataset_folder="data/$dataset_name"

if [ ! -d "$dataset_folder" ]; then
    echo "Error: Folder '$dataset_folder' does not exist."
    exit 2
fi

# Gaussian Grouping training
python train.py -s "$dataset_folder" -r "${scale}" -m "output/${dataset_name}" --config_file config/gaussian_dataset/train.json --train_split --random_init

# Segmentation rendering using trained model
python render.py -m "output/${dataset_name}" --num_classes 256 --images images
|
gaussian-grouping/submodules/diff-gaussian-rasterization/CMakeLists.txt
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#

cmake_minimum_required(VERSION 3.20)

project(DiffRast LANGUAGES CUDA CXX)

# C++17 for both host and device code, without GNU extensions.
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CUDA_STANDARD 17)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")

# Rasterizer core library; headers are listed so IDE generators show them.
add_library(CudaRasterizer
	cuda_rasterizer/backward.h
	cuda_rasterizer/backward.cu
	cuda_rasterizer/forward.h
	cuda_rasterizer/forward.cu
	cuda_rasterizer/auxiliary.h
	cuda_rasterizer/rasterizer_impl.cu
	cuda_rasterizer/rasterizer_impl.h
	cuda_rasterizer/rasterizer.h
)

# Build for sm_70 (Volta), sm_75 (Turing) and sm_86 (Ampere).
set_target_properties(CudaRasterizer PROPERTIES CUDA_ARCHITECTURES "70;75;86")

target_include_directories(CudaRasterizer PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/cuda_rasterizer)
# glm is a header-only dependency vendored under third_party/.
target_include_directories(CudaRasterizer PRIVATE third_party/glm ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
|
gaussian-grouping/submodules/diff-gaussian-rasterization/LICENSE.md
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Gaussian-Splatting License
|
| 2 |
+
===========================
|
| 3 |
+
|
| 4 |
+
**Inria** and **the Max Planck Institut for Informatik (MPII)** hold all the ownership rights on the *Software* named **gaussian-splatting**.
|
| 5 |
+
The *Software* is in the process of being registered with the Agence pour la Protection des
|
| 6 |
+
Programmes (APP).
|
| 7 |
+
|
| 8 |
+
The *Software* is still being developed by the *Licensor*.
|
| 9 |
+
|
| 10 |
+
*Licensor*'s goal is to allow the research community to use, test and evaluate
|
| 11 |
+
the *Software*.
|
| 12 |
+
|
| 13 |
+
## 1. Definitions
|
| 14 |
+
|
| 15 |
+
*Licensee* means any person or entity that uses the *Software* and distributes
|
| 16 |
+
its *Work*.
|
| 17 |
+
|
| 18 |
+
*Licensor* means the owners of the *Software*, i.e Inria and MPII
|
| 19 |
+
|
| 20 |
+
*Software* means the original work of authorship made available under this
|
| 21 |
+
License ie gaussian-splatting.
|
| 22 |
+
|
| 23 |
+
*Work* means the *Software* and any additions to or derivative works of the
|
| 24 |
+
*Software* that are made available under this License.
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
## 2. Purpose
|
| 28 |
+
This license is intended to define the rights granted to the *Licensee* by
|
| 29 |
+
Licensors under the *Software*.
|
| 30 |
+
|
| 31 |
+
## 3. Rights granted
|
| 32 |
+
|
| 33 |
+
For the above reasons Licensors have decided to distribute the *Software*.
|
| 34 |
+
Licensors grant non-exclusive rights to use the *Software* for research purposes
|
| 35 |
+
to research users (both academic and industrial), free of charge, without right
|
| 36 |
+
to sublicense.. The *Software* may be used "non-commercially", i.e., for research
|
| 37 |
+
and/or evaluation purposes only.
|
| 38 |
+
|
| 39 |
+
Subject to the terms and conditions of this License, you are granted a
|
| 40 |
+
non-exclusive, royalty-free, license to reproduce, prepare derivative works of,
|
| 41 |
+
publicly display, publicly perform and distribute its *Work* and any resulting
|
| 42 |
+
derivative works in any form.
|
| 43 |
+
|
| 44 |
+
## 4. Limitations
|
| 45 |
+
|
| 46 |
+
**4.1 Redistribution.** You may reproduce or distribute the *Work* only if (a) you do
|
| 47 |
+
so under this License, (b) you include a complete copy of this License with
|
| 48 |
+
your distribution, and (c) you retain without modification any copyright,
|
| 49 |
+
patent, trademark, or attribution notices that are present in the *Work*.
|
| 50 |
+
|
| 51 |
+
**4.2 Derivative Works.** You may specify that additional or different terms apply
|
| 52 |
+
to the use, reproduction, and distribution of your derivative works of the *Work*
|
| 53 |
+
("Your Terms") only if (a) Your Terms provide that the use limitation in
|
| 54 |
+
Section 2 applies to your derivative works, and (b) you identify the specific
|
| 55 |
+
derivative works that are subject to Your Terms. Notwithstanding Your Terms,
|
| 56 |
+
this License (including the redistribution requirements in Section 3.1) will
|
| 57 |
+
continue to apply to the *Work* itself.
|
| 58 |
+
|
| 59 |
+
**4.3** Any other use without of prior consent of Licensors is prohibited. Research
|
| 60 |
+
users explicitly acknowledge having received from Licensors all information
|
| 61 |
+
allowing to appreciate the adequacy between of the *Software* and their needs and
|
| 62 |
+
to undertake all necessary precautions for its execution and use.
|
| 63 |
+
|
| 64 |
+
**4.4** The *Software* is provided both as a compiled library file and as source
|
| 65 |
+
code. In case of using the *Software* for a publication or other results obtained
|
| 66 |
+
through the use of the *Software*, users are strongly encouraged to cite the
|
| 67 |
+
corresponding publications as explained in the documentation of the *Software*.
|
| 68 |
+
|
| 69 |
+
## 5. Disclaimer
|
| 70 |
+
|
| 71 |
+
THE USER CANNOT USE, EXPLOIT OR DISTRIBUTE THE *SOFTWARE* FOR COMMERCIAL PURPOSES
|
| 72 |
+
WITHOUT PRIOR AND EXPLICIT CONSENT OF LICENSORS. YOU MUST CONTACT INRIA FOR ANY
|
| 73 |
+
UNAUTHORIZED USE: stip-sophia.transfert@inria.fr . ANY SUCH ACTION WILL
|
| 74 |
+
CONSTITUTE A FORGERY. THIS *SOFTWARE* IS PROVIDED "AS IS" WITHOUT ANY WARRANTIES
|
| 75 |
+
OF ANY NATURE AND ANY EXPRESS OR IMPLIED WARRANTIES, WITH REGARDS TO COMMERCIAL
|
| 76 |
+
USE, PROFESSIONNAL USE, LEGAL OR NOT, OR OTHER, OR COMMERCIALISATION OR
|
| 77 |
+
ADAPTATION. UNLESS EXPLICITLY PROVIDED BY LAW, IN NO EVENT, SHALL INRIA OR THE
|
| 78 |
+
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
| 79 |
+
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
| 80 |
+
GOODS OR SERVICES, LOSS OF USE, DATA, OR PROFITS OR BUSINESS INTERRUPTION)
|
| 81 |
+
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
| 82 |
+
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING FROM, OUT OF OR
|
| 83 |
+
IN CONNECTION WITH THE *SOFTWARE* OR THE USE OR OTHER DEALINGS IN THE *SOFTWARE*.
|
gaussian-grouping/submodules/diff-gaussian-rasterization/README.md
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Differential Gaussian Rasterization
|
| 2 |
+
|
| 3 |
+
Modified by Gaussian-Grouping https://github.com/lkeab/gaussian-grouping
|
| 4 |
+
|
| 5 |
+
Used as the rasterization engine for the paper "3D Gaussian Splatting for Real-Time Rendering of Radiance Fields". If you can make use of it in your own research, please be so kind to cite us.
|
| 6 |
+
|
| 7 |
+
<section class="section" id="BibTeX">
|
| 8 |
+
<div class="container is-max-desktop content">
|
| 9 |
+
<h2 class="title">BibTeX</h2>
|
| 10 |
+
<pre><code>@Article{kerbl3Dgaussians,
|
| 11 |
+
author = {Kerbl, Bernhard and Kopanas, Georgios and Leimk{\"u}hler, Thomas and Drettakis, George},
|
| 12 |
+
title = {3D Gaussian Splatting for Real-Time Radiance Field Rendering},
|
| 13 |
+
journal = {ACM Transactions on Graphics},
|
| 14 |
+
number = {4},
|
| 15 |
+
volume = {42},
|
| 16 |
+
month = {July},
|
| 17 |
+
year = {2023},
|
| 18 |
+
url = {https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/}
|
| 19 |
+
}</code></pre>
|
| 20 |
+
</div>
|
| 21 |
+
</section>
|
gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/auxiliary.h
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (C) 2023, Inria
|
| 3 |
+
* GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 4 |
+
* All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* This software is free for non-commercial, research and evaluation use
|
| 7 |
+
* under the terms of the LICENSE.md file.
|
| 8 |
+
*
|
| 9 |
+
* For inquiries contact george.drettakis@inria.fr
|
| 10 |
+
*/
|
| 11 |
+
|
| 12 |
+
#ifndef CUDA_RASTERIZER_AUXILIARY_H_INCLUDED
|
| 13 |
+
#define CUDA_RASTERIZER_AUXILIARY_H_INCLUDED
|
| 14 |
+
|
| 15 |
+
#include "config.h"
|
| 16 |
+
#include "stdio.h"
|
| 17 |
+
|
| 18 |
+
#define BLOCK_SIZE (BLOCK_X * BLOCK_Y)
|
| 19 |
+
#define NUM_WARPS (BLOCK_SIZE/32)
|
| 20 |
+
|
| 21 |
+
// Spherical harmonics coefficients
|
| 22 |
+
__device__ const float SH_C0 = 0.28209479177387814f;
|
| 23 |
+
__device__ const float SH_C1 = 0.4886025119029199f;
|
| 24 |
+
__device__ const float SH_C2[] = {
|
| 25 |
+
1.0925484305920792f,
|
| 26 |
+
-1.0925484305920792f,
|
| 27 |
+
0.31539156525252005f,
|
| 28 |
+
-1.0925484305920792f,
|
| 29 |
+
0.5462742152960396f
|
| 30 |
+
};
|
| 31 |
+
__device__ const float SH_C3[] = {
|
| 32 |
+
-0.5900435899266435f,
|
| 33 |
+
2.890611442640554f,
|
| 34 |
+
-0.4570457994644658f,
|
| 35 |
+
0.3731763325901154f,
|
| 36 |
+
-0.4570457994644658f,
|
| 37 |
+
1.445305721320277f,
|
| 38 |
+
-0.5900435899266435f
|
| 39 |
+
};
|
| 40 |
+
|
| 41 |
+
__forceinline__ __device__ float ndc2Pix(float v, int S)
|
| 42 |
+
{
|
| 43 |
+
return ((v + 1.0) * S - 1.0) * 0.5;
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
__forceinline__ __device__ void getRect(const float2 p, int max_radius, uint2& rect_min, uint2& rect_max, dim3 grid)
|
| 47 |
+
{
|
| 48 |
+
rect_min = {
|
| 49 |
+
min(grid.x, max((int)0, (int)((p.x - max_radius) / BLOCK_X))),
|
| 50 |
+
min(grid.y, max((int)0, (int)((p.y - max_radius) / BLOCK_Y)))
|
| 51 |
+
};
|
| 52 |
+
rect_max = {
|
| 53 |
+
min(grid.x, max((int)0, (int)((p.x + max_radius + BLOCK_X - 1) / BLOCK_X))),
|
| 54 |
+
min(grid.y, max((int)0, (int)((p.y + max_radius + BLOCK_Y - 1) / BLOCK_Y)))
|
| 55 |
+
};
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
__forceinline__ __device__ float3 transformPoint4x3(const float3& p, const float* matrix)
|
| 59 |
+
{
|
| 60 |
+
float3 transformed = {
|
| 61 |
+
matrix[0] * p.x + matrix[4] * p.y + matrix[8] * p.z + matrix[12],
|
| 62 |
+
matrix[1] * p.x + matrix[5] * p.y + matrix[9] * p.z + matrix[13],
|
| 63 |
+
matrix[2] * p.x + matrix[6] * p.y + matrix[10] * p.z + matrix[14],
|
| 64 |
+
};
|
| 65 |
+
return transformed;
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
__forceinline__ __device__ float4 transformPoint4x4(const float3& p, const float* matrix)
|
| 69 |
+
{
|
| 70 |
+
float4 transformed = {
|
| 71 |
+
matrix[0] * p.x + matrix[4] * p.y + matrix[8] * p.z + matrix[12],
|
| 72 |
+
matrix[1] * p.x + matrix[5] * p.y + matrix[9] * p.z + matrix[13],
|
| 73 |
+
matrix[2] * p.x + matrix[6] * p.y + matrix[10] * p.z + matrix[14],
|
| 74 |
+
matrix[3] * p.x + matrix[7] * p.y + matrix[11] * p.z + matrix[15]
|
| 75 |
+
};
|
| 76 |
+
return transformed;
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
__forceinline__ __device__ float3 transformVec4x3(const float3& p, const float* matrix)
|
| 80 |
+
{
|
| 81 |
+
float3 transformed = {
|
| 82 |
+
matrix[0] * p.x + matrix[4] * p.y + matrix[8] * p.z,
|
| 83 |
+
matrix[1] * p.x + matrix[5] * p.y + matrix[9] * p.z,
|
| 84 |
+
matrix[2] * p.x + matrix[6] * p.y + matrix[10] * p.z,
|
| 85 |
+
};
|
| 86 |
+
return transformed;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
__forceinline__ __device__ float3 transformVec4x3Transpose(const float3& p, const float* matrix)
|
| 90 |
+
{
|
| 91 |
+
float3 transformed = {
|
| 92 |
+
matrix[0] * p.x + matrix[1] * p.y + matrix[2] * p.z,
|
| 93 |
+
matrix[4] * p.x + matrix[5] * p.y + matrix[6] * p.z,
|
| 94 |
+
matrix[8] * p.x + matrix[9] * p.y + matrix[10] * p.z,
|
| 95 |
+
};
|
| 96 |
+
return transformed;
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
__forceinline__ __device__ float dnormvdz(float3 v, float3 dv)
|
| 100 |
+
{
|
| 101 |
+
float sum2 = v.x * v.x + v.y * v.y + v.z * v.z;
|
| 102 |
+
float invsum32 = 1.0f / sqrt(sum2 * sum2 * sum2);
|
| 103 |
+
float dnormvdz = (-v.x * v.z * dv.x - v.y * v.z * dv.y + (sum2 - v.z * v.z) * dv.z) * invsum32;
|
| 104 |
+
return dnormvdz;
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
__forceinline__ __device__ float3 dnormvdv(float3 v, float3 dv)
|
| 108 |
+
{
|
| 109 |
+
float sum2 = v.x * v.x + v.y * v.y + v.z * v.z;
|
| 110 |
+
float invsum32 = 1.0f / sqrt(sum2 * sum2 * sum2);
|
| 111 |
+
|
| 112 |
+
float3 dnormvdv;
|
| 113 |
+
dnormvdv.x = ((+sum2 - v.x * v.x) * dv.x - v.y * v.x * dv.y - v.z * v.x * dv.z) * invsum32;
|
| 114 |
+
dnormvdv.y = (-v.x * v.y * dv.x + (sum2 - v.y * v.y) * dv.y - v.z * v.y * dv.z) * invsum32;
|
| 115 |
+
dnormvdv.z = (-v.x * v.z * dv.x - v.y * v.z * dv.y + (sum2 - v.z * v.z) * dv.z) * invsum32;
|
| 116 |
+
return dnormvdv;
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
__forceinline__ __device__ float4 dnormvdv(float4 v, float4 dv)
|
| 120 |
+
{
|
| 121 |
+
float sum2 = v.x * v.x + v.y * v.y + v.z * v.z + v.w * v.w;
|
| 122 |
+
float invsum32 = 1.0f / sqrt(sum2 * sum2 * sum2);
|
| 123 |
+
|
| 124 |
+
float4 vdv = { v.x * dv.x, v.y * dv.y, v.z * dv.z, v.w * dv.w };
|
| 125 |
+
float vdv_sum = vdv.x + vdv.y + vdv.z + vdv.w;
|
| 126 |
+
float4 dnormvdv;
|
| 127 |
+
dnormvdv.x = ((sum2 - v.x * v.x) * dv.x - v.x * (vdv_sum - vdv.x)) * invsum32;
|
| 128 |
+
dnormvdv.y = ((sum2 - v.y * v.y) * dv.y - v.y * (vdv_sum - vdv.y)) * invsum32;
|
| 129 |
+
dnormvdv.z = ((sum2 - v.z * v.z) * dv.z - v.z * (vdv_sum - vdv.z)) * invsum32;
|
| 130 |
+
dnormvdv.w = ((sum2 - v.w * v.w) * dv.w - v.w * (vdv_sum - vdv.w)) * invsum32;
|
| 131 |
+
return dnormvdv;
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
__forceinline__ __device__ float sigmoid(float x)
|
| 135 |
+
{
|
| 136 |
+
return 1.0f / (1.0f + expf(-x));
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
__forceinline__ __device__ bool in_frustum(int idx,
|
| 140 |
+
const float* orig_points,
|
| 141 |
+
const float* viewmatrix,
|
| 142 |
+
const float* projmatrix,
|
| 143 |
+
bool prefiltered,
|
| 144 |
+
float3& p_view)
|
| 145 |
+
{
|
| 146 |
+
float3 p_orig = { orig_points[3 * idx], orig_points[3 * idx + 1], orig_points[3 * idx + 2] };
|
| 147 |
+
|
| 148 |
+
// Bring points to screen space
|
| 149 |
+
float4 p_hom = transformPoint4x4(p_orig, projmatrix);
|
| 150 |
+
float p_w = 1.0f / (p_hom.w + 0.0000001f);
|
| 151 |
+
float3 p_proj = { p_hom.x * p_w, p_hom.y * p_w, p_hom.z * p_w };
|
| 152 |
+
p_view = transformPoint4x3(p_orig, viewmatrix);
|
| 153 |
+
|
| 154 |
+
if (p_view.z <= 0.2f)// || ((p_proj.x < -1.3 || p_proj.x > 1.3 || p_proj.y < -1.3 || p_proj.y > 1.3)))
|
| 155 |
+
{
|
| 156 |
+
if (prefiltered)
|
| 157 |
+
{
|
| 158 |
+
printf("Point is filtered although prefiltered is set. This shouldn't happen!");
|
| 159 |
+
__trap();
|
| 160 |
+
}
|
| 161 |
+
return false;
|
| 162 |
+
}
|
| 163 |
+
return true;
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
#define CHECK_CUDA(A, debug) \
|
| 167 |
+
A; if(debug) { \
|
| 168 |
+
auto ret = cudaDeviceSynchronize(); \
|
| 169 |
+
if (ret != cudaSuccess) { \
|
| 170 |
+
std::cerr << "\n[CUDA ERROR] in " << __FILE__ << "\nLine " << __LINE__ << ": " << cudaGetErrorString(ret); \
|
| 171 |
+
throw std::runtime_error(cudaGetErrorString(ret)); \
|
| 172 |
+
} \
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
#endif
|
gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.cu
ADDED
|
@@ -0,0 +1,686 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (C) 2023, Gaussian-Grouping
|
| 3 |
+
* Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
|
| 4 |
+
* All rights reserved.
|
| 5 |
+
* ------------------------------------------------------------------------
|
| 6 |
+
* Modified from codes in Gaussian-Splatting
|
| 7 |
+
* GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 8 |
+
*/
|
| 9 |
+
|
| 10 |
+
#include "backward.h"
|
| 11 |
+
#include "auxiliary.h"
|
| 12 |
+
#include <cooperative_groups.h>
|
| 13 |
+
#include <cooperative_groups/reduce.h>
|
| 14 |
+
namespace cg = cooperative_groups;
|
| 15 |
+
|
| 16 |
+
// Backward pass for conversion of spherical harmonics to RGB for
|
| 17 |
+
// each Gaussian.
|
| 18 |
+
__device__ void computeColorFromSH(int idx, int deg, int max_coeffs, const glm::vec3* means, glm::vec3 campos, const float* shs, const bool* clamped, const glm::vec3* dL_dcolor, glm::vec3* dL_dmeans, glm::vec3* dL_dshs)
|
| 19 |
+
{
|
| 20 |
+
// Compute intermediate values, as it is done during forward
|
| 21 |
+
glm::vec3 pos = means[idx];
|
| 22 |
+
glm::vec3 dir_orig = pos - campos;
|
| 23 |
+
glm::vec3 dir = dir_orig / glm::length(dir_orig);
|
| 24 |
+
|
| 25 |
+
glm::vec3* sh = ((glm::vec3*)shs) + idx * max_coeffs;
|
| 26 |
+
|
| 27 |
+
// Use PyTorch rule for clamping: if clamping was applied,
|
| 28 |
+
// gradient becomes 0.
|
| 29 |
+
glm::vec3 dL_dRGB = dL_dcolor[idx];
|
| 30 |
+
dL_dRGB.x *= clamped[3 * idx + 0] ? 0 : 1;
|
| 31 |
+
dL_dRGB.y *= clamped[3 * idx + 1] ? 0 : 1;
|
| 32 |
+
dL_dRGB.z *= clamped[3 * idx + 2] ? 0 : 1;
|
| 33 |
+
|
| 34 |
+
glm::vec3 dRGBdx(0, 0, 0);
|
| 35 |
+
glm::vec3 dRGBdy(0, 0, 0);
|
| 36 |
+
glm::vec3 dRGBdz(0, 0, 0);
|
| 37 |
+
float x = dir.x;
|
| 38 |
+
float y = dir.y;
|
| 39 |
+
float z = dir.z;
|
| 40 |
+
|
| 41 |
+
// Target location for this Gaussian to write SH gradients to
|
| 42 |
+
glm::vec3* dL_dsh = dL_dshs + idx * max_coeffs;
|
| 43 |
+
|
| 44 |
+
// No tricks here, just high school-level calculus.
|
| 45 |
+
float dRGBdsh0 = SH_C0;
|
| 46 |
+
dL_dsh[0] = dRGBdsh0 * dL_dRGB;
|
| 47 |
+
if (deg > 0)
|
| 48 |
+
{
|
| 49 |
+
float dRGBdsh1 = -SH_C1 * y;
|
| 50 |
+
float dRGBdsh2 = SH_C1 * z;
|
| 51 |
+
float dRGBdsh3 = -SH_C1 * x;
|
| 52 |
+
dL_dsh[1] = dRGBdsh1 * dL_dRGB;
|
| 53 |
+
dL_dsh[2] = dRGBdsh2 * dL_dRGB;
|
| 54 |
+
dL_dsh[3] = dRGBdsh3 * dL_dRGB;
|
| 55 |
+
|
| 56 |
+
dRGBdx = -SH_C1 * sh[3];
|
| 57 |
+
dRGBdy = -SH_C1 * sh[1];
|
| 58 |
+
dRGBdz = SH_C1 * sh[2];
|
| 59 |
+
|
| 60 |
+
if (deg > 1)
|
| 61 |
+
{
|
| 62 |
+
float xx = x * x, yy = y * y, zz = z * z;
|
| 63 |
+
float xy = x * y, yz = y * z, xz = x * z;
|
| 64 |
+
|
| 65 |
+
float dRGBdsh4 = SH_C2[0] * xy;
|
| 66 |
+
float dRGBdsh5 = SH_C2[1] * yz;
|
| 67 |
+
float dRGBdsh6 = SH_C2[2] * (2.f * zz - xx - yy);
|
| 68 |
+
float dRGBdsh7 = SH_C2[3] * xz;
|
| 69 |
+
float dRGBdsh8 = SH_C2[4] * (xx - yy);
|
| 70 |
+
dL_dsh[4] = dRGBdsh4 * dL_dRGB;
|
| 71 |
+
dL_dsh[5] = dRGBdsh5 * dL_dRGB;
|
| 72 |
+
dL_dsh[6] = dRGBdsh6 * dL_dRGB;
|
| 73 |
+
dL_dsh[7] = dRGBdsh7 * dL_dRGB;
|
| 74 |
+
dL_dsh[8] = dRGBdsh8 * dL_dRGB;
|
| 75 |
+
|
| 76 |
+
dRGBdx += SH_C2[0] * y * sh[4] + SH_C2[2] * 2.f * -x * sh[6] + SH_C2[3] * z * sh[7] + SH_C2[4] * 2.f * x * sh[8];
|
| 77 |
+
dRGBdy += SH_C2[0] * x * sh[4] + SH_C2[1] * z * sh[5] + SH_C2[2] * 2.f * -y * sh[6] + SH_C2[4] * 2.f * -y * sh[8];
|
| 78 |
+
dRGBdz += SH_C2[1] * y * sh[5] + SH_C2[2] * 2.f * 2.f * z * sh[6] + SH_C2[3] * x * sh[7];
|
| 79 |
+
|
| 80 |
+
if (deg > 2)
|
| 81 |
+
{
|
| 82 |
+
float dRGBdsh9 = SH_C3[0] * y * (3.f * xx - yy);
|
| 83 |
+
float dRGBdsh10 = SH_C3[1] * xy * z;
|
| 84 |
+
float dRGBdsh11 = SH_C3[2] * y * (4.f * zz - xx - yy);
|
| 85 |
+
float dRGBdsh12 = SH_C3[3] * z * (2.f * zz - 3.f * xx - 3.f * yy);
|
| 86 |
+
float dRGBdsh13 = SH_C3[4] * x * (4.f * zz - xx - yy);
|
| 87 |
+
float dRGBdsh14 = SH_C3[5] * z * (xx - yy);
|
| 88 |
+
float dRGBdsh15 = SH_C3[6] * x * (xx - 3.f * yy);
|
| 89 |
+
dL_dsh[9] = dRGBdsh9 * dL_dRGB;
|
| 90 |
+
dL_dsh[10] = dRGBdsh10 * dL_dRGB;
|
| 91 |
+
dL_dsh[11] = dRGBdsh11 * dL_dRGB;
|
| 92 |
+
dL_dsh[12] = dRGBdsh12 * dL_dRGB;
|
| 93 |
+
dL_dsh[13] = dRGBdsh13 * dL_dRGB;
|
| 94 |
+
dL_dsh[14] = dRGBdsh14 * dL_dRGB;
|
| 95 |
+
dL_dsh[15] = dRGBdsh15 * dL_dRGB;
|
| 96 |
+
|
| 97 |
+
dRGBdx += (
|
| 98 |
+
SH_C3[0] * sh[9] * 3.f * 2.f * xy +
|
| 99 |
+
SH_C3[1] * sh[10] * yz +
|
| 100 |
+
SH_C3[2] * sh[11] * -2.f * xy +
|
| 101 |
+
SH_C3[3] * sh[12] * -3.f * 2.f * xz +
|
| 102 |
+
SH_C3[4] * sh[13] * (-3.f * xx + 4.f * zz - yy) +
|
| 103 |
+
SH_C3[5] * sh[14] * 2.f * xz +
|
| 104 |
+
SH_C3[6] * sh[15] * 3.f * (xx - yy));
|
| 105 |
+
|
| 106 |
+
dRGBdy += (
|
| 107 |
+
SH_C3[0] * sh[9] * 3.f * (xx - yy) +
|
| 108 |
+
SH_C3[1] * sh[10] * xz +
|
| 109 |
+
SH_C3[2] * sh[11] * (-3.f * yy + 4.f * zz - xx) +
|
| 110 |
+
SH_C3[3] * sh[12] * -3.f * 2.f * yz +
|
| 111 |
+
SH_C3[4] * sh[13] * -2.f * xy +
|
| 112 |
+
SH_C3[5] * sh[14] * -2.f * yz +
|
| 113 |
+
SH_C3[6] * sh[15] * -3.f * 2.f * xy);
|
| 114 |
+
|
| 115 |
+
dRGBdz += (
|
| 116 |
+
SH_C3[1] * sh[10] * xy +
|
| 117 |
+
SH_C3[2] * sh[11] * 4.f * 2.f * yz +
|
| 118 |
+
SH_C3[3] * sh[12] * 3.f * (2.f * zz - xx - yy) +
|
| 119 |
+
SH_C3[4] * sh[13] * 4.f * 2.f * xz +
|
| 120 |
+
SH_C3[5] * sh[14] * (xx - yy));
|
| 121 |
+
}
|
| 122 |
+
}
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
// The view direction is an input to the computation. View direction
|
| 126 |
+
// is influenced by the Gaussian's mean, so SHs gradients
|
| 127 |
+
// must propagate back into 3D position.
|
| 128 |
+
glm::vec3 dL_ddir(glm::dot(dRGBdx, dL_dRGB), glm::dot(dRGBdy, dL_dRGB), glm::dot(dRGBdz, dL_dRGB));
|
| 129 |
+
|
| 130 |
+
// Account for normalization of direction
|
| 131 |
+
float3 dL_dmean = dnormvdv(float3{ dir_orig.x, dir_orig.y, dir_orig.z }, float3{ dL_ddir.x, dL_ddir.y, dL_ddir.z });
|
| 132 |
+
|
| 133 |
+
// Gradients of loss w.r.t. Gaussian means, but only the portion
|
| 134 |
+
// that is caused because the mean affects the view-dependent color.
|
| 135 |
+
// Additional mean gradient is accumulated in below methods.
|
| 136 |
+
dL_dmeans[idx] += glm::vec3(dL_dmean.x, dL_dmean.y, dL_dmean.z);
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
// Backward version of INVERSE 2D covariance matrix computation
|
| 140 |
+
// (due to length launched as separate kernel before other
|
| 141 |
+
// backward steps contained in preprocess)
|
| 142 |
+
__global__ void computeCov2DCUDA(int P,
|
| 143 |
+
const float3* means,
|
| 144 |
+
const int* radii,
|
| 145 |
+
const float* cov3Ds,
|
| 146 |
+
const float h_x, float h_y,
|
| 147 |
+
const float tan_fovx, float tan_fovy,
|
| 148 |
+
const float* view_matrix,
|
| 149 |
+
const float* dL_dconics,
|
| 150 |
+
float3* dL_dmeans,
|
| 151 |
+
float* dL_dcov)
|
| 152 |
+
{
|
| 153 |
+
auto idx = cg::this_grid().thread_rank();
|
| 154 |
+
if (idx >= P || !(radii[idx] > 0))
|
| 155 |
+
return;
|
| 156 |
+
|
| 157 |
+
// Reading location of 3D covariance for this Gaussian
|
| 158 |
+
const float* cov3D = cov3Ds + 6 * idx;
|
| 159 |
+
|
| 160 |
+
// Fetch gradients, recompute 2D covariance and relevant
|
| 161 |
+
// intermediate forward results needed in the backward.
|
| 162 |
+
float3 mean = means[idx];
|
| 163 |
+
float3 dL_dconic = { dL_dconics[4 * idx], dL_dconics[4 * idx + 1], dL_dconics[4 * idx + 3] };
|
| 164 |
+
float3 t = transformPoint4x3(mean, view_matrix);
|
| 165 |
+
|
| 166 |
+
const float limx = 1.3f * tan_fovx;
|
| 167 |
+
const float limy = 1.3f * tan_fovy;
|
| 168 |
+
const float txtz = t.x / t.z;
|
| 169 |
+
const float tytz = t.y / t.z;
|
| 170 |
+
t.x = min(limx, max(-limx, txtz)) * t.z;
|
| 171 |
+
t.y = min(limy, max(-limy, tytz)) * t.z;
|
| 172 |
+
|
| 173 |
+
const float x_grad_mul = txtz < -limx || txtz > limx ? 0 : 1;
|
| 174 |
+
const float y_grad_mul = tytz < -limy || tytz > limy ? 0 : 1;
|
| 175 |
+
|
| 176 |
+
glm::mat3 J = glm::mat3(h_x / t.z, 0.0f, -(h_x * t.x) / (t.z * t.z),
|
| 177 |
+
0.0f, h_y / t.z, -(h_y * t.y) / (t.z * t.z),
|
| 178 |
+
0, 0, 0);
|
| 179 |
+
|
| 180 |
+
glm::mat3 W = glm::mat3(
|
| 181 |
+
view_matrix[0], view_matrix[4], view_matrix[8],
|
| 182 |
+
view_matrix[1], view_matrix[5], view_matrix[9],
|
| 183 |
+
view_matrix[2], view_matrix[6], view_matrix[10]);
|
| 184 |
+
|
| 185 |
+
glm::mat3 Vrk = glm::mat3(
|
| 186 |
+
cov3D[0], cov3D[1], cov3D[2],
|
| 187 |
+
cov3D[1], cov3D[3], cov3D[4],
|
| 188 |
+
cov3D[2], cov3D[4], cov3D[5]);
|
| 189 |
+
|
| 190 |
+
glm::mat3 T = W * J;
|
| 191 |
+
|
| 192 |
+
glm::mat3 cov2D = glm::transpose(T) * glm::transpose(Vrk) * T;
|
| 193 |
+
|
| 194 |
+
// Use helper variables for 2D covariance entries. More compact.
|
| 195 |
+
float a = cov2D[0][0] += 0.3f;
|
| 196 |
+
float b = cov2D[0][1];
|
| 197 |
+
float c = cov2D[1][1] += 0.3f;
|
| 198 |
+
|
| 199 |
+
float denom = a * c - b * b;
|
| 200 |
+
float dL_da = 0, dL_db = 0, dL_dc = 0;
|
| 201 |
+
float denom2inv = 1.0f / ((denom * denom) + 0.0000001f);
|
| 202 |
+
|
| 203 |
+
if (denom2inv != 0)
|
| 204 |
+
{
|
| 205 |
+
// Gradients of loss w.r.t. entries of 2D covariance matrix,
|
| 206 |
+
// given gradients of loss w.r.t. conic matrix (inverse covariance matrix).
|
| 207 |
+
// e.g., dL / da = dL / d_conic_a * d_conic_a / d_a
|
| 208 |
+
dL_da = denom2inv * (-c * c * dL_dconic.x + 2 * b * c * dL_dconic.y + (denom - a * c) * dL_dconic.z);
|
| 209 |
+
dL_dc = denom2inv * (-a * a * dL_dconic.z + 2 * a * b * dL_dconic.y + (denom - a * c) * dL_dconic.x);
|
| 210 |
+
dL_db = denom2inv * 2 * (b * c * dL_dconic.x - (denom + 2 * b * b) * dL_dconic.y + a * b * dL_dconic.z);
|
| 211 |
+
|
| 212 |
+
// Gradients of loss L w.r.t. each 3D covariance matrix (Vrk) entry,
|
| 213 |
+
// given gradients w.r.t. 2D covariance matrix (diagonal).
|
| 214 |
+
// cov2D = transpose(T) * transpose(Vrk) * T;
|
| 215 |
+
dL_dcov[6 * idx + 0] = (T[0][0] * T[0][0] * dL_da + T[0][0] * T[1][0] * dL_db + T[1][0] * T[1][0] * dL_dc);
|
| 216 |
+
dL_dcov[6 * idx + 3] = (T[0][1] * T[0][1] * dL_da + T[0][1] * T[1][1] * dL_db + T[1][1] * T[1][1] * dL_dc);
|
| 217 |
+
dL_dcov[6 * idx + 5] = (T[0][2] * T[0][2] * dL_da + T[0][2] * T[1][2] * dL_db + T[1][2] * T[1][2] * dL_dc);
|
| 218 |
+
|
| 219 |
+
// Gradients of loss L w.r.t. each 3D covariance matrix (Vrk) entry,
|
| 220 |
+
// given gradients w.r.t. 2D covariance matrix (off-diagonal).
|
| 221 |
+
// Off-diagonal elements appear twice --> double the gradient.
|
| 222 |
+
// cov2D = transpose(T) * transpose(Vrk) * T;
|
| 223 |
+
dL_dcov[6 * idx + 1] = 2 * T[0][0] * T[0][1] * dL_da + (T[0][0] * T[1][1] + T[0][1] * T[1][0]) * dL_db + 2 * T[1][0] * T[1][1] * dL_dc;
|
| 224 |
+
dL_dcov[6 * idx + 2] = 2 * T[0][0] * T[0][2] * dL_da + (T[0][0] * T[1][2] + T[0][2] * T[1][0]) * dL_db + 2 * T[1][0] * T[1][2] * dL_dc;
|
| 225 |
+
dL_dcov[6 * idx + 4] = 2 * T[0][2] * T[0][1] * dL_da + (T[0][1] * T[1][2] + T[0][2] * T[1][1]) * dL_db + 2 * T[1][1] * T[1][2] * dL_dc;
|
| 226 |
+
}
|
| 227 |
+
else
|
| 228 |
+
{
|
| 229 |
+
for (int i = 0; i < 6; i++)
|
| 230 |
+
dL_dcov[6 * idx + i] = 0;
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
// Gradients of loss w.r.t. upper 2x3 portion of intermediate matrix T
|
| 234 |
+
// cov2D = transpose(T) * transpose(Vrk) * T;
|
| 235 |
+
float dL_dT00 = 2 * (T[0][0] * Vrk[0][0] + T[0][1] * Vrk[0][1] + T[0][2] * Vrk[0][2]) * dL_da +
|
| 236 |
+
(T[1][0] * Vrk[0][0] + T[1][1] * Vrk[0][1] + T[1][2] * Vrk[0][2]) * dL_db;
|
| 237 |
+
float dL_dT01 = 2 * (T[0][0] * Vrk[1][0] + T[0][1] * Vrk[1][1] + T[0][2] * Vrk[1][2]) * dL_da +
|
| 238 |
+
(T[1][0] * Vrk[1][0] + T[1][1] * Vrk[1][1] + T[1][2] * Vrk[1][2]) * dL_db;
|
| 239 |
+
float dL_dT02 = 2 * (T[0][0] * Vrk[2][0] + T[0][1] * Vrk[2][1] + T[0][2] * Vrk[2][2]) * dL_da +
|
| 240 |
+
(T[1][0] * Vrk[2][0] + T[1][1] * Vrk[2][1] + T[1][2] * Vrk[2][2]) * dL_db;
|
| 241 |
+
float dL_dT10 = 2 * (T[1][0] * Vrk[0][0] + T[1][1] * Vrk[0][1] + T[1][2] * Vrk[0][2]) * dL_dc +
|
| 242 |
+
(T[0][0] * Vrk[0][0] + T[0][1] * Vrk[0][1] + T[0][2] * Vrk[0][2]) * dL_db;
|
| 243 |
+
float dL_dT11 = 2 * (T[1][0] * Vrk[1][0] + T[1][1] * Vrk[1][1] + T[1][2] * Vrk[1][2]) * dL_dc +
|
| 244 |
+
(T[0][0] * Vrk[1][0] + T[0][1] * Vrk[1][1] + T[0][2] * Vrk[1][2]) * dL_db;
|
| 245 |
+
float dL_dT12 = 2 * (T[1][0] * Vrk[2][0] + T[1][1] * Vrk[2][1] + T[1][2] * Vrk[2][2]) * dL_dc +
|
| 246 |
+
(T[0][0] * Vrk[2][0] + T[0][1] * Vrk[2][1] + T[0][2] * Vrk[2][2]) * dL_db;
|
| 247 |
+
|
| 248 |
+
// Gradients of loss w.r.t. upper 3x2 non-zero entries of Jacobian matrix
|
| 249 |
+
// T = W * J
|
| 250 |
+
float dL_dJ00 = W[0][0] * dL_dT00 + W[0][1] * dL_dT01 + W[0][2] * dL_dT02;
|
| 251 |
+
float dL_dJ02 = W[2][0] * dL_dT00 + W[2][1] * dL_dT01 + W[2][2] * dL_dT02;
|
| 252 |
+
float dL_dJ11 = W[1][0] * dL_dT10 + W[1][1] * dL_dT11 + W[1][2] * dL_dT12;
|
| 253 |
+
float dL_dJ12 = W[2][0] * dL_dT10 + W[2][1] * dL_dT11 + W[2][2] * dL_dT12;
|
| 254 |
+
|
| 255 |
+
float tz = 1.f / t.z;
|
| 256 |
+
float tz2 = tz * tz;
|
| 257 |
+
float tz3 = tz2 * tz;
|
| 258 |
+
|
| 259 |
+
// Gradients of loss w.r.t. transformed Gaussian mean t
|
| 260 |
+
float dL_dtx = x_grad_mul * -h_x * tz2 * dL_dJ02;
|
| 261 |
+
float dL_dty = y_grad_mul * -h_y * tz2 * dL_dJ12;
|
| 262 |
+
float dL_dtz = -h_x * tz2 * dL_dJ00 - h_y * tz2 * dL_dJ11 + (2 * h_x * t.x) * tz3 * dL_dJ02 + (2 * h_y * t.y) * tz3 * dL_dJ12;
|
| 263 |
+
|
| 264 |
+
// Account for transformation of mean to t
|
| 265 |
+
// t = transformPoint4x3(mean, view_matrix);
|
| 266 |
+
float3 dL_dmean = transformVec4x3Transpose({ dL_dtx, dL_dty, dL_dtz }, view_matrix);
|
| 267 |
+
|
| 268 |
+
// Gradients of loss w.r.t. Gaussian means, but only the portion
|
| 269 |
+
// that is caused because the mean affects the covariance matrix.
|
| 270 |
+
// Additional mean gradient is accumulated in BACKWARD::preprocess.
|
| 271 |
+
dL_dmeans[idx] = dL_dmean;
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
// Backward pass for the conversion of scale and rotation to a
|
| 275 |
+
// 3D covariance matrix for each Gaussian.
|
| 276 |
+
__device__ void computeCov3D(int idx, const glm::vec3 scale, float mod, const glm::vec4 rot, const float* dL_dcov3Ds, glm::vec3* dL_dscales, glm::vec4* dL_drots)
|
| 277 |
+
{
|
| 278 |
+
// Recompute (intermediate) results for the 3D covariance computation.
|
| 279 |
+
glm::vec4 q = rot;// / glm::length(rot);
|
| 280 |
+
float r = q.x;
|
| 281 |
+
float x = q.y;
|
| 282 |
+
float y = q.z;
|
| 283 |
+
float z = q.w;
|
| 284 |
+
|
| 285 |
+
glm::mat3 R = glm::mat3(
|
| 286 |
+
1.f - 2.f * (y * y + z * z), 2.f * (x * y - r * z), 2.f * (x * z + r * y),
|
| 287 |
+
2.f * (x * y + r * z), 1.f - 2.f * (x * x + z * z), 2.f * (y * z - r * x),
|
| 288 |
+
2.f * (x * z - r * y), 2.f * (y * z + r * x), 1.f - 2.f * (x * x + y * y)
|
| 289 |
+
);
|
| 290 |
+
|
| 291 |
+
glm::mat3 S = glm::mat3(1.0f);
|
| 292 |
+
|
| 293 |
+
glm::vec3 s = mod * scale;
|
| 294 |
+
S[0][0] = s.x;
|
| 295 |
+
S[1][1] = s.y;
|
| 296 |
+
S[2][2] = s.z;
|
| 297 |
+
|
| 298 |
+
glm::mat3 M = S * R;
|
| 299 |
+
|
| 300 |
+
const float* dL_dcov3D = dL_dcov3Ds + 6 * idx;
|
| 301 |
+
|
| 302 |
+
glm::vec3 dunc(dL_dcov3D[0], dL_dcov3D[3], dL_dcov3D[5]);
|
| 303 |
+
glm::vec3 ounc = 0.5f * glm::vec3(dL_dcov3D[1], dL_dcov3D[2], dL_dcov3D[4]);
|
| 304 |
+
|
| 305 |
+
// Convert per-element covariance loss gradients to matrix form
|
| 306 |
+
glm::mat3 dL_dSigma = glm::mat3(
|
| 307 |
+
dL_dcov3D[0], 0.5f * dL_dcov3D[1], 0.5f * dL_dcov3D[2],
|
| 308 |
+
0.5f * dL_dcov3D[1], dL_dcov3D[3], 0.5f * dL_dcov3D[4],
|
| 309 |
+
0.5f * dL_dcov3D[2], 0.5f * dL_dcov3D[4], dL_dcov3D[5]
|
| 310 |
+
);
|
| 311 |
+
|
| 312 |
+
// Compute loss gradient w.r.t. matrix M
|
| 313 |
+
// dSigma_dM = 2 * M
|
| 314 |
+
glm::mat3 dL_dM = 2.0f * M * dL_dSigma;
|
| 315 |
+
|
| 316 |
+
glm::mat3 Rt = glm::transpose(R);
|
| 317 |
+
glm::mat3 dL_dMt = glm::transpose(dL_dM);
|
| 318 |
+
|
| 319 |
+
// Gradients of loss w.r.t. scale
|
| 320 |
+
glm::vec3* dL_dscale = dL_dscales + idx;
|
| 321 |
+
dL_dscale->x = glm::dot(Rt[0], dL_dMt[0]);
|
| 322 |
+
dL_dscale->y = glm::dot(Rt[1], dL_dMt[1]);
|
| 323 |
+
dL_dscale->z = glm::dot(Rt[2], dL_dMt[2]);
|
| 324 |
+
|
| 325 |
+
dL_dMt[0] *= s.x;
|
| 326 |
+
dL_dMt[1] *= s.y;
|
| 327 |
+
dL_dMt[2] *= s.z;
|
| 328 |
+
|
| 329 |
+
// Gradients of loss w.r.t. normalized quaternion
|
| 330 |
+
glm::vec4 dL_dq;
|
| 331 |
+
dL_dq.x = 2 * z * (dL_dMt[0][1] - dL_dMt[1][0]) + 2 * y * (dL_dMt[2][0] - dL_dMt[0][2]) + 2 * x * (dL_dMt[1][2] - dL_dMt[2][1]);
|
| 332 |
+
dL_dq.y = 2 * y * (dL_dMt[1][0] + dL_dMt[0][1]) + 2 * z * (dL_dMt[2][0] + dL_dMt[0][2]) + 2 * r * (dL_dMt[1][2] - dL_dMt[2][1]) - 4 * x * (dL_dMt[2][2] + dL_dMt[1][1]);
|
| 333 |
+
dL_dq.z = 2 * x * (dL_dMt[1][0] + dL_dMt[0][1]) + 2 * r * (dL_dMt[2][0] - dL_dMt[0][2]) + 2 * z * (dL_dMt[1][2] + dL_dMt[2][1]) - 4 * y * (dL_dMt[2][2] + dL_dMt[0][0]);
|
| 334 |
+
dL_dq.w = 2 * r * (dL_dMt[0][1] - dL_dMt[1][0]) + 2 * x * (dL_dMt[2][0] + dL_dMt[0][2]) + 2 * y * (dL_dMt[1][2] + dL_dMt[2][1]) - 4 * z * (dL_dMt[1][1] + dL_dMt[0][0]);
|
| 335 |
+
|
| 336 |
+
// Gradients of loss w.r.t. unnormalized quaternion
|
| 337 |
+
float4* dL_drot = (float4*)(dL_drots + idx);
|
| 338 |
+
*dL_drot = float4{ dL_dq.x, dL_dq.y, dL_dq.z, dL_dq.w };//dnormvdv(float4{ rot.x, rot.y, rot.z, rot.w }, float4{ dL_dq.x, dL_dq.y, dL_dq.z, dL_dq.w });
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
// Backward pass of the preprocessing steps, except
// for the covariance computation and inversion
// (those are handled by a previous kernel call)
//
// One thread per Gaussian. Accumulates the projection part of the 3D mean
// gradient, then dispatches into the SH-color and scale/rotation backward
// helpers for this Gaussian.
template<int C>
__global__ void preprocessCUDA(
	int P, int D, int M,
	const float3* means,
	const int* radii,
	const float* shs,
	const bool* clamped,
	const glm::vec3* scales,
	const glm::vec4* rotations,
	const float scale_modifier,
	const float* proj,
	const glm::vec3* campos,
	const float3* dL_dmean2D,
	glm::vec3* dL_dmeans,
	float* dL_dcolor,
	float* dL_dcov3D,
	float* dL_dsh,
	glm::vec3* dL_dscale,
	glm::vec4* dL_drot)
{
	auto idx = cg::this_grid().thread_rank();
	// Skip Gaussians that were culled in the forward pass (radius 0).
	if (idx >= P || !(radii[idx] > 0))
		return;

	float3 m = means[idx];

	// Taking care of gradients from the screenspace points
	float4 m_hom = transformPoint4x4(m, proj);
	// Epsilon guards against division by zero for points on the camera plane.
	float m_w = 1.0f / (m_hom.w + 0.0000001f);

	// Compute loss gradient w.r.t. 3D means due to gradients of 2D means
	// from rendering procedure (quotient rule through the perspective
	// divide x' = x_hom / w, y' = y_hom / w).
	glm::vec3 dL_dmean;
	float mul1 = (proj[0] * m.x + proj[4] * m.y + proj[8] * m.z + proj[12]) * m_w * m_w;
	float mul2 = (proj[1] * m.x + proj[5] * m.y + proj[9] * m.z + proj[13]) * m_w * m_w;
	dL_dmean.x = (proj[0] * m_w - proj[3] * mul1) * dL_dmean2D[idx].x + (proj[1] * m_w - proj[3] * mul2) * dL_dmean2D[idx].y;
	dL_dmean.y = (proj[4] * m_w - proj[7] * mul1) * dL_dmean2D[idx].x + (proj[5] * m_w - proj[7] * mul2) * dL_dmean2D[idx].y;
	dL_dmean.z = (proj[8] * m_w - proj[11] * mul1) * dL_dmean2D[idx].x + (proj[9] * m_w - proj[11] * mul2) * dL_dmean2D[idx].y;

	// That's the second part of the mean gradient. Previous computation
	// of cov2D and following SH conversion also affects it.
	dL_dmeans[idx] += dL_dmean;

	// Compute gradient updates due to computing colors from SHs
	if (shs)
		computeColorFromSH(idx, D, M, (glm::vec3*)means, *campos, shs, clamped, (glm::vec3*)dL_dcolor, (glm::vec3*)dL_dmeans, (glm::vec3*)dL_dsh);

	// Compute gradient updates due to computing covariance from scale/rotation
	if (scales)
		computeCov3D(idx, scales[idx], scale_modifier, rotations[idx], dL_dcov3D, dL_dscale, dL_drot);
}
|
| 395 |
+
|
| 396 |
+
// Backward version of the rendering procedure.
//
// Template parameters: C = number of color channels, O = number of object
// (identity-encoding) channels added by Gaussian-Grouping. Each thread
// handles one pixel; Gaussians are re-traversed back-to-front so that the
// transmittance T can be reconstructed incrementally from its stored final
// value. Per-Gaussian gradients are scattered with atomicAdd since many
// pixels touch the same Gaussian.
template <uint32_t C, uint32_t O>
__global__ void __launch_bounds__(BLOCK_X * BLOCK_Y)
renderCUDA(
	const uint2* __restrict__ ranges,
	const uint32_t* __restrict__ point_list,
	int W, int H,
	const float* __restrict__ bg_color,
	const float2* __restrict__ points_xy_image,
	const float4* __restrict__ conic_opacity,
	const float* __restrict__ colors,
	const float* __restrict__ objects,
	const float* __restrict__ final_Ts,
	const uint32_t* __restrict__ n_contrib,
	const float* __restrict__ dL_dpixels,
	const float* __restrict__ dL_dpixels_objs,
	float3* __restrict__ dL_dmean2D,
	float4* __restrict__ dL_dconic2D,
	float* __restrict__ dL_dopacity,
	float* __restrict__ dL_dcolors,
	float* __restrict__ dL_dobjects)
{
	// We rasterize again. Compute necessary block info.
	auto block = cg::this_thread_block();
	const uint32_t horizontal_blocks = (W + BLOCK_X - 1) / BLOCK_X;
	const uint2 pix_min = { block.group_index().x * BLOCK_X, block.group_index().y * BLOCK_Y };
	const uint2 pix_max = { min(pix_min.x + BLOCK_X, W), min(pix_min.y + BLOCK_Y , H) };
	const uint2 pix = { pix_min.x + block.thread_index().x, pix_min.y + block.thread_index().y };
	const uint32_t pix_id = W * pix.y + pix.x;
	const float2 pixf = { (float)pix.x, (float)pix.y };

	// Threads outside the image still participate in the cooperative
	// shared-memory loads below, but never blend.
	const bool inside = pix.x < W&& pix.y < H;
	const uint2 range = ranges[block.group_index().y * horizontal_blocks + block.group_index().x];

	const int rounds = ((range.y - range.x + BLOCK_SIZE - 1) / BLOCK_SIZE);

	bool done = !inside;
	int toDo = range.y - range.x;

	__shared__ int collected_id[BLOCK_SIZE];
	__shared__ float2 collected_xy[BLOCK_SIZE];
	__shared__ float4 collected_conic_opacity[BLOCK_SIZE];
	__shared__ float collected_colors[C * BLOCK_SIZE];
	__shared__ float collected_objects[O * BLOCK_SIZE];

	// In the forward, we stored the final value for T, the
	// product of all (1 - alpha) factors.
	const float T_final = inside ? final_Ts[pix_id] : 0;
	float T = T_final;

	// We start from the back. The ID of the last contributing
	// Gaussian is known from each pixel from the forward.
	uint32_t contributor = toDo;
	const int last_contributor = inside ? n_contrib[pix_id] : 0;

	// Running reconstruction of the color/object value accumulated *behind*
	// the current Gaussian, needed for the alpha gradient.
	float accum_rec[C] = { 0 };
	float accum_rec_obj[O] = { 0 };
	float dL_dpixel[C];
	float dL_dpixel_obj[O];
	if (inside)
	{
		for (int i = 0; i < C; i++)
			dL_dpixel[i] = dL_dpixels[i * H * W + pix_id];
		for (int i = 0; i < O; i++)
			dL_dpixel_obj[i] = dL_dpixels_objs[i * H * W + pix_id];
	}

	float last_alpha = 0;
	float last_color[C] = { 0 };
	float last_object[O] = { 0 };

	// Gradient of pixel coordinate w.r.t. normalized
	// screen-space viewport corrdinates (-1 to 1)
	const float ddelx_dx = 0.5 * W;
	const float ddely_dy = 0.5 * H;

	// Traverse all Gaussians
	for (int i = 0; i < rounds; i++, toDo -= BLOCK_SIZE)
	{
		// Load auxiliary data into shared memory, start in the BACK
		// and load them in revers order.
		block.sync();
		const int progress = i * BLOCK_SIZE + block.thread_rank();
		if (range.x + progress < range.y)
		{
			const int coll_id = point_list[range.y - progress - 1];
			collected_id[block.thread_rank()] = coll_id;
			collected_xy[block.thread_rank()] = points_xy_image[coll_id];
			collected_conic_opacity[block.thread_rank()] = conic_opacity[coll_id];
			for (int i = 0; i < C; i++)
				collected_colors[i * BLOCK_SIZE + block.thread_rank()] = colors[coll_id * C + i];
			for (int i = 0; i < O; i++)
				collected_objects[i * BLOCK_SIZE + block.thread_rank()] = objects[coll_id * O + i];
		}
		block.sync();

		// Iterate over Gaussians
		for (int j = 0; !done && j < min(BLOCK_SIZE, toDo); j++)
		{
			// Keep track of current Gaussian ID. Skip, if this one
			// is behind the last contributor for this pixel.
			contributor--;
			if (contributor >= last_contributor)
				continue;

			// Compute blending values, as before.
			const float2 xy = collected_xy[j];
			const float2 d = { xy.x - pixf.x, xy.y - pixf.y };
			const float4 con_o = collected_conic_opacity[j];
			const float power = -0.5f * (con_o.x * d.x * d.x + con_o.z * d.y * d.y) - con_o.y * d.x * d.y;
			if (power > 0.0f)
				continue;

			const float G = exp(power);
			const float alpha = min(0.99f, con_o.w * G);
			// Same significance cutoff as the forward pass.
			if (alpha < 1.0f / 255.0f)
				continue;

			// Undo this Gaussian's contribution to recover the transmittance
			// in front of it.
			T = T / (1.f - alpha);
			const float dchannel_dcolor = alpha * T;

			// Propagate gradients to per-Gaussian colors and keep
			// gradients w.r.t. alpha (blending factor for a Gaussian/pixel
			// pair).
			float dL_dalpha = 0.0f;
			const int global_id = collected_id[j];
			for (int ch = 0; ch < C; ch++)
			{
				const float c = collected_colors[ch * BLOCK_SIZE + j];
				// Update last color (to be used in the next iteration)
				accum_rec[ch] = last_alpha * last_color[ch] + (1.f - last_alpha) * accum_rec[ch];
				last_color[ch] = c;

				const float dL_dchannel = dL_dpixel[ch];
				dL_dalpha += (c - accum_rec[ch]) * dL_dchannel;
				// Update the gradients w.r.t. color of the Gaussian.
				// Atomic, since this pixel is just one of potentially
				// many that were affected by this Gaussian.
				atomicAdd(&(dL_dcolors[global_id * C + ch]), dchannel_dcolor * dL_dchannel);
			}

			// Same recurrence for the object (identity-encoding) channels —
			// the Gaussian-Grouping extension over the stock rasterizer.
			for (int ch = 0; ch < O; ch++)
			{
				const float o = collected_objects[ch * BLOCK_SIZE + j];
				accum_rec_obj[ch] = last_alpha * last_object[ch] + (1.f - last_alpha) * accum_rec_obj[ch];
				last_object[ch] = o;

				const float dL_dchannel_obj = dL_dpixel_obj[ch];
				dL_dalpha += (o - accum_rec_obj[ch]) * dL_dchannel_obj;

				atomicAdd(&(dL_dobjects[global_id * O + ch]), dchannel_dcolor * dL_dchannel_obj);
			}
			dL_dalpha *= T;
			// Update last alpha (to be used in the next iteration)
			last_alpha = alpha;

			// Account for fact that alpha also influences how much of
			// the background color is added if nothing left to blend
			// NOTE(review): bg_dot_dpixel is loop-invariant per pixel and
			// could be hoisted out of the Gaussian loop.
			float bg_dot_dpixel = 0;
			for (int i = 0; i < C; i++)
				bg_dot_dpixel += bg_color[i] * dL_dpixel[i];
			dL_dalpha += (-T_final / (1.f - alpha)) * bg_dot_dpixel;


			// Helpful reusable temporary variables
			const float dL_dG = con_o.w * dL_dalpha;
			const float gdx = G * d.x;
			const float gdy = G * d.y;
			const float dG_ddelx = -gdx * con_o.x - gdy * con_o.y;
			const float dG_ddely = -gdy * con_o.z - gdx * con_o.y;

			// Update gradients w.r.t. 2D mean position of the Gaussian
			atomicAdd(&dL_dmean2D[global_id].x, dL_dG * dG_ddelx * ddelx_dx);
			atomicAdd(&dL_dmean2D[global_id].y, dL_dG * dG_ddely * ddely_dy);

			// Update gradients w.r.t. 2D covariance (2x2 matrix, symmetric)
			atomicAdd(&dL_dconic2D[global_id].x, -0.5f * gdx * d.x * dL_dG);
			atomicAdd(&dL_dconic2D[global_id].y, -0.5f * gdx * d.y * dL_dG);
			atomicAdd(&dL_dconic2D[global_id].w, -0.5f * gdy * d.y * dL_dG);

			// Update gradients w.r.t. opacity of the Gaussian
			atomicAdd(&(dL_dopacity[global_id]), G * dL_dalpha);
		}
	}
}
|
| 581 |
+
|
| 582 |
+
// Host-side entry point for the per-Gaussian backward preprocessing.
// Launches two kernels over all P Gaussians (256 threads per block):
// first the 2D covariance/conic backward, then the remaining
// projection / SH / scale-rotation backward.
void BACKWARD::preprocess(
	int P, int D, int M,
	const float3* means3D,
	const int* radii,
	const float* shs,
	const bool* clamped,
	const glm::vec3* scales,
	const glm::vec4* rotations,
	const float scale_modifier,
	const float* cov3Ds,
	const float* viewmatrix,
	const float* projmatrix,
	const float focal_x, float focal_y,
	const float tan_fovx, float tan_fovy,
	const glm::vec3* campos,
	const float3* dL_dmean2D,
	const float* dL_dconic,
	glm::vec3* dL_dmean3D,
	float* dL_dcolor,
	float* dL_dcov3D,
	float* dL_dsh,
	glm::vec3* dL_dscale,
	glm::vec4* dL_drot)
{
	// Propagate gradients for the path of 2D conic matrix computation.
	// Somewhat long, thus it is its own kernel rather than being part of
	// "preprocess". When done, loss gradient w.r.t. 3D means has been
	// modified and gradient w.r.t. 3D covariance matrix has been computed.
	computeCov2DCUDA << <(P + 255) / 256, 256 >> > (
		P,
		means3D,
		radii,
		cov3Ds,
		focal_x,
		focal_y,
		tan_fovx,
		tan_fovy,
		viewmatrix,
		dL_dconic,
		(float3*)dL_dmean3D,
		dL_dcov3D);

	// Propagate gradients for remaining steps: finish 3D mean gradients,
	// propagate color gradients to SH (if desireD), propagate 3D covariance
	// matrix gradients to scale and rotation.
	preprocessCUDA<NUM_CHANNELS> << < (P + 255) / 256, 256 >> > (
		P, D, M,
		(float3*)means3D,
		radii,
		shs,
		clamped,
		(glm::vec3*)scales,
		(glm::vec4*)rotations,
		scale_modifier,
		projmatrix,
		campos,
		(float3*)dL_dmean2D,
		(glm::vec3*)dL_dmean3D,
		dL_dcolor,
		dL_dcov3D,
		dL_dsh,
		dL_dscale,
		dL_drot);
}
|
| 646 |
+
|
| 647 |
+
// Host-side entry point for the backward rasterization pass. Thin wrapper
// that instantiates renderCUDA with the compile-time channel counts
// (NUM_CHANNELS color channels, NUM_OBJECTS object-encoding channels)
// and launches it over the given tile grid.
void BACKWARD::render(
	const dim3 grid, const dim3 block,
	const uint2* ranges,
	const uint32_t* point_list,
	int W, int H,
	const float* bg_color,
	const float2* means2D,
	const float4* conic_opacity,
	const float* colors,
	const float* objects,
	const float* final_Ts,
	const uint32_t* n_contrib,
	const float* dL_dpixels,
	const float* dL_dpixels_objs,
	float3* dL_dmean2D,
	float4* dL_dconic2D,
	float* dL_dopacity,
	float* dL_dcolors,
	float* dL_dobjects)
{
	renderCUDA<NUM_CHANNELS, NUM_OBJECTS> << <grid, block >> >(
		ranges,
		point_list,
		W, H,
		bg_color,
		means2D,
		conic_opacity,
		colors,
		objects,
		final_Ts,
		n_contrib,
		dL_dpixels,
		dL_dpixels_objs,
		dL_dmean2D,
		dL_dconic2D,
		dL_dopacity,
		dL_dcolors,
		dL_dobjects
	);
}
|
gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.h
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
 * Copyright (C) 2023, Gaussian-Grouping
 * Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
 * All rights reserved.
 * ------------------------------------------------------------------------
 * Modified from codes in Gaussian-Splatting
 * GRAPHDECO research group, https://team.inria.fr/graphdeco
 */

#ifndef CUDA_RASTERIZER_BACKWARD_H_INCLUDED
#define CUDA_RASTERIZER_BACKWARD_H_INCLUDED

#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define GLM_FORCE_CUDA
#include <glm/glm.hpp>

// Host-side entry points for the backward pass of the differentiable
// rasterizer (implemented in backward.cu).
namespace BACKWARD
{
	// Backward counterpart of the tile-based blending step: consumes
	// per-pixel loss gradients (color and object channels) and scatters
	// gradients onto per-Gaussian 2D means, conics, opacities, colors
	// and object encodings.
	void render(
		const dim3 grid, dim3 block,
		const uint2* ranges,
		const uint32_t* point_list,
		int W, int H,
		const float* bg_color,
		const float2* means2D,
		const float4* conic_opacity,
		const float* colors,
		const float* objects,
		const float* final_Ts,
		const uint32_t* n_contrib,
		const float* dL_dpixels,
		const float* dL_dpixels_objs,
		float3* dL_dmean2D,
		float4* dL_dconic2D,
		float* dL_dopacity,
		float* dL_dcolors,
		float* dL_dobjects);

	// Backward counterpart of per-Gaussian preprocessing: propagates the
	// gradients produced by render() further back onto 3D means, SH
	// coefficients, 3D covariances, scales and rotations.
	void preprocess(
		int P, int D, int M,
		const float3* means,
		const int* radii,
		const float* shs,
		const bool* clamped,
		const glm::vec3* scales,
		const glm::vec4* rotations,
		const float scale_modifier,
		const float* cov3Ds,
		const float* view,
		const float* proj,
		const float focal_x, float focal_y,
		const float tan_fovx, float tan_fovy,
		const glm::vec3* campos,
		const float3* dL_dmean2D,
		const float* dL_dconics,
		glm::vec3* dL_dmeans,
		float* dL_dcolor,
		float* dL_dcov3D,
		float* dL_dsh,
		glm::vec3* dL_dscale,
		glm::vec4* dL_drot);
}

#endif
|
gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/config.h
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
 * Copyright (C) 2023, Gaussian-Grouping
 * Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
 * All rights reserved.
 * ------------------------------------------------------------------------
 * Modified from codes in Gaussian-Splatting
 * GRAPHDECO research group, https://team.inria.fr/graphdeco
 */

#ifndef CUDA_RASTERIZER_CONFIG_H_INCLUDED
#define CUDA_RASTERIZER_CONFIG_H_INCLUDED

// Compile-time rasterizer configuration. These values are baked into the
// kernel template instantiations, so changing them requires a rebuild.
#define NUM_CHANNELS 3 // Default 3, RGB color channels per pixel/Gaussian
#define NUM_OBJECTS 16 // Default 16, identity-encoding (object) channels
#define BLOCK_X 16 // Tile width in pixels (one CUDA block per tile)
#define BLOCK_Y 16 // Tile height in pixels

#endif
|
gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.cu
ADDED
|
@@ -0,0 +1,489 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (C) 2023, Gaussian-Grouping
|
| 3 |
+
* Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
|
| 4 |
+
* All rights reserved.
|
| 5 |
+
* ------------------------------------------------------------------------
|
| 6 |
+
* Modified from codes in Gaussian-Splatting
|
| 7 |
+
* GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 8 |
+
*/
|
| 9 |
+
|
| 10 |
+
#include "forward.h"
|
| 11 |
+
#include "auxiliary.h"
|
| 12 |
+
#include <cooperative_groups.h>
|
| 13 |
+
#include <cooperative_groups/reduce.h>
|
| 14 |
+
namespace cg = cooperative_groups;
|
| 15 |
+
|
| 16 |
+
// Forward method for converting the input spherical harmonics
|
| 17 |
+
// coefficients of each Gaussian to a simple RGB color.
|
| 18 |
+
__device__ glm::vec3 computeColorFromSH(int idx, int deg, int max_coeffs, const glm::vec3* means, glm::vec3 campos, const float* shs, bool* clamped)
|
| 19 |
+
{
|
| 20 |
+
// The implementation is loosely based on code for
|
| 21 |
+
// "Differentiable Point-Based Radiance Fields for
|
| 22 |
+
// Efficient View Synthesis" by Zhang et al. (2022)
|
| 23 |
+
glm::vec3 pos = means[idx];
|
| 24 |
+
glm::vec3 dir = pos - campos;
|
| 25 |
+
dir = dir / glm::length(dir);
|
| 26 |
+
|
| 27 |
+
glm::vec3* sh = ((glm::vec3*)shs) + idx * max_coeffs;
|
| 28 |
+
glm::vec3 result = SH_C0 * sh[0];
|
| 29 |
+
|
| 30 |
+
if (deg > 0)
|
| 31 |
+
{
|
| 32 |
+
float x = dir.x;
|
| 33 |
+
float y = dir.y;
|
| 34 |
+
float z = dir.z;
|
| 35 |
+
result = result - SH_C1 * y * sh[1] + SH_C1 * z * sh[2] - SH_C1 * x * sh[3];
|
| 36 |
+
|
| 37 |
+
if (deg > 1)
|
| 38 |
+
{
|
| 39 |
+
float xx = x * x, yy = y * y, zz = z * z;
|
| 40 |
+
float xy = x * y, yz = y * z, xz = x * z;
|
| 41 |
+
result = result +
|
| 42 |
+
SH_C2[0] * xy * sh[4] +
|
| 43 |
+
SH_C2[1] * yz * sh[5] +
|
| 44 |
+
SH_C2[2] * (2.0f * zz - xx - yy) * sh[6] +
|
| 45 |
+
SH_C2[3] * xz * sh[7] +
|
| 46 |
+
SH_C2[4] * (xx - yy) * sh[8];
|
| 47 |
+
|
| 48 |
+
if (deg > 2)
|
| 49 |
+
{
|
| 50 |
+
result = result +
|
| 51 |
+
SH_C3[0] * y * (3.0f * xx - yy) * sh[9] +
|
| 52 |
+
SH_C3[1] * xy * z * sh[10] +
|
| 53 |
+
SH_C3[2] * y * (4.0f * zz - xx - yy) * sh[11] +
|
| 54 |
+
SH_C3[3] * z * (2.0f * zz - 3.0f * xx - 3.0f * yy) * sh[12] +
|
| 55 |
+
SH_C3[4] * x * (4.0f * zz - xx - yy) * sh[13] +
|
| 56 |
+
SH_C3[5] * z * (xx - yy) * sh[14] +
|
| 57 |
+
SH_C3[6] * x * (xx - 3.0f * yy) * sh[15];
|
| 58 |
+
}
|
| 59 |
+
}
|
| 60 |
+
}
|
| 61 |
+
result += 0.5f;
|
| 62 |
+
|
| 63 |
+
// RGB colors are clamped to positive values. If values are
|
| 64 |
+
// clamped, we need to keep track of this for the backward pass.
|
| 65 |
+
clamped[3 * idx + 0] = (result.x < 0);
|
| 66 |
+
clamped[3 * idx + 1] = (result.y < 0);
|
| 67 |
+
clamped[3 * idx + 2] = (result.z < 0);
|
| 68 |
+
return glm::max(result, 0.0f);
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
// Forward version of 2D covariance matrix computation
//
// Projects the 3D covariance `cov3D` of a Gaussian at world-space `mean`
// into screen space via a local affine approximation (Jacobian J of the
// perspective projection), returning the three unique entries of the
// symmetric 2x2 covariance as (cov_xx, cov_xy, cov_yy).
__device__ float3 computeCov2D(const float3& mean, float focal_x, float focal_y, float tan_fovx, float tan_fovy, const float* cov3D, const float* viewmatrix)
{
	// The following models the steps outlined by equations 29
	// and 31 in "EWA Splatting" (Zwicker et al., 2002).
	// Additionally considers aspect / scaling of viewport.
	// Transposes used to account for row-/column-major conventions.
	float3 t = transformPoint4x3(mean, viewmatrix);

	// Clamp the view-space x/z and y/z ratios to 1.3x the frustum extent so
	// the Jacobian stays well-behaved for points far outside the view.
	const float limx = 1.3f * tan_fovx;
	const float limy = 1.3f * tan_fovy;
	const float txtz = t.x / t.z;
	const float tytz = t.y / t.z;
	t.x = min(limx, max(-limx, txtz)) * t.z;
	t.y = min(limy, max(-limy, tytz)) * t.z;

	// Jacobian of the perspective projection at t (third row unused).
	glm::mat3 J = glm::mat3(
		focal_x / t.z, 0.0f, -(focal_x * t.x) / (t.z * t.z),
		0.0f, focal_y / t.z, -(focal_y * t.y) / (t.z * t.z),
		0, 0, 0);

	// Rotation part of the view matrix.
	glm::mat3 W = glm::mat3(
		viewmatrix[0], viewmatrix[4], viewmatrix[8],
		viewmatrix[1], viewmatrix[5], viewmatrix[9],
		viewmatrix[2], viewmatrix[6], viewmatrix[10]);

	glm::mat3 T = W * J;

	// 3D covariance expanded from its 6-element symmetric storage.
	glm::mat3 Vrk = glm::mat3(
		cov3D[0], cov3D[1], cov3D[2],
		cov3D[1], cov3D[3], cov3D[4],
		cov3D[2], cov3D[4], cov3D[5]);

	glm::mat3 cov = glm::transpose(T) * glm::transpose(Vrk) * T;

	// Apply low-pass filter: every Gaussian should be at least
	// one pixel wide/high. Discard 3rd row and column.
	cov[0][0] += 0.3f;
	cov[1][1] += 0.3f;
	return { float(cov[0][0]), float(cov[0][1]), float(cov[1][1]) };
}
|
| 112 |
+
|
| 113 |
+
// Forward method for converting scale and rotation properties of each
// Gaussian to a 3D covariance matrix in world space. Also takes care
// of quaternion normalization.
//
// Computes Sigma = (S * R)^T * (S * R) and stores its 6 unique entries
// in cov3D in upper-triangular order: xx, xy, xz, yy, yz, zz.
//   scale: per-axis extent of the Gaussian
//   mod:   global scale modifier multiplied into all three axes
//   rot:   quaternion stored as (r, x, y, z) = (w-first layout)
//   cov3D: output, array of 6 floats
__device__ void computeCov3D(const glm::vec3 scale, float mod, const glm::vec4 rot, float* cov3D)
{
	// Create scaling matrix
	glm::mat3 S = glm::mat3(1.0f);
	S[0][0] = mod * scale.x;
	S[1][1] = mod * scale.y;
	S[2][2] = mod * scale.z;

	// Normalize quaternion to get valid rotation
	// NOTE(review): the division by glm::length(rot) is commented out,
	// so `rot` is presumably normalized by the caller — confirm.
	glm::vec4 q = rot;// / glm::length(rot);
	float r = q.x;
	float x = q.y;
	float y = q.z;
	float z = q.w;

	// Compute rotation matrix from quaternion
	glm::mat3 R = glm::mat3(
		1.f - 2.f * (y * y + z * z), 2.f * (x * y - r * z), 2.f * (x * z + r * y),
		2.f * (x * y + r * z), 1.f - 2.f * (x * x + z * z), 2.f * (y * z - r * x),
		2.f * (x * z - r * y), 2.f * (y * z + r * x), 1.f - 2.f * (x * x + y * y)
	);

	glm::mat3 M = S * R;

	// Compute 3D world covariance matrix Sigma
	glm::mat3 Sigma = glm::transpose(M) * M;

	// Covariance is symmetric, only store upper right
	cov3D[0] = Sigma[0][0];
	cov3D[1] = Sigma[0][1];
	cov3D[2] = Sigma[0][2];
	cov3D[3] = Sigma[1][1];
	cov3D[4] = Sigma[1][2];
	cov3D[5] = Sigma[2][2];
}
|
| 151 |
+
|
| 152 |
+
// Perform initial steps for each Gaussian prior to rasterization.
// One thread per Gaussian: performs frustum culling, projects the mean
// to screen space, derives the 2D covariance / inverse conic, converts
// SH coefficients to RGB (unless colors are precomputed), and counts
// how many screen tiles the Gaussian's bounding rectangle touches.
// Template parameters: C = number of color channels,
// O = number of object channels (not used in this kernel's body).
template<int C, int O>
__global__ void preprocessCUDA(int P, int D, int M,
	const float* orig_points,
	const glm::vec3* scales,
	const float scale_modifier,
	const glm::vec4* rotations,
	const float* opacities,
	const float* shs,
	const float* sh_objs,
	bool* clamped,
	const float* cov3D_precomp,
	const float* colors_precomp,
	const float* viewmatrix,
	const float* projmatrix,
	const glm::vec3* cam_pos,
	const int W, int H,
	const float tan_fovx, float tan_fovy,
	const float focal_x, float focal_y,
	int* radii,
	float2* points_xy_image,
	float* depths,
	float* cov3Ds,
	float* rgb,
	float4* conic_opacity,
	const dim3 grid,
	uint32_t* tiles_touched,
	bool prefiltered)
{
	auto idx = cg::this_grid().thread_rank();
	if (idx >= P)
		return;

	// Initialize radius and touched tiles to 0. If this isn't changed,
	// this Gaussian will not be processed further.
	radii[idx] = 0;
	tiles_touched[idx] = 0;

	// Perform near culling, quit if outside.
	float3 p_view;
	if (!in_frustum(idx, orig_points, viewmatrix, projmatrix, prefiltered, p_view))
		return;

	// Transform point by projecting
	float3 p_orig = { orig_points[3 * idx], orig_points[3 * idx + 1], orig_points[3 * idx + 2] };
	float4 p_hom = transformPoint4x4(p_orig, projmatrix);
	// Epsilon guards against division by zero for degenerate w.
	float p_w = 1.0f / (p_hom.w + 0.0000001f);
	float3 p_proj = { p_hom.x * p_w, p_hom.y * p_w, p_hom.z * p_w };

	// If 3D covariance matrix is precomputed, use it, otherwise compute
	// from scaling and rotation parameters.
	const float* cov3D;
	if (cov3D_precomp != nullptr)
	{
		cov3D = cov3D_precomp + idx * 6;
	}
	else
	{
		computeCov3D(scales[idx], scale_modifier, rotations[idx], cov3Ds + idx * 6);
		cov3D = cov3Ds + idx * 6;
	}

	// Compute 2D screen-space covariance matrix
	float3 cov = computeCov2D(p_orig, focal_x, focal_y, tan_fovx, tan_fovy, cov3D, viewmatrix);

	// Invert covariance (EWA algorithm)
	float det = (cov.x * cov.z - cov.y * cov.y);
	if (det == 0.0f)
		return;
	float det_inv = 1.f / det;
	// conic = inverse of the symmetric 2x2 covariance (a, b, c).
	float3 conic = { cov.z * det_inv, -cov.y * det_inv, cov.x * det_inv };

	// Compute extent in screen space (by finding eigenvalues of
	// 2D covariance matrix). Use extent to compute a bounding rectangle
	// of screen-space tiles that this Gaussian overlaps with. Quit if
	// rectangle covers 0 tiles.
	float mid = 0.5f * (cov.x + cov.z);
	// max(0.1f, ...) keeps the discriminant positive for stability.
	float lambda1 = mid + sqrt(max(0.1f, mid * mid - det));
	float lambda2 = mid - sqrt(max(0.1f, mid * mid - det));
	// 3-sigma radius of the larger eigenvalue bounds the splat extent.
	float my_radius = ceil(3.f * sqrt(max(lambda1, lambda2)));
	float2 point_image = { ndc2Pix(p_proj.x, W), ndc2Pix(p_proj.y, H) };
	uint2 rect_min, rect_max;
	getRect(point_image, my_radius, rect_min, rect_max, grid);
	if ((rect_max.x - rect_min.x) * (rect_max.y - rect_min.y) == 0)
		return;

	// If colors have been precomputed, use them, otherwise convert
	// spherical harmonics coefficients to RGB color.
	if (colors_precomp == nullptr)
	{
		glm::vec3 result = computeColorFromSH(idx, D, M, (glm::vec3*)orig_points, *cam_pos, shs, clamped);
		rgb[idx * C + 0] = result.x;
		rgb[idx * C + 1] = result.y;
		rgb[idx * C + 2] = result.z;
	}

	// Store some useful helper data for the next steps.
	depths[idx] = p_view.z;
	// Implicit float->int conversion truncates the ceil'd radius.
	radii[idx] = my_radius;
	points_xy_image[idx] = point_image;
	// Inverse 2D covariance and opacity neatly pack into one float4
	conic_opacity[idx] = { conic.x, conic.y, conic.z, opacities[idx] };
	tiles_touched[idx] = (rect_max.y - rect_min.y) * (rect_max.x - rect_min.x);
}
|
| 256 |
+
|
| 257 |
+
// Atomic maximum for floats, built on top of integer atomicCAS by
// reinterpreting the float bits as int. Loops until the CAS succeeds
// with an unchanged baseline value. Returns the previous value stored
// at `address`.
// NOTE(review): bit-pattern comparison via atomicCAS is only
// order-correct for non-negative floats; values written here (see
// renderCUDA) are transmittances T in [0, 1], so this holds.
__device__ static float atomicMax(float* address, float val)
{
	int* address_as_i = (int*) address;
	int old = *address_as_i, assumed;
	do {
		assumed = old;
		old = ::atomicCAS(address_as_i, assumed,
			__float_as_int(::fmaxf(val, __int_as_float(assumed))));
	} while (assumed != old);
	return __int_as_float(old);
}
|
| 268 |
+
|
| 269 |
+
// Main rasterization method. Collaboratively works on one tile per
// block, each thread treats one pixel. Alternates between fetching
// and rasterizing data.
//
// In addition to alpha-blended color (out_color), this variant also
// blends per-Gaussian object features (out_objects) and, for pixels
// whose alpha_mask value exceeds 0.95, records each Gaussian's maximum
// transmittance T seen before blending it (out_max_trans, via float
// atomicMax) — used downstream for object removal/inpainting.
template <uint32_t CHANNELS, uint32_t OBJECTS>
__global__ void __launch_bounds__(BLOCK_X * BLOCK_Y)
renderCUDA(
	const uint2* __restrict__ ranges,
	const uint32_t* __restrict__ point_list,
	int W, int H,
	const float2* __restrict__ points_xy_image,
	const float* __restrict__ features,
	const float* __restrict__ obj_features,
	const float4* __restrict__ conic_opacity,
	const float* __restrict__ alpha_mask,
	float* __restrict__ final_T,
	uint32_t* __restrict__ n_contrib,
	const float* __restrict__ bg_color,
	float* __restrict__ out_color,
	float* __restrict__ out_objects,
	float* __restrict__ out_max_trans)
{
	// Identify current tile and associated min/max pixel range.
	auto block = cg::this_thread_block();
	uint32_t horizontal_blocks = (W + BLOCK_X - 1) / BLOCK_X;
	uint2 pix_min = { block.group_index().x * BLOCK_X, block.group_index().y * BLOCK_Y };
	uint2 pix_max = { min(pix_min.x + BLOCK_X, W), min(pix_min.y + BLOCK_Y , H) };
	uint2 pix = { pix_min.x + block.thread_index().x, pix_min.y + block.thread_index().y };
	uint32_t pix_id = W * pix.y + pix.x;
	float2 pixf = { (float)pix.x, (float)pix.y };

	// Check if this thread is associated with a valid pixel or outside.
	bool inside = pix.x < W&& pix.y < H;
	// Done threads can help with fetching, but don't rasterize
	bool done = !inside;

	// Load start/end range of IDs to process in bit sorted list.
	uint2 range = ranges[block.group_index().y * horizontal_blocks + block.group_index().x];
	const int rounds = ((range.y - range.x + BLOCK_SIZE - 1) / BLOCK_SIZE);
	int toDo = range.y - range.x;

	// Allocate storage for batches of collectively fetched data.
	__shared__ int collected_id[BLOCK_SIZE];
	__shared__ float2 collected_xy[BLOCK_SIZE];
	__shared__ float4 collected_conic_opacity[BLOCK_SIZE];

	// Initialize helper variables
	// T is the accumulated transmittance (starts fully transparent).
	float T = 1.0f;
	uint32_t contributor = 0;
	uint32_t last_contributor = 0;
	float C[CHANNELS] = { 0 };
	float O[OBJECTS] = { 0 }; //rendered object

	// Iterate over batches until all done or range is complete
	for (int i = 0; i < rounds; i++, toDo -= BLOCK_SIZE)
	{
		// End if entire block votes that it is done rasterizing
		int num_done = __syncthreads_count(done);
		if (num_done == BLOCK_SIZE)
			break;

		// Collectively fetch per-Gaussian data from global to shared
		int progress = i * BLOCK_SIZE + block.thread_rank();
		if (range.x + progress < range.y)
		{
			int coll_id = point_list[range.x + progress];
			collected_id[block.thread_rank()] = coll_id;
			collected_xy[block.thread_rank()] = points_xy_image[coll_id];
			collected_conic_opacity[block.thread_rank()] = conic_opacity[coll_id];
		}
		block.sync();
		// Per-pixel mask value gating the out_max_trans update below.
		float mask = alpha_mask[pix_id];
		// Iterate over current batch
		for (int j = 0; !done && j < min(BLOCK_SIZE, toDo); j++)
		{
			// Keep track of current position in range
			contributor++;

			// Resample using conic matrix (cf. "Surface
			// Splatting" by Zwicker et al., 2001)
			float2 xy = collected_xy[j];
			float2 d = { xy.x - pixf.x, xy.y - pixf.y };
			float4 con_o = collected_conic_opacity[j];
			float power = -0.5f * (con_o.x * d.x * d.x + con_o.z * d.y * d.y) - con_o.y * d.x * d.y;
			if (power > 0.0f)
				continue;

			// Eq. (2) from 3D Gaussian splatting paper.
			// Obtain alpha by multiplying with Gaussian opacity
			// and its exponential falloff from mean.
			// Avoid numerical instabilities (see paper appendix).
			float alpha = min(0.99f, con_o.w * exp(power));
			if (alpha < 1.0f / 255.0f)
				continue;
			float test_T = T * (1 - alpha);
			if (test_T < 0.0001f)
			{
				done = true;
				continue;
			}

			// Eq. (3) from 3D Gaussian splatting paper.
			for (int ch = 0; ch < CHANNELS; ch++){
				C[ch] += features[collected_id[j] * CHANNELS + ch] * alpha * T;}
			// Same blending applied to object/identity features.
			for (int ch = 0; ch < OBJECTS; ch++){
				O[ch] += obj_features[collected_id[j] * OBJECTS + ch] * alpha * T;}

			// Inside the mask, remember the largest transmittance at
			// which this Gaussian contributed (pre-blend T).
			if (mask > 0.95f){
				atomicMax(out_max_trans + collected_id[j], T);
			}

			T = test_T;

			// Keep track of last range entry to update this
			// pixel.
			last_contributor = contributor;
		}
	}

	// All threads that treat valid pixel write out their final
	// rendering data to the frame and auxiliary buffers.
	if (inside)
	{
		final_T[pix_id] = T;
		n_contrib[pix_id] = last_contributor;
		for (int ch = 0; ch < CHANNELS; ch++){
			out_color[ch * H * W + pix_id] = C[ch] + T * bg_color[ch];}
		for (int ch = 0; ch < OBJECTS; ch++){
			out_objects[ch * H * W + pix_id] = O[ch];}
	}
}
|
| 399 |
+
|
| 400 |
+
// Host-side wrapper: launches renderCUDA with the compile-time
// channel counts (NUM_CHANNELS color channels, NUM_OBJECTS object
// channels) over the given tile grid / per-tile block configuration.
void FORWARD::render(
	const dim3 grid, dim3 block,
	const uint2* ranges,
	const uint32_t* point_list,
	int W, int H,
	const float2* means2D,
	const float* colors,
	const float* objects,
	const float4* conic_opacity,
	const float* alpha,
	float* final_T,
	uint32_t* n_contrib,
	const float* bg_color,
	float* out_color,
	float* out_objects,
	float* out_max_trans)
{
	renderCUDA<NUM_CHANNELS, NUM_OBJECTS> << <grid, block >> > (
		ranges,
		point_list,
		W, H,
		means2D,
		colors,
		objects,
		conic_opacity,
		alpha,
		final_T,
		n_contrib,
		bg_color,
		out_color,
		out_objects,
		out_max_trans);
}
|
| 433 |
+
|
| 434 |
+
// Host-side wrapper: launches preprocessCUDA with one thread per
// Gaussian (256 threads per block) using the compile-time channel
// counts. Note the kernel takes (tan_fovx, tan_fovy) before
// (focal_x, focal_y), opposite to this wrapper's parameter order —
// the arguments below are forwarded in the kernel's expected order.
void FORWARD::preprocess(int P, int D, int M,
	const float* means3D,
	const glm::vec3* scales,
	const float scale_modifier,
	const glm::vec4* rotations,
	const float* opacities,
	const float* shs,
	const float* sh_objs,
	bool* clamped,
	const float* cov3D_precomp,
	const float* colors_precomp,
	const float* viewmatrix,
	const float* projmatrix,
	const glm::vec3* cam_pos,
	const int W, int H,
	const float focal_x, float focal_y,
	const float tan_fovx, float tan_fovy,
	int* radii,
	float2* means2D,
	float* depths,
	float* cov3Ds,
	float* rgb,
	float4* conic_opacity,
	const dim3 grid,
	uint32_t* tiles_touched,
	bool prefiltered)
{
	preprocessCUDA<NUM_CHANNELS, NUM_OBJECTS> << <(P + 255) / 256, 256 >> > (
		P, D, M,
		means3D,
		scales,
		scale_modifier,
		rotations,
		opacities,
		shs,
		sh_objs,
		clamped,
		cov3D_precomp,
		colors_precomp,
		viewmatrix,
		projmatrix,
		cam_pos,
		W, H,
		tan_fovx, tan_fovy,
		focal_x, focal_y,
		radii,
		means2D,
		depths,
		cov3Ds,
		rgb,
		conic_opacity,
		grid,
		tiles_touched,
		prefiltered
		);
}
|
gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.h
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (C) 2023, Gaussian-Grouping
|
| 3 |
+
* Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
|
| 4 |
+
* All rights reserved.
|
| 5 |
+
* ------------------------------------------------------------------------
|
| 6 |
+
* Modified from codes in Gaussian-Splatting
|
| 7 |
+
* GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 8 |
+
*/
|
| 9 |
+
|
| 10 |
+
#ifndef CUDA_RASTERIZER_FORWARD_H_INCLUDED
|
| 11 |
+
#define CUDA_RASTERIZER_FORWARD_H_INCLUDED
|
| 12 |
+
|
| 13 |
+
#include <cuda.h>
|
| 14 |
+
#include "cuda_runtime.h"
|
| 15 |
+
#include "device_launch_parameters.h"
|
| 16 |
+
#define GLM_FORCE_CUDA
|
| 17 |
+
#include <glm/glm.hpp>
|
| 18 |
+
|
| 19 |
+
// Public interface of the forward rasterization pass
// (implemented in forward.cu).
namespace FORWARD
{
	// Perform initial steps for each Gaussian prior to rasterization.
	// Culls, projects, builds 2D conics, converts SHs to RGB, and
	// counts touched tiles (outputs: radii, points_xy_image, depths,
	// cov3Ds, colors, conic_opacity, tiles_touched).
	void preprocess(int P, int D, int M,
		const float* orig_points,
		const glm::vec3* scales,
		const float scale_modifier,
		const glm::vec4* rotations,
		const float* opacities,
		const float* shs,
		const float* sh_objs,
		bool* clamped,
		const float* cov3D_precomp,
		const float* colors_precomp,
		const float* viewmatrix,
		const float* projmatrix,
		const glm::vec3* cam_pos,
		const int W, int H,
		const float focal_x, float focal_y,
		const float tan_fovx, float tan_fovy,
		int* radii,
		float2* points_xy_image,
		float* depths,
		float* cov3Ds,
		float* colors,
		float4* conic_opacity,
		const dim3 grid,
		uint32_t* tiles_touched,
		bool prefiltered);

	// Main rasterization method.
	// Alpha-blends sorted Gaussians per tile into color (out_color)
	// and object-feature (out_objects) buffers; records per-Gaussian
	// maximum transmittance inside the alpha mask (out_max_trans).
	void render(
		const dim3 grid, dim3 block,
		const uint2* ranges,
		const uint32_t* point_list,
		int W, int H,
		const float2* points_xy_image,
		const float* features,
		const float* obj_features,
		const float4* conic_opacity,
		const float* alpha,
		float* final_T,
		uint32_t* n_contrib,
		const float* bg_color,
		float* out_color,
		float* out_objects,
		float* out_max_trans);
}
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
#endif
|
gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer.h
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (C) 2023, Gaussian-Grouping
|
| 3 |
+
* Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
|
| 4 |
+
* All rights reserved.
|
| 5 |
+
* ------------------------------------------------------------------------
|
| 6 |
+
* Modified from codes in Gaussian-Splatting
|
| 7 |
+
* GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 8 |
+
*/
|
| 9 |
+
|
| 10 |
+
#ifndef CUDA_RASTERIZER_H_INCLUDED
|
| 11 |
+
#define CUDA_RASTERIZER_H_INCLUDED
|
| 12 |
+
|
| 13 |
+
#include <vector>
|
| 14 |
+
#include <functional>
|
| 15 |
+
|
| 16 |
+
// Top-level entry points of the differentiable Gaussian rasterizer.
// All methods are static; state lives in caller-provided buffers
// obtained via the std::function allocator callbacks.
namespace CudaRasterizer
{
	class Rasterizer
	{
	public:

		// Marks which of the P Gaussians pass the coarse view-frustum
		// test (writes one bool per Gaussian into `present`).
		static void markVisible(
			int P,
			float* means3D,
			float* viewmatrix,
			float* projmatrix,
			bool* present);

		// Forward rendering pass. The three buffer callbacks are asked
		// to (re)allocate scratch memory of the requested size and
		// return a pointer to it. Returns the number of rendered
		// (binned, depth-sorted) Gaussian instances, which must be
		// passed back to backward() as R.
		static int forward(
			std::function<char* (size_t)> geometryBuffer,
			std::function<char* (size_t)> binningBuffer,
			std::function<char* (size_t)> imageBuffer,
			const int P, int D, int M,
			const float* background,
			const int width, int height,
			const float* means3D,
			const float* shs,
			const float* sh_objs,
			const float* colors_precomp,
			const float* opacities,
			const float* scales,
			const float scale_modifier,
			const float* rotations,
			const float* cov3D_precomp,
			const float* alpha,
			const float* viewmatrix,
			const float* projmatrix,
			const float* cam_pos,
			const float tan_fovx, float tan_fovy,
			const bool prefiltered,
			float* out_color,
			float* out_objects,
			float* out_max_trans,
			int* radii = nullptr,
			bool debug = false);

		// Backward pass: consumes the scratch buffers produced by
		// forward() and the incoming pixel gradients (dL_dpix,
		// dL_dpix_obj), producing gradients w.r.t. all Gaussian
		// parameters (dL_d* outputs).
		static void backward(
			const int P, int D, int M, int R,
			const float* background,
			const int width, int height,
			const float* means3D,
			const float* shs,
			const float* sh_objs,
			const float* colors_precomp,
			const float* scales,
			const float scale_modifier,
			const float* rotations,
			const float* cov3D_precomp,
			const float* viewmatrix,
			const float* projmatrix,
			const float* campos,
			const float tan_fovx, float tan_fovy,
			const int* radii,
			char* geom_buffer,
			char* binning_buffer,
			char* image_buffer,
			const float* dL_dpix,
			const float* dL_dpix_obj,
			float* dL_dmean2D,
			float* dL_dconic,
			float* dL_dopacity,
			float* dL_dcolor,
			float* dL_dobjects,
			float* dL_dmean3D,
			float* dL_dcov3D,
			float* dL_dsh,
			float* dL_dscale,
			float* dL_drot,
			bool debug);
	};
};
|
| 92 |
+
|
| 93 |
+
#endif
|
gaussian-grouping/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.cu
ADDED
|
@@ -0,0 +1,448 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (C) 2023, Gaussian-Grouping
|
| 3 |
+
* Gaussian-Grouping research group, https://github.com/lkeab/gaussian-grouping
|
| 4 |
+
* All rights reserved.
|
| 5 |
+
* ------------------------------------------------------------------------
|
| 6 |
+
* Modified from codes in Gaussian-Splatting
|
| 7 |
+
* GRAPHDECO research group, https://team.inria.fr/graphdeco
|
| 8 |
+
*/
|
| 9 |
+
|
| 10 |
+
#include "rasterizer_impl.h"
|
| 11 |
+
#include <iostream>
|
| 12 |
+
#include <fstream>
|
| 13 |
+
#include <algorithm>
|
| 14 |
+
#include <numeric>
|
| 15 |
+
#include <cuda.h>
|
| 16 |
+
#include "cuda_runtime.h"
|
| 17 |
+
#include "device_launch_parameters.h"
|
| 18 |
+
#include <cub/cub.cuh>
|
| 19 |
+
#include <cub/device/device_radix_sort.cuh>
|
| 20 |
+
#define GLM_FORCE_CUDA
|
| 21 |
+
#include <glm/glm.hpp>
|
| 22 |
+
|
| 23 |
+
#include <cooperative_groups.h>
|
| 24 |
+
#include <cooperative_groups/reduce.h>
|
| 25 |
+
namespace cg = cooperative_groups;
|
| 26 |
+
|
| 27 |
+
#include "auxiliary.h"
|
| 28 |
+
#include "forward.h"
|
| 29 |
+
#include "backward.h"
|
| 30 |
+
|
| 31 |
+
// Helper function to find the next-highest bit of the MSB
// on the CPU.
// Binary-searches for the smallest shift that clears all set bits,
// i.e. the 1-based position just past the most significant set bit
// (equivalently: bit-width of n, with a minimum result of 1).
uint32_t getHigherMsb(uint32_t n)
{
	// Start in the middle of the 32-bit range; each iteration halves
	// the adjustment, mirroring a classic bisection.
	uint32_t pos = sizeof(n) * 4;
	for (uint32_t half = pos / 2; half > 0; half /= 2)
	{
		if (n >> pos)
			pos += half;
		else
			pos -= half;
	}
	// Final correction: bump by one if a bit at `pos` survives.
	if (n >> pos)
		pos++;
	return pos;
}
|
| 49 |
+
|
| 50 |
+
// Wrapper method to call auxiliary coarse frustum containment test.
// Mark all Gaussians that pass it.
// One thread per Gaussian; writes one bool per point into `present`.
__global__ void checkFrustum(int P,
	const float* orig_points,
	const float* viewmatrix,
	const float* projmatrix,
	bool* present)
{
	auto idx = cg::this_grid().thread_rank();
	if (idx >= P)
		return;

	// p_view is only needed as an output slot for in_frustum here.
	float3 p_view;
	present[idx] = in_frustum(idx, orig_points, viewmatrix, projmatrix, false, p_view);
}
|
| 65 |
+
|
| 66 |
+
// Generates one key/value pair for all Gaussian / tile overlaps.
// Run once per Gaussian (1:N mapping).
// `offsets` is the inclusive prefix sum of tiles_touched, so
// offsets[idx - 1] is this Gaussian's first write slot.
__global__ void duplicateWithKeys(
	int P,
	const float2* points_xy,
	const float* depths,
	const uint32_t* offsets,
	uint64_t* gaussian_keys_unsorted,
	uint32_t* gaussian_values_unsorted,
	int* radii,
	dim3 grid)
{
	auto idx = cg::this_grid().thread_rank();
	if (idx >= P)
		return;

	// Generate no key/value pair for invisible Gaussians
	if (radii[idx] > 0)
	{
		// Find this Gaussian's offset in buffer for writing keys/values.
		uint32_t off = (idx == 0) ? 0 : offsets[idx - 1];
		uint2 rect_min, rect_max;

		getRect(points_xy[idx], radii[idx], rect_min, rect_max, grid);

		// For each tile that the bounding rect overlaps, emit a
		// key/value pair. The key is | tile ID | depth |,
		// and the value is the ID of the Gaussian. Sorting the values
		// with this key yields Gaussian IDs in a list, such that they
		// are first sorted by tile and then by depth.
		for (int y = rect_min.y; y < rect_max.y; y++)
		{
			for (int x = rect_min.x; x < rect_max.x; x++)
			{
				// Tile index in the upper 32 bits...
				uint64_t key = y * grid.x + x;
				key <<= 32;
				// ...raw float depth bits in the lower 32 bits.
				key |= *((uint32_t*)&depths[idx]);
				gaussian_keys_unsorted[off] = key;
				gaussian_values_unsorted[off] = idx;
				off++;
			}
		}
	}
}
|
| 110 |
+
|
| 111 |
+
// Check keys to see if it is at the start/end of one tile's range in
// the full sorted list. If yes, write start/end of this tile.
// Run once per instanced (duplicated) Gaussian ID.
// L is the total number of sorted key/value entries; ranges[tile]
// receives the [start, end) span of that tile's entries.
__global__ void identifyTileRanges(int L, uint64_t* point_list_keys, uint2* ranges)
{
	auto idx = cg::this_grid().thread_rank();
	if (idx >= L)
		return;

	// Read tile ID from key. Update start/end of tile range if at limit.
	uint64_t key = point_list_keys[idx];
	uint32_t currtile = key >> 32;
	if (idx == 0)
		ranges[currtile].x = 0;
	else
	{
		// A tile boundary exists wherever two neighboring sorted keys
		// carry different tile IDs.
		uint32_t prevtile = point_list_keys[idx - 1] >> 32;
		if (currtile != prevtile)
		{
			ranges[prevtile].y = idx;
			ranges[currtile].x = idx;
		}
	}
	// Last entry closes the final tile's range.
	if (idx == L - 1)
		ranges[currtile].y = L;
}
|
| 137 |
+
|
| 138 |
+
// Mark Gaussians as visible/invisible, based on view frustum testing
// Host-side wrapper: launches checkFrustum with one thread per
// Gaussian (256 threads per block).
void CudaRasterizer::Rasterizer::markVisible(
	int P,
	float* means3D,
	float* viewmatrix,
	float* projmatrix,
	bool* present)
{
	checkFrustum << <(P + 255) / 256, 256 >> > (
		P,
		means3D,
		viewmatrix, projmatrix,
		present);
}
|
| 152 |
+
|
| 153 |
+
// Carves per-Gaussian scratch arrays for P Gaussians out of a single
// preallocated chunk, advancing `chunk` past each 128-byte-aligned
// sub-buffer. The chunk must have been sized by required<GeometryState>(P).
CudaRasterizer::GeometryState CudaRasterizer::GeometryState::fromChunk(char*& chunk, size_t P)
{
	GeometryState geom;
	obtain(chunk, geom.depths, P, 128);
	obtain(chunk, geom.clamped, P * 3, 128);
	obtain(chunk, geom.internal_radii, P, 128);
	obtain(chunk, geom.means2D, P, 128);
	obtain(chunk, geom.cov3D, P * 6, 128);
	obtain(chunk, geom.conic_opacity, P, 128);
	obtain(chunk, geom.rgb, P * 3, 128);
	obtain(chunk, geom.tiles_touched, P, 128);
	// Null d_temp_storage: CUB only writes the required temp size
	// into geom.scan_size here; no scan is performed.
	cub::DeviceScan::InclusiveSum(nullptr, geom.scan_size, geom.tiles_touched, geom.tiles_touched, P);
	obtain(chunk, geom.scanning_space, geom.scan_size, 128);
	obtain(chunk, geom.point_offsets, P, 128);
	return geom;
}
|
| 169 |
+
|
| 170 |
+
// Carves per-pixel scratch arrays for N pixels (width * height) out of
// a preallocated chunk, each 128-byte aligned.
CudaRasterizer::ImageState CudaRasterizer::ImageState::fromChunk(char*& chunk, size_t N)
{
	ImageState img;
	obtain(chunk, img.accum_alpha, N, 128);
	obtain(chunk, img.n_contrib, N, 128);
	obtain(chunk, img.ranges, N, 128);
	return img;
}
|
| 178 |
+
|
| 179 |
+
// Carves sorting buffers for P duplicated Gaussian instances out of a
// preallocated chunk, each 128-byte aligned.
CudaRasterizer::BinningState CudaRasterizer::BinningState::fromChunk(char*& chunk, size_t P)
{
	BinningState binning;
	obtain(chunk, binning.point_list, P, 128);
	obtain(chunk, binning.point_list_unsorted, P, 128);
	obtain(chunk, binning.point_list_keys, P, 128);
	obtain(chunk, binning.point_list_keys_unsorted, P, 128);
	// Null d_temp_storage: CUB only writes the required temp size
	// into binning.sorting_size here; no sort is performed.
	cub::DeviceRadixSort::SortPairs(
		nullptr, binning.sorting_size,
		binning.point_list_keys_unsorted, binning.point_list_keys,
		binning.point_list_unsorted, binning.point_list, P);
	obtain(chunk, binning.list_sorting_space, binning.sorting_size, 128);
	return binning;
}
|
| 193 |
+
|
| 194 |
+
// Forward rendering procedure for differentiable rasterization
// of Gaussians. Returns the total number of rendered Gaussian
// instances (Gaussian/tile pairs), which the caller must pass back
// as R to backward(). The three buffer functors let the caller
// (e.g. a PyTorch binding) resize its own storage to the requested
// byte count and return a pointer into it.
int CudaRasterizer::Rasterizer::forward(
	std::function<char* (size_t)> geometryBuffer,
	std::function<char* (size_t)> binningBuffer,
	std::function<char* (size_t)> imageBuffer,
	const int P, int D, int M,
	const float* background,
	const int width, int height,
	const float* means3D,
	const float* shs,
	const float* sh_objs,
	const float* colors_precomp,
	const float* opacities,
	const float* scales,
	const float scale_modifier,
	const float* rotations,
	const float* cov3D_precomp,
	const float* alpha,
	const float* viewmatrix,
	const float* projmatrix,
	const float* cam_pos,
	const float tan_fovx, float tan_fovy,
	const bool prefiltered,
	float* out_color,
	float* out_objects,
	float* out_max_trans,
	int* radii,
	bool debug)
{
	// Robustness fix: with no Gaussians there is nothing to rasterize,
	// and the cudaMemcpy below would read point_offsets[P - 1] ==
	// point_offsets[-1], an out-of-bounds device access. Bail out early.
	if (P == 0)
		return 0;

	const float focal_y = height / (2.0f * tan_fovy);
	const float focal_x = width / (2.0f * tan_fovx);

	// Lay out per-Gaussian scratch state over caller-resized storage.
	size_t chunk_size = required<GeometryState>(P);
	char* chunkptr = geometryBuffer(chunk_size);
	GeometryState geomState = GeometryState::fromChunk(chunkptr, P);

	// If the caller does not want radii back, use internal scratch.
	if (radii == nullptr)
	{
		radii = geomState.internal_radii;
	}

	dim3 tile_grid((width + BLOCK_X - 1) / BLOCK_X, (height + BLOCK_Y - 1) / BLOCK_Y, 1);
	dim3 block(BLOCK_X, BLOCK_Y, 1);

	// Dynamically resize image-based auxiliary buffers during training
	size_t img_chunk_size = required<ImageState>(width * height);
	char* img_chunkptr = imageBuffer(img_chunk_size);
	ImageState imgState = ImageState::fromChunk(img_chunkptr, width * height);

	if (NUM_CHANNELS != 3 && colors_precomp == nullptr)
	{
		throw std::runtime_error("For non-RGB, provide precomputed Gaussian colors!");
	}

	// Run preprocessing per-Gaussian (transformation, bounding, conversion of SHs to RGB)
	CHECK_CUDA(FORWARD::preprocess(
		P, D, M,
		means3D,
		(glm::vec3*)scales,
		scale_modifier,
		(glm::vec4*)rotations,
		opacities,
		shs,
		sh_objs,
		geomState.clamped,
		cov3D_precomp,
		colors_precomp,
		viewmatrix, projmatrix,
		(glm::vec3*)cam_pos,
		width, height,
		focal_x, focal_y,
		tan_fovx, tan_fovy,
		radii,
		geomState.means2D,
		geomState.depths,
		geomState.cov3D,
		geomState.rgb,
		geomState.conic_opacity,
		tile_grid,
		geomState.tiles_touched,
		prefiltered
	), debug)

	// Compute prefix sum over full list of touched tile counts by Gaussians
	// E.g., [2, 3, 0, 2, 1] -> [2, 5, 5, 7, 8]
	CHECK_CUDA(cub::DeviceScan::InclusiveSum(geomState.scanning_space, geomState.scan_size, geomState.tiles_touched, geomState.point_offsets, P), debug)

	// Retrieve total number of Gaussian instances to launch and resize aux buffers.
	// The last prefix-sum entry is the total instance (Gaussian/tile pair) count.
	int num_rendered;
	CHECK_CUDA(cudaMemcpy(&num_rendered, geomState.point_offsets + P - 1, sizeof(int), cudaMemcpyDeviceToHost), debug);

	size_t binning_chunk_size = required<BinningState>(num_rendered);
	char* binning_chunkptr = binningBuffer(binning_chunk_size);
	BinningState binningState = BinningState::fromChunk(binning_chunkptr, num_rendered);

	// For each instance to be rendered, produce adequate [ tile | depth ] key
	// and corresponding duplicated Gaussian indices to be sorted
	duplicateWithKeys << <(P + 255) / 256, 256 >> > (
		P,
		geomState.means2D,
		geomState.depths,
		geomState.point_offsets,
		binningState.point_list_keys_unsorted,
		binningState.point_list_unsorted,
		radii,
		tile_grid)
	CHECK_CUDA(, debug)

	// Only the bits that can actually vary in the tile part of the key
	// need sorting; the low 32 bits hold the depth.
	int bit = getHigherMsb(tile_grid.x * tile_grid.y);

	// Sort complete list of (duplicated) Gaussian indices by keys
	CHECK_CUDA(cub::DeviceRadixSort::SortPairs(
		binningState.list_sorting_space,
		binningState.sorting_size,
		binningState.point_list_keys_unsorted, binningState.point_list_keys,
		binningState.point_list_unsorted, binningState.point_list,
		num_rendered, 0, 32 + bit), debug)

	CHECK_CUDA(cudaMemset(imgState.ranges, 0, tile_grid.x * tile_grid.y * sizeof(uint2)), debug);

	// Identify start and end of per-tile workloads in sorted list
	if (num_rendered > 0)
		identifyTileRanges << <(num_rendered + 255) / 256, 256 >> > (
			num_rendered,
			binningState.point_list_keys,
			imgState.ranges);
	CHECK_CUDA(, debug)

	// Let each tile blend its range of Gaussians independently in parallel.
	// Use caller-precomputed colors if given, else the SH-derived RGB.
	const float* feature_ptr = colors_precomp != nullptr ? colors_precomp : geomState.rgb;
	CHECK_CUDA(FORWARD::render(
		tile_grid, block,
		imgState.ranges,
		binningState.point_list,
		width, height,
		geomState.means2D,
		feature_ptr,
		sh_objs,
		geomState.conic_opacity,
		alpha,
		imgState.accum_alpha,
		imgState.n_contrib,
		background,
		out_color,
		out_objects,
		out_max_trans), debug)

	return num_rendered;
}
|
| 344 |
+
|
| 345 |
+
// Produce necessary gradients for optimization, corresponding
// to forward render pass. P/D/M mirror forward(); R is the instance
// count forward() returned, and the three char* buffers are the very
// scratch buffers forward() filled, re-interpreted here.
void CudaRasterizer::Rasterizer::backward(
	const int P, int D, int M, int R,
	const float* background,
	const int width, int height,
	const float* means3D,
	const float* shs,
	const float* sh_objs,
	const float* colors_precomp,
	const float* scales,
	const float scale_modifier,
	const float* rotations,
	const float* cov3D_precomp,
	const float* viewmatrix,
	const float* projmatrix,
	const float* campos,
	const float tan_fovx, float tan_fovy,
	const int* radii,
	char* geom_buffer,
	char* binning_buffer,
	char* img_buffer,
	const float* dL_dpix,
	const float* dL_dpix_obj,
	float* dL_dmean2D,
	float* dL_dconic,
	float* dL_dopacity,
	float* dL_dcolor,
	float* dL_dobjects,
	float* dL_dmean3D,
	float* dL_dcov3D,
	float* dL_dsh,
	float* dL_dscale,
	float* dL_drot,
	bool debug)
{
	// Re-materialize the state structs over the forward pass' scratch
	// buffers; no new device memory is allocated here.
	GeometryState geom = GeometryState::fromChunk(geom_buffer, P);
	BinningState binning = BinningState::fromChunk(binning_buffer, R);
	ImageState img = ImageState::fromChunk(img_buffer, width * height);

	// Fall back to the radii cached during forward if none were passed.
	if (radii == nullptr)
		radii = geom.internal_radii;

	const float focal_x = width / (2.0f * tan_fovx);
	const float focal_y = height / (2.0f * tan_fovy);

	const dim3 tile_grid((width + BLOCK_X - 1) / BLOCK_X, (height + BLOCK_Y - 1) / BLOCK_Y, 1);
	const dim3 block(BLOCK_X, BLOCK_Y, 1);

	// Per-pixel loss gradients -> gradients w.r.t. 2D means, conic
	// matrices, opacities, colors and object features. Feed the blend
	// step whichever color source the forward pass used: precomputed
	// colors if supplied, otherwise the SH-derived RGB stored in geom.
	const float* colors_in = (colors_precomp != nullptr) ? colors_precomp : geom.rgb;
	const float* objects_in = sh_objs;
	CHECK_CUDA(BACKWARD::render(
		tile_grid,
		block,
		img.ranges,
		binning.point_list,
		width, height,
		background,
		geom.means2D,
		geom.conic_opacity,
		colors_in,
		objects_in,
		img.accum_alpha,
		img.n_contrib,
		dL_dpix,
		dL_dpix_obj,
		(float3*)dL_dmean2D,
		(float4*)dL_dconic,
		dL_dopacity,
		dL_dcolor,
		dL_dobjects), debug)

	// Take care of the rest of preprocessing. Was the precomputed
	// covariance given to us or a scales/rot pair? If precomputed,
	// pass that; if not, use the one we computed ourselves.
	const float* cov3D_in = (cov3D_precomp != nullptr) ? cov3D_precomp : geom.cov3D;
	CHECK_CUDA(BACKWARD::preprocess(P, D, M,
		(float3*)means3D,
		radii,
		shs,
		geom.clamped,
		(glm::vec3*)scales,
		(glm::vec4*)rotations,
		scale_modifier,
		cov3D_in,
		viewmatrix,
		projmatrix,
		focal_x, focal_y,
		tan_fovx, tan_fovy,
		(glm::vec3*)campos,
		(float3*)dL_dmean2D,
		dL_dconic,
		(glm::vec3*)dL_dmean3D,
		dL_dcolor,
		dL_dcov3D,
		dL_dsh,
		(glm::vec3*)dL_dscale,
		(glm::vec4*)dL_drot), debug)
}
|