diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..fc93d7ff417499467193153fdaf9ac2599e7e0df 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +*.png filter=lfs diff=lfs merge=lfs -text +*.glb filter=lfs diff=lfs merge=lfs -text +*.mp4 filter=lfs diff=lfs merge=lfs -text +*.ply filter=lfs diff=lfs merge=lfs -text +*.eot filter=lfs diff=lfs merge=lfs -text +*.ttf filter=lfs diff=lfs merge=lfs -text diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/README.md b/README.md
index 1258cdaa3a2f2ed7b1c71972c9e5811c45ec4151..8b6ad6a181e50316c36299ded85337eaffcb7ff8 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,72 @@
 ---
 title: GuideFlow3D
-emoji: 📊
-colorFrom: purple
-colorTo: yellow
+emoji: 🤗
+colorFrom: yellow
+colorTo: blue
+app_file: demos/run_gradio_demo.py
 sdk: gradio
 sdk_version: 6.0.1
-app_file: app.py
 pinned: false
 license: apache-2.0
+python_version: "3.12"
 short_description: A HF Space that demonstrates all use-cases for GuideFlow3D
 ---
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+GuideFlow3D: Optimization-Guided Rectified Flow For Appearance Transfer
+
+Sayan Deb Sarkar 1 . Sinisa Stekovic 2 . Vincent Lepetit 2 . Iro Armeni 1
+
+Neural Information Processing Systems (NeurIPS) 2025
+
+1 Stanford University · 2 ENPC, IP Paris
+
+[![arXiv](https://img.shields.io/badge/arXiv-blue?logo=arxiv&color=%23B31B1B)](https://arxiv.org/abs/2510.16136)
+[![ProjectPage](https://img.shields.io/badge/Project_Page-GuideFlow3D-blue)](https://sayands.github.io/guideflow3d)
+[![License](https://img.shields.io/badge/License-Apache--2.0-929292)](https://www.apache.org/licenses/LICENSE-2.0)
+
+TL;DR: 3D appearance transfer pipeline robust to strong geometric variations between objects.
+
+## 📃 Abstract
+
+Transferring appearance to 3D assets using different representations of the appearance object - such as images or text - has garnered interest due to its wide range of applications in industries like gaming, augmented reality, and digital content creation. However, state-of-the-art methods still fail when the geometry of the input and appearance objects differs significantly. A straightforward approach is to directly apply a 3D generative model, but we show that this ultimately fails to produce appealing results. Instead, we propose a principled approach inspired by universal guidance. Given a pretrained rectified flow model conditioned on image or text, our training-free method interacts with the sampling process by periodically adding guidance. This guidance can be modeled as a differentiable loss function, and we experiment with two types of guidance, including part-aware losses for appearance and self-similarity. Our experiments show that our approach successfully transfers texture and geometric details to the input 3D asset, outperforming baselines both qualitatively and quantitatively. We also show that traditional metrics are not suitable for evaluating this task, due to their inability to focus on local details and to compare dissimilar inputs in the absence of ground-truth data. We thus evaluate appearance transfer quality with a GPT-based system that objectively ranks outputs, ensuring robust and human-like assessment, as further confirmed by our user study. Beyond the showcased scenarios, our method is general and could be extended to different types of diffusion models and guidance functions.
+
+## :newspaper: News
+
+- [2025-09] GuideFlow3D is accepted to **NeurIPS 2025** 🔥 See you in San Diego!
+
+## 🚧 Code Release
+
+⏳ Code and data will be released by the end of November! Stay tuned for updates. In the meantime, a minimal sketch of the guided sampling loop appears below.
+
+## 📧 Contact
+
+If you have any questions regarding this project, please use the GitHub issue tracker or contact Sayan Deb Sarkar (sdsarkar@stanford.edu).
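Since the official code is not yet public, here is a minimal, hypothetical sketch of the sampling loop the abstract describes: Euler integration of a pretrained rectified flow, interrupted every few steps by a gradient step on a differentiable guidance loss. All names (`velocity_model`, `guidance_loss`, `guide_every`, `guide_weight`) are illustrative assumptions, not the GuideFlow3D API.

```python
import torch

def guided_sample(velocity_model, guidance_loss, x, cond,
                  num_steps=50, guide_every=5, guide_weight=1.0):
    """Universal-guidance-style sampling sketch: a frozen rectified flow is
    integrated with Euler steps, and the sample is periodically nudged along
    the negative gradient of a differentiable guidance loss.
    All callables and hyperparameters here are assumed stand-ins."""
    dt = 1.0 / num_steps
    for i in range(num_steps):
        t = torch.full((x.shape[0],), i * dt, device=x.device)
        with torch.no_grad():
            v = velocity_model(x, t, cond)      # pretrained, frozen velocity field
        x = x + dt * v                          # plain Euler step along the flow
        if (i + 1) % guide_every == 0:          # periodically add guidance
            x_g = x.detach().requires_grad_(True)
            loss = guidance_loss(x_g)           # e.g. a part-aware appearance
                                                # or self-similarity objective
            grad, = torch.autograd.grad(loss, x_g)
            x = (x_g - guide_weight * grad).detach()
    return x
```

Because the guidance enters only through `guidance_loss`, swapping in a different differentiable objective, or a different flow or diffusion sampler, leaves the rest of the loop untouched, which is the generality the abstract points to.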
+
+## :page_facing_up: Citation
+
+```bibtex
+@inproceedings{sayandsarkar_2025_guideflow3d,
+  author    = {Deb Sarkar, Sayan and Stekovic, Sinisa and Lepetit, Vincent and Armeni, Iro},
+  title     = {GuideFlow3D: Optimization-Guided Rectified Flow For 3D Appearance Transfer},
+  booktitle = {Advances in Neural Information Processing Systems (NeurIPS)},
+  year      = {2025},
+}
+```
diff --git a/all_outputs/app_image.png b/all_outputs/app_image.png new file mode 100644 index 0000000000000000000000000000000000000000..b885c66a90e76f0ec3dc8b01f097acde430482ef --- /dev/null +++ b/all_outputs/app_image.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd392925312f8fd4320249ce368fc290d9c13b1e4ec04de2bbc49626402006b9 +size 2818576 diff --git a/all_outputs/app_mesh.glb b/all_outputs/app_mesh.glb new file mode 100644 index 0000000000000000000000000000000000000000..56f2fc0e3595e814b34f12925d9e2d28ae4c7551 --- /dev/null +++ b/all_outputs/app_mesh.glb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:681dfff877193617153577c980d1dee0064df690a841a58dae5940fc6cbcdac6 +size 55292072 diff --git a/all_outputs/app_mesh.hash b/all_outputs/app_mesh.hash new file mode 100644 index 0000000000000000000000000000000000000000..b0063446848d2999b69b7f60ee833e1bdbb3a90b --- /dev/null +++ b/all_outputs/app_mesh.hash @@ -0,0 +1 @@ +01d9494d0538727f6423c7ceb6f1f8e32185e99f0c3c5086a01114ee2ed7977f \ No newline at end of file diff --git a/all_outputs/app_mesh_zup.glb b/all_outputs/app_mesh_zup.glb new file mode 100644 index 0000000000000000000000000000000000000000..56f2fc0e3595e814b34f12925d9e2d28ae4c7551 --- /dev/null +++ b/all_outputs/app_mesh_zup.glb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:681dfff877193617153577c980d1dee0064df690a841a58dae5940fc6cbcdac6 +size 55292072 diff --git a/all_outputs/app_renders/chunk00_000.png b/all_outputs/app_renders/chunk00_000.png new file mode 100644 index 0000000000000000000000000000000000000000..d904cac344b22690a0ba00185ead1c619c610376 --- /dev/null +++ b/all_outputs/app_renders/chunk00_000.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34cb40de51f443888037a743c165bfd073b408ad3f7a63ce08c01e8dfe87af2d +size 201138 diff --git a/all_outputs/app_renders/chunk00_001.png b/all_outputs/app_renders/chunk00_001.png new file mode 100644 index 0000000000000000000000000000000000000000..6a2cf7cef36d9f655a4fe6d0539f3631ebd2828d --- /dev/null +++ b/all_outputs/app_renders/chunk00_001.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7146e5f04ebbb0afc1a7d54fd3f18456b35396ddd587fcb950b51c9516d89570 +size 207863 diff --git a/all_outputs/app_renders/chunk00_002.png b/all_outputs/app_renders/chunk00_002.png new file mode 100644 index 0000000000000000000000000000000000000000..bf5e6f20f819b8dbb3a8e84468f58737e7a28faa --- /dev/null +++ b/all_outputs/app_renders/chunk00_002.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c7f83fef5a78acee2c7a382d72d47db40bbcaaef4f5789c8105d683b7f28fe5 +size 205156 diff --git a/all_outputs/app_renders/chunk00_003.png b/all_outputs/app_renders/chunk00_003.png new file mode 100644 index 0000000000000000000000000000000000000000..c27cd1d4627771ae93c388eea0876cb5183270f6 --- /dev/null +++ b/all_outputs/app_renders/chunk00_003.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a7607adc44cb5c8e8f9a425a9c7b1ec1a628e6a9bca2779250f5fd9e60ee48f +size 207435 diff --git a/all_outputs/app_renders/chunk00_004.png
b/all_outputs/app_renders/chunk00_004.png new file mode 100644 index 0000000000000000000000000000000000000000..b77fea8b107fb8ada99cdb332a72d59382ce5477 --- /dev/null +++ b/all_outputs/app_renders/chunk00_004.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:633d57ac4231d930b0f6d7914ff525cfa6143937b3968042b3e1b30cff29d654 +size 208395 diff --git a/all_outputs/app_renders/chunk00_005.png b/all_outputs/app_renders/chunk00_005.png new file mode 100644 index 0000000000000000000000000000000000000000..3a5da6e5cc97f4cbac3c97fd597659e437fff811 --- /dev/null +++ b/all_outputs/app_renders/chunk00_005.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cac114b5a7594df2075c95318369060e7492a51a15e1f607cf57ba7b4799aad2 +size 222557 diff --git a/all_outputs/app_renders/chunk00_006.png b/all_outputs/app_renders/chunk00_006.png new file mode 100644 index 0000000000000000000000000000000000000000..03c14545e81dd00ba31ff9476a3f4b14a23ea464 --- /dev/null +++ b/all_outputs/app_renders/chunk00_006.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfcbb6424747ebcd5adce68ffa08dfec84cfe3528826d77af78f38a2ee62e7b1 +size 217920 diff --git a/all_outputs/app_renders/chunk00_007.png b/all_outputs/app_renders/chunk00_007.png new file mode 100644 index 0000000000000000000000000000000000000000..eb3710cd4915f5938c5de8db84b5eff337a3c27a --- /dev/null +++ b/all_outputs/app_renders/chunk00_007.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92b22c7a869d78b8c1f345f163bf6c1c18dd0ac1df8f769931419546d3e836cc +size 210497 diff --git a/all_outputs/app_renders/chunk00_008.png b/all_outputs/app_renders/chunk00_008.png new file mode 100644 index 0000000000000000000000000000000000000000..235584e4c32323a290b0de9848229f0fb44b3889 --- /dev/null +++ b/all_outputs/app_renders/chunk00_008.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36c864e26350744d3f7fa011f33a446ad2ea48fa380c7d84479566d66d301de5 +size 198044 diff --git a/all_outputs/app_renders/chunk00_009.png b/all_outputs/app_renders/chunk00_009.png new file mode 100644 index 0000000000000000000000000000000000000000..0de943e2e237dd3f5263042c012ccdad446256cf --- /dev/null +++ b/all_outputs/app_renders/chunk00_009.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:378c207c0897d3d7adb7010e03264801b0ec35b646493662cd1d8983538feea9 +size 223563 diff --git a/all_outputs/app_renders/chunk00_010.png b/all_outputs/app_renders/chunk00_010.png new file mode 100644 index 0000000000000000000000000000000000000000..de37b8957ef67e9c51b4b4e66f6de285d611853b --- /dev/null +++ b/all_outputs/app_renders/chunk00_010.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59b213c35072193105f960f6aeca8229e3b4a99eb4abd9aa8100152e57405827 +size 209479 diff --git a/all_outputs/app_renders/chunk00_011.png b/all_outputs/app_renders/chunk00_011.png new file mode 100644 index 0000000000000000000000000000000000000000..6c320be0d74e43807724e61bdbe9d7305615fad2 --- /dev/null +++ b/all_outputs/app_renders/chunk00_011.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da1fbb084775c25d18713685608db63044f8f98e86b2ac3c81e33392d7810c48 +size 207180 diff --git a/all_outputs/app_renders/chunk00_012.png b/all_outputs/app_renders/chunk00_012.png new file mode 100644 index 0000000000000000000000000000000000000000..9e595822c698c568c5839975ada863c60a4136a5 --- /dev/null +++ b/all_outputs/app_renders/chunk00_012.png @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:4d8088f9605564d3b74e21e94106a5cf94bca1562eec79503456a2f9ac0c7004 +size 210182 diff --git a/all_outputs/app_renders/chunk00_013.png b/all_outputs/app_renders/chunk00_013.png new file mode 100644 index 0000000000000000000000000000000000000000..955323eee64e4a59c05235ea85ed9c3ea05a1744 --- /dev/null +++ b/all_outputs/app_renders/chunk00_013.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c258e3ccaa22eae582831bc4b327a852eecf1593de4e6e6b8c1635bda46dba01 +size 224274 diff --git a/all_outputs/app_renders/chunk00_014.png b/all_outputs/app_renders/chunk00_014.png new file mode 100644 index 0000000000000000000000000000000000000000..fe85bb13e1309ed007d8a7c31ae9c2337a34b533 --- /dev/null +++ b/all_outputs/app_renders/chunk00_014.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c02a453548838489f49debfc46dfbd022a7d1f531cf449d5e25a188646cc9245 +size 228090 diff --git a/all_outputs/app_renders/chunk00_015.png b/all_outputs/app_renders/chunk00_015.png new file mode 100644 index 0000000000000000000000000000000000000000..8403fe3b338aa85a0a18849cf6d5dfbb4310e743 --- /dev/null +++ b/all_outputs/app_renders/chunk00_015.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c9925f2a70ab4d0d8b27136adef22f429fffa6d781ae66ffbf3f3432f59615c +size 205663 diff --git a/all_outputs/app_renders/chunk00_016.png b/all_outputs/app_renders/chunk00_016.png new file mode 100644 index 0000000000000000000000000000000000000000..bfb756885108c698981126443e6346588c6e9c33 --- /dev/null +++ b/all_outputs/app_renders/chunk00_016.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a959432216b1eea6309c9b23e6e6b1b2c7dba76307ea8398abc38a0317a9cef8 +size 189210 diff --git a/all_outputs/app_renders/chunk00_017.png b/all_outputs/app_renders/chunk00_017.png new file mode 100644 index 0000000000000000000000000000000000000000..327098d4084f0a6e7c55ff36e030e0cdf1694bfc --- /dev/null +++ b/all_outputs/app_renders/chunk00_017.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:327120abcd219d238440bb7b0630f2544435500c84d6cd4508384c0a4bc370ec +size 221644 diff --git a/all_outputs/app_renders/chunk00_018.png b/all_outputs/app_renders/chunk00_018.png new file mode 100644 index 0000000000000000000000000000000000000000..f4311b001a58490818e282d4f8d6010a4142a92c --- /dev/null +++ b/all_outputs/app_renders/chunk00_018.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bee15694fea8836c90ed1b5694551acacb0045e855321cac89e6363b08d7cc3 +size 199418 diff --git a/all_outputs/app_renders/chunk00_019.png b/all_outputs/app_renders/chunk00_019.png new file mode 100644 index 0000000000000000000000000000000000000000..7ee270cdc69a76afeb0f429bfeff5584a8613b08 --- /dev/null +++ b/all_outputs/app_renders/chunk00_019.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c189b0be9fbfa8065127ceb981f22711d1bd2adc5ff0613e374feeae1f2b24b6 +size 203852 diff --git a/all_outputs/app_renders/chunk00_020.png b/all_outputs/app_renders/chunk00_020.png new file mode 100644 index 0000000000000000000000000000000000000000..d7d3662ac9fbe1507bdedac1f28e93ac967c5b46 --- /dev/null +++ b/all_outputs/app_renders/chunk00_020.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27f0240530c3c03143d7fa163c025ffee4397203601a0c5dedbdb152231e95a3 +size 200634 diff --git a/all_outputs/app_renders/chunk00_021.png b/all_outputs/app_renders/chunk00_021.png new 
file mode 100644 index 0000000000000000000000000000000000000000..58afbbbe2737c089298b5af4bbb493c67c110cc8 --- /dev/null +++ b/all_outputs/app_renders/chunk00_021.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddf61abc5d6cdee35212e4e8c752ac2afba0350932a61e34a45dbce81beaef06 +size 219991 diff --git a/all_outputs/app_renders/chunk00_022.png b/all_outputs/app_renders/chunk00_022.png new file mode 100644 index 0000000000000000000000000000000000000000..5b9df73fa2c04b0d3d74416acff1a15f36615128 --- /dev/null +++ b/all_outputs/app_renders/chunk00_022.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9de1991623020ff44de9b95ab51112e62edfeaf6360dc09c3d86c658f241b5f +size 218713 diff --git a/all_outputs/app_renders/chunk00_023.png b/all_outputs/app_renders/chunk00_023.png new file mode 100644 index 0000000000000000000000000000000000000000..8ddd5242b545feb15c9ec07ccecc5ef608643f0e --- /dev/null +++ b/all_outputs/app_renders/chunk00_023.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39799ad9f49683eb28f70f246f91ea25661ef92e1ffc33aef9ca91bdf659ba50 +size 198734 diff --git a/all_outputs/app_renders/chunk00_024.png b/all_outputs/app_renders/chunk00_024.png new file mode 100644 index 0000000000000000000000000000000000000000..a32585ab65207bfd9a2e1625be2124e9a7294950 --- /dev/null +++ b/all_outputs/app_renders/chunk00_024.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79873b40e8bec1caeec061a33f7d317db64af341e7281f0acbd348369d5da2fc +size 195341 diff --git a/all_outputs/app_renders/chunk00_025.png b/all_outputs/app_renders/chunk00_025.png new file mode 100644 index 0000000000000000000000000000000000000000..922678d59c100b657c7d6716cd7c79774a7449c0 --- /dev/null +++ b/all_outputs/app_renders/chunk00_025.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13687d50261e001b9e8757c8074aa9a81324eafa394a86772d7e52e78235b736 +size 215090 diff --git a/all_outputs/app_renders/chunk00_026.png b/all_outputs/app_renders/chunk00_026.png new file mode 100644 index 0000000000000000000000000000000000000000..591f097f45566482d08d8af0db047dca90190f67 --- /dev/null +++ b/all_outputs/app_renders/chunk00_026.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdb791d414c1339c59fd29b0d8a69747b3e2c4a6f704c878229e46c5f7162e3d +size 204752 diff --git a/all_outputs/app_renders/chunk00_027.png b/all_outputs/app_renders/chunk00_027.png new file mode 100644 index 0000000000000000000000000000000000000000..b216e38a0bda98a93614ca8aa8bb17309647f8a5 --- /dev/null +++ b/all_outputs/app_renders/chunk00_027.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c0d0a96da28db7333877d917e9abd7d32810e7691086da2913f2dada2a9ec55 +size 195915 diff --git a/all_outputs/app_renders/chunk00_028.png b/all_outputs/app_renders/chunk00_028.png new file mode 100644 index 0000000000000000000000000000000000000000..6747f19734740d5087b6f4fe34c803d57aa51150 --- /dev/null +++ b/all_outputs/app_renders/chunk00_028.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dd9128626136afdd5ec1a12288a8747c31cbe86867da1896e9c2593fd2ef264 +size 195182 diff --git a/all_outputs/app_renders/chunk00_029.png b/all_outputs/app_renders/chunk00_029.png new file mode 100644 index 0000000000000000000000000000000000000000..da686d2408555d3178b4063dcdddd4869e523ca2 --- /dev/null +++ b/all_outputs/app_renders/chunk00_029.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3efe22eeedadfc033d4d03589a4f69ed2cf4bd61f1d8fd84c1b25a6e1b45020e +size 206244 diff --git a/all_outputs/app_renders/chunk00_030.png b/all_outputs/app_renders/chunk00_030.png new file mode 100644 index 0000000000000000000000000000000000000000..e0bc3ddc7c5e45478373102dbaab54ec110f412c --- /dev/null +++ b/all_outputs/app_renders/chunk00_030.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f1dcad5201bd0fdcd708ba1d8df13a03d9f1dc4ad082b0d3a13458f4cb02d6c +size 212872 diff --git a/all_outputs/app_renders/chunk00_031.png b/all_outputs/app_renders/chunk00_031.png new file mode 100644 index 0000000000000000000000000000000000000000..7e091bc8de6db22afc541a39afa6d73de15c4d26 --- /dev/null +++ b/all_outputs/app_renders/chunk00_031.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b37a321c2cf34c8bfe7aeec5fc54cd8b8f32d9ecda18546d60e823a845cedb58 +size 206451 diff --git a/all_outputs/app_renders/chunk00_032.png b/all_outputs/app_renders/chunk00_032.png new file mode 100644 index 0000000000000000000000000000000000000000..93182d6bd1e471fb06d8cfdc367c64da92d8cc57 --- /dev/null +++ b/all_outputs/app_renders/chunk00_032.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81fec5959d2189cde399387875e4920807239ee4113f7726110064b31107bc4d +size 204585 diff --git a/all_outputs/app_renders/chunk00_033.png b/all_outputs/app_renders/chunk00_033.png new file mode 100644 index 0000000000000000000000000000000000000000..a0758e9f404443a049ac619bed0568bb3133c990 --- /dev/null +++ b/all_outputs/app_renders/chunk00_033.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0e0788b880e4a0f94533b3215e463bb8263442c6f43ee29199bbaa5a3c7655b +size 210402 diff --git a/all_outputs/app_renders/chunk00_034.png b/all_outputs/app_renders/chunk00_034.png new file mode 100644 index 0000000000000000000000000000000000000000..427e7593689191c4a18ec8c673c4140a0ee07bf2 --- /dev/null +++ b/all_outputs/app_renders/chunk00_034.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:869f52bafbdf349ae789764722826db47b0414d4fd4ef4f5fd320da4f8b1256e +size 194898 diff --git a/all_outputs/app_renders/chunk00_035.png b/all_outputs/app_renders/chunk00_035.png new file mode 100644 index 0000000000000000000000000000000000000000..3f3a334b0ac2c818c0e82206b1e6fdc2e2e28e8a --- /dev/null +++ b/all_outputs/app_renders/chunk00_035.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51233a49c80f4ace3d3b198267b0e25e7eeebf7b855eee11d69b46dd5017fcb9 +size 196124 diff --git a/all_outputs/app_renders/chunk00_036.png b/all_outputs/app_renders/chunk00_036.png new file mode 100644 index 0000000000000000000000000000000000000000..d62f27d345988d63ab62a404674842cbe2ea1c4c --- /dev/null +++ b/all_outputs/app_renders/chunk00_036.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f91c60fadde5447b81d7270b9f0dbb328b4a7761e2acdd7f57a93bbe5b0bed7 +size 210453 diff --git a/all_outputs/app_renders/chunk00_037.png b/all_outputs/app_renders/chunk00_037.png new file mode 100644 index 0000000000000000000000000000000000000000..443e1a3cffbe0af447b134b20ce32b30104f9f75 --- /dev/null +++ b/all_outputs/app_renders/chunk00_037.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d47e683067cc3b631bca7d8cc7144d64ba6e97dc2fbbdbf77865ffd31eb9c07 +size 213630 diff --git a/all_outputs/app_renders/chunk00_038.png b/all_outputs/app_renders/chunk00_038.png new file mode 100644 index 
0000000000000000000000000000000000000000..301b4cf23e8f8ac810e3aba0336bebe7812429c8 --- /dev/null +++ b/all_outputs/app_renders/chunk00_038.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf3c1a7c07321205e53b52a32ae2c5c3715718b6626e8d280fdd9e4a7daba3fb +size 209165 diff --git a/all_outputs/app_renders/chunk00_039.png b/all_outputs/app_renders/chunk00_039.png new file mode 100644 index 0000000000000000000000000000000000000000..8594096a4db134be1c70257d4d9a1226f8b3407b --- /dev/null +++ b/all_outputs/app_renders/chunk00_039.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c3ae9dab9875b3204544ef4a4d64516e94998815919d5532a5131eb4313ba7b +size 214490 diff --git a/all_outputs/app_renders/chunk00_040.png b/all_outputs/app_renders/chunk00_040.png new file mode 100644 index 0000000000000000000000000000000000000000..0e9ee915639cd9d5fa65361732a91fb2a53878be --- /dev/null +++ b/all_outputs/app_renders/chunk00_040.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea9d60aa2e6d9c8f1d67e69b54fc8a4cbec0c929921709efb74a28fdb0c74bed +size 215393 diff --git a/all_outputs/app_renders/chunk00_041.png b/all_outputs/app_renders/chunk00_041.png new file mode 100644 index 0000000000000000000000000000000000000000..92d584a70d17f128c99f93ac1ae703be14e67db2 --- /dev/null +++ b/all_outputs/app_renders/chunk00_041.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac9e035af50d531bd8df6755a7f1781fedfa8edd20ae3e9f56a938fd9395ed60 +size 212573 diff --git a/all_outputs/app_renders/chunk00_042.png b/all_outputs/app_renders/chunk00_042.png new file mode 100644 index 0000000000000000000000000000000000000000..4b37a27835f7b2e883d7dbffdf6b087158b623fe --- /dev/null +++ b/all_outputs/app_renders/chunk00_042.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7efeef2970e9768cb5f5aa7988b2cbaafe57387f442706ce1b00dc5c083c7b06 +size 200215 diff --git a/all_outputs/app_renders/chunk00_043.png b/all_outputs/app_renders/chunk00_043.png new file mode 100644 index 0000000000000000000000000000000000000000..604086d646357c781849223a0e4ebc8a3d371393 --- /dev/null +++ b/all_outputs/app_renders/chunk00_043.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:830c75a664efb400b594006f5fbbf0b6888a16256999f3aab6beff416160f94c +size 206780 diff --git a/all_outputs/app_renders/chunk00_044.png b/all_outputs/app_renders/chunk00_044.png new file mode 100644 index 0000000000000000000000000000000000000000..d9d9639464edf4a819efc4603e8a394ec50b537f --- /dev/null +++ b/all_outputs/app_renders/chunk00_044.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18d9b4510fc337ce1b5378636be035a43e80662cb26db32335e1e2b53f0becb0 +size 212244 diff --git a/all_outputs/app_renders/chunk00_045.png b/all_outputs/app_renders/chunk00_045.png new file mode 100644 index 0000000000000000000000000000000000000000..1a0f786f0fc0f380a10c5d38712a01e106bf86eb --- /dev/null +++ b/all_outputs/app_renders/chunk00_045.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17cb35498537ee674a23cfb175cc071fc93a634adb59603d70a53bcacd3f6524 +size 207507 diff --git a/all_outputs/app_renders/chunk00_046.png b/all_outputs/app_renders/chunk00_046.png new file mode 100644 index 0000000000000000000000000000000000000000..7d328841107b7b2a91f87868301ba36964a1e5fe --- /dev/null +++ b/all_outputs/app_renders/chunk00_046.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:9de856c5e77503cc5453062c96ee6cc58c903ccc3301a7d0adbe0378bb82c315 +size 212504 diff --git a/all_outputs/app_renders/chunk00_047.png b/all_outputs/app_renders/chunk00_047.png new file mode 100644 index 0000000000000000000000000000000000000000..2973ec4721360274c348ef27dcf826c54e5df2b7 --- /dev/null +++ b/all_outputs/app_renders/chunk00_047.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41d3487901f61d3d06b4d2da1792b9965e7cceb9b0d09b977e786ab953ae9f59 +size 222667 diff --git a/all_outputs/app_renders/chunk00_048.png b/all_outputs/app_renders/chunk00_048.png new file mode 100644 index 0000000000000000000000000000000000000000..fd6fb68e271ee37a36d8f026a3b3807228543618 --- /dev/null +++ b/all_outputs/app_renders/chunk00_048.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:645a51706ad50e5f47d812c0758f6f5cccc6bc056c984a93c24b8fde7a000cd7 +size 219773 diff --git a/all_outputs/app_renders/chunk00_049.png b/all_outputs/app_renders/chunk00_049.png new file mode 100644 index 0000000000000000000000000000000000000000..33f020aa9afff6aa783e443328d56ca9154212c5 --- /dev/null +++ b/all_outputs/app_renders/chunk00_049.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4fa78a97a2436554c549188e146a89d695b9ee7c7750c8ef0afd43b261c8d3fc +size 211883 diff --git a/all_outputs/app_renders/chunk01_000.png b/all_outputs/app_renders/chunk01_000.png new file mode 100644 index 0000000000000000000000000000000000000000..7fd6a36b3686d6ec123d4cac7dfc69f4555c0975 --- /dev/null +++ b/all_outputs/app_renders/chunk01_000.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1efe11683e20f32754f16b6252ea08c239428d06c974006057c0a21bb5bfa33a +size 196235 diff --git a/all_outputs/app_renders/chunk01_001.png b/all_outputs/app_renders/chunk01_001.png new file mode 100644 index 0000000000000000000000000000000000000000..4b9447a5444c9eb31481548fb8aa4cdefa45e7b5 --- /dev/null +++ b/all_outputs/app_renders/chunk01_001.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8985de91461f2f9d5e9b24e95655252a04853f077b8fd7778a8fff110606e172 +size 203889 diff --git a/all_outputs/app_renders/chunk01_002.png b/all_outputs/app_renders/chunk01_002.png new file mode 100644 index 0000000000000000000000000000000000000000..d2cf2be6ac4eeb0e2575c815544ec42dabff7227 --- /dev/null +++ b/all_outputs/app_renders/chunk01_002.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e0dd4be62bf48d5316000ac2de5d73ab036c4058a48e3fb2ae08737ea6c398d +size 221342 diff --git a/all_outputs/app_renders/chunk01_003.png b/all_outputs/app_renders/chunk01_003.png new file mode 100644 index 0000000000000000000000000000000000000000..dfdc6e2b35497ca027b60fb8216f5b845a8010b6 --- /dev/null +++ b/all_outputs/app_renders/chunk01_003.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fa1e7cef40ee4f576525a8787aa5aebafa8e527bfceddf34549097d25aed076 +size 211764 diff --git a/all_outputs/app_renders/chunk01_004.png b/all_outputs/app_renders/chunk01_004.png new file mode 100644 index 0000000000000000000000000000000000000000..c92bef2c67c13476dea310ac3445c5babec1be18 --- /dev/null +++ b/all_outputs/app_renders/chunk01_004.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8eabfc5008ce730b402ea004661d34b540c9973c52ee77942eb52580ee4f8d9 +size 211581 diff --git a/all_outputs/app_renders/chunk01_005.png b/all_outputs/app_renders/chunk01_005.png new file mode 100644 index 
0000000000000000000000000000000000000000..51c358bbf88512318da0bd19209bfcf7a025aa48 --- /dev/null +++ b/all_outputs/app_renders/chunk01_005.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:032fff3d9d23bf1d64c54ef779c479cb90d4b509edabf4da56eb5caa662cf5a3 +size 228169 diff --git a/all_outputs/app_renders/chunk01_006.png b/all_outputs/app_renders/chunk01_006.png new file mode 100644 index 0000000000000000000000000000000000000000..41a2b59faf0c6e545fbf809808f2196acdee6837 --- /dev/null +++ b/all_outputs/app_renders/chunk01_006.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d54ad4450921f5ee2aea33ba48c3496d4f4cc5e986bee2989d29f87ee61a46d8 +size 225910 diff --git a/all_outputs/app_renders/chunk01_007.png b/all_outputs/app_renders/chunk01_007.png new file mode 100644 index 0000000000000000000000000000000000000000..147e90290fc3d02f2da35ca8678873e183db3d04 --- /dev/null +++ b/all_outputs/app_renders/chunk01_007.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbcf6ebdce0e77e443e1174be60a7d7b6c83df9557d1a6a510ff564d2a8ba96a +size 212989 diff --git a/all_outputs/app_renders/chunk01_008.png b/all_outputs/app_renders/chunk01_008.png new file mode 100644 index 0000000000000000000000000000000000000000..c928e30bc178b7df3cf4772e72720fbc39034016 --- /dev/null +++ b/all_outputs/app_renders/chunk01_008.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:160e77dc151c34b3b3a75cf4f8ececb0f567389fdf3641bd58f5ac2ed5a7ba52 +size 204827 diff --git a/all_outputs/app_renders/chunk01_009.png b/all_outputs/app_renders/chunk01_009.png new file mode 100644 index 0000000000000000000000000000000000000000..7e24f376d025e9efbbaf3c21e501b328ed904437 --- /dev/null +++ b/all_outputs/app_renders/chunk01_009.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e89001611c26fdc3437b59ee8888512ea10bfbdf070f2a4dedeb327fb7c14e79 +size 224504 diff --git a/all_outputs/app_renders/chunk01_010.png b/all_outputs/app_renders/chunk01_010.png new file mode 100644 index 0000000000000000000000000000000000000000..41af51734e5097ab9010f5f5d196c2bca8dfe754 --- /dev/null +++ b/all_outputs/app_renders/chunk01_010.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef9d53bf3e1019b084671528ee21baee77a088b1db73b1b2258c32e2eea0af38 +size 216468 diff --git a/all_outputs/app_renders/chunk01_011.png b/all_outputs/app_renders/chunk01_011.png new file mode 100644 index 0000000000000000000000000000000000000000..43d189d432494ae44dd139a7feb9819a948462a1 --- /dev/null +++ b/all_outputs/app_renders/chunk01_011.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2ff07821fbd1f697c58f8220c9b803ad549fdd1ba0317355bcc76dd545881cc +size 203794 diff --git a/all_outputs/app_renders/chunk01_012.png b/all_outputs/app_renders/chunk01_012.png new file mode 100644 index 0000000000000000000000000000000000000000..1617a0d08521d407657addcc7121a9cd28765554 --- /dev/null +++ b/all_outputs/app_renders/chunk01_012.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4531b3315024c853bbe86b040c583fbaf439d5096d1a30875c83e8f5014d9002 +size 211147 diff --git a/all_outputs/app_renders/chunk01_013.png b/all_outputs/app_renders/chunk01_013.png new file mode 100644 index 0000000000000000000000000000000000000000..d4e9ab1cc127da61b5a791dff38d02480896618a --- /dev/null +++ b/all_outputs/app_renders/chunk01_013.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a0b3a01507ebd68beb27c06a8730831732a73bd8c996b8df0537318748287f26 +size 231589 diff --git a/all_outputs/app_renders/chunk01_014.png b/all_outputs/app_renders/chunk01_014.png new file mode 100644 index 0000000000000000000000000000000000000000..0bfb0441511af7b16967289861b8b8a67052c5da --- /dev/null +++ b/all_outputs/app_renders/chunk01_014.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f3185aa7cb670b7ea33fa52157902da6ea43e553685fbaa7840f6a56f6978c8 +size 230648 diff --git a/all_outputs/app_renders/chunk01_015.png b/all_outputs/app_renders/chunk01_015.png new file mode 100644 index 0000000000000000000000000000000000000000..3f1f40efd3b60abe981d48079bedda3ea5657c54 --- /dev/null +++ b/all_outputs/app_renders/chunk01_015.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0f0eac71c644915be4bd78b18cc430da9b7229e1fadbae6ef497fdc83b9d354 +size 211286 diff --git a/all_outputs/app_renders/chunk01_016.png b/all_outputs/app_renders/chunk01_016.png new file mode 100644 index 0000000000000000000000000000000000000000..d02aa18386e6442a13274662303bf4b76a373320 --- /dev/null +++ b/all_outputs/app_renders/chunk01_016.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44475e7d8f0b2e1b033e768b149fb32a18e5895d5574943ec28ee56a8938aa0b +size 214052 diff --git a/all_outputs/app_renders/chunk01_017.png b/all_outputs/app_renders/chunk01_017.png new file mode 100644 index 0000000000000000000000000000000000000000..14fd8f85bce5110d44cf69019346b561fb8f30a4 --- /dev/null +++ b/all_outputs/app_renders/chunk01_017.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4517fd21d4741225cf8abd62a24930cd469400a310483cb3bba19fbf1905d6e +size 205340 diff --git a/all_outputs/app_renders/chunk01_018.png b/all_outputs/app_renders/chunk01_018.png new file mode 100644 index 0000000000000000000000000000000000000000..f252e7b50078faafd37a7a5267f8f390875b1abc --- /dev/null +++ b/all_outputs/app_renders/chunk01_018.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:354bba5e1ad1eff21d16e66afc89d736db135ad7fc08669251676afc6c0f6740 +size 234827 diff --git a/all_outputs/app_renders/chunk01_019.png b/all_outputs/app_renders/chunk01_019.png new file mode 100644 index 0000000000000000000000000000000000000000..b57d35454a822a5e4429d7e173963c73aa9a359a --- /dev/null +++ b/all_outputs/app_renders/chunk01_019.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bb9496cf8aba2bd47246fe96e5390f687911018283f7cf8d2f56df554d2f13a +size 211691 diff --git a/all_outputs/app_renders/chunk01_020.png b/all_outputs/app_renders/chunk01_020.png new file mode 100644 index 0000000000000000000000000000000000000000..21d41f085457ca5973614f5e4c4cfc0f05eb2e13 --- /dev/null +++ b/all_outputs/app_renders/chunk01_020.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fe3a4aeb3d1d6cf02b45bd88f5f55556d9b35b09ca4be4bad8e7c8d75d632da +size 204134 diff --git a/all_outputs/app_renders/chunk01_021.png b/all_outputs/app_renders/chunk01_021.png new file mode 100644 index 0000000000000000000000000000000000000000..9984955454caa1a9401d79892fe328b139748242 --- /dev/null +++ b/all_outputs/app_renders/chunk01_021.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23218c5190994b6e203f6343eedadc61a1fd5d7a6e3c1a561f10e19f9efb4e63 +size 237800 diff --git a/all_outputs/app_renders/chunk01_022.png b/all_outputs/app_renders/chunk01_022.png new file mode 100644 index 
0000000000000000000000000000000000000000..713e0286cdf062467e65b3cc815d1aa524ed3325 --- /dev/null +++ b/all_outputs/app_renders/chunk01_022.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d336719b05b3638bccf403794f32bf23b690d59877a19ac923b4930d296e59ae +size 235066 diff --git a/all_outputs/app_renders/chunk01_023.png b/all_outputs/app_renders/chunk01_023.png new file mode 100644 index 0000000000000000000000000000000000000000..9c12f7b49c3be145a07819a61e3d1bb877334387 --- /dev/null +++ b/all_outputs/app_renders/chunk01_023.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53a4b6d2eca2a42fc6dc3e184a103ca1df857fcb87e6a1165b92cb8f8c472ce9 +size 209950 diff --git a/all_outputs/app_renders/chunk01_024.png b/all_outputs/app_renders/chunk01_024.png new file mode 100644 index 0000000000000000000000000000000000000000..f5b559b76f491f90b36012317b36d7c2e179e1d6 --- /dev/null +++ b/all_outputs/app_renders/chunk01_024.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a41c852b0ba9ace3f67edcf0e84fd20c4ad1c94f3b821102d45f5fd8e9a5ce7 +size 206585 diff --git a/all_outputs/app_renders/chunk01_025.png b/all_outputs/app_renders/chunk01_025.png new file mode 100644 index 0000000000000000000000000000000000000000..75d5f35f0449be9235e926eeb29865cb2bc2f150 --- /dev/null +++ b/all_outputs/app_renders/chunk01_025.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27435c695875f8574aef5085ae646c782396bcc8ec5bc31bddb1c2f02fca068b +size 224428 diff --git a/all_outputs/app_renders/chunk01_026.png b/all_outputs/app_renders/chunk01_026.png new file mode 100644 index 0000000000000000000000000000000000000000..a46a51d034d564c2ada1db5462343d65675d07f6 --- /dev/null +++ b/all_outputs/app_renders/chunk01_026.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb96e5e986c5258bf48e79ba7162f336b54562e7237871204cd174cbffa804bd +size 232818 diff --git a/all_outputs/app_renders/chunk01_027.png b/all_outputs/app_renders/chunk01_027.png new file mode 100644 index 0000000000000000000000000000000000000000..859674ffaad9f55c43fbe8801172e149b0ab5b8b --- /dev/null +++ b/all_outputs/app_renders/chunk01_027.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9a0c997bff777df245ad6789ff197a9b4ff782b1e0151ea55dc6b450397d2e5 +size 206989 diff --git a/all_outputs/app_renders/chunk01_028.png b/all_outputs/app_renders/chunk01_028.png new file mode 100644 index 0000000000000000000000000000000000000000..dab593dffb2c0910621c792ef63916a82fd7a5a8 --- /dev/null +++ b/all_outputs/app_renders/chunk01_028.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0bf402998c7cd4123afa6b13110fc09b99d813826a31e4e3a64131a896241de +size 209800 diff --git a/all_outputs/app_renders/chunk01_029.png b/all_outputs/app_renders/chunk01_029.png new file mode 100644 index 0000000000000000000000000000000000000000..09077d6a6aa2df938f309d6cd2039813f83fdf81 --- /dev/null +++ b/all_outputs/app_renders/chunk01_029.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9166a56e864123bfe1985f4d4be1648140f2545bb9e38954b196723a786a50da +size 244971 diff --git a/all_outputs/app_renders/chunk01_030.png b/all_outputs/app_renders/chunk01_030.png new file mode 100644 index 0000000000000000000000000000000000000000..46e8b0432c545bea38ec621a54cb05a9ecc5adc2 --- /dev/null +++ b/all_outputs/app_renders/chunk01_030.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6f9ee949889eec1d8f6b3a2341645bf6f8e88cc73e010739a7c8ea12c36331b9 +size 240192 diff --git a/all_outputs/app_renders/chunk01_031.png b/all_outputs/app_renders/chunk01_031.png new file mode 100644 index 0000000000000000000000000000000000000000..9a0fca267feeae99721fdbb3d1fd84747c2fd285 --- /dev/null +++ b/all_outputs/app_renders/chunk01_031.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50127c7d15fd6a8c4e0f4ed9a0b1226493f738fdf876c8421c3e8483fe9e6931 +size 207640 diff --git a/all_outputs/app_renders/chunk01_032.png b/all_outputs/app_renders/chunk01_032.png new file mode 100644 index 0000000000000000000000000000000000000000..eaa6d0fd6982646ed354e5c953b361412e88a079 --- /dev/null +++ b/all_outputs/app_renders/chunk01_032.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc2654002586de8fec634a9fd502c9fc3563084c9c8b7aa0913c2a05120f3bc7 +size 214049 diff --git a/all_outputs/app_renders/chunk01_033.png b/all_outputs/app_renders/chunk01_033.png new file mode 100644 index 0000000000000000000000000000000000000000..cebc2058a099381f3880559d914460447f226a66 --- /dev/null +++ b/all_outputs/app_renders/chunk01_033.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbee2bbc170870d1a933d0ab70a2c77606556eca7d2394624caf67925818e322 +size 217712 diff --git a/all_outputs/app_renders/chunk01_034.png b/all_outputs/app_renders/chunk01_034.png new file mode 100644 index 0000000000000000000000000000000000000000..e6d1697a9d39963bbeff212e66c25d363f37b514 --- /dev/null +++ b/all_outputs/app_renders/chunk01_034.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:551a8bb6aa3c990ee67291d38e9167c043846875beed956acdc4e2017a1cb33e +size 240249 diff --git a/all_outputs/app_renders/chunk01_035.png b/all_outputs/app_renders/chunk01_035.png new file mode 100644 index 0000000000000000000000000000000000000000..3d97f0a80a266f0771fe3bb9bfdf8f4846447f1d --- /dev/null +++ b/all_outputs/app_renders/chunk01_035.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ad1896f2767ec440c7148fb50a8592a1ba5476c5c86f369e27b1a52b3e01dab +size 207561 diff --git a/all_outputs/app_renders/chunk01_036.png b/all_outputs/app_renders/chunk01_036.png new file mode 100644 index 0000000000000000000000000000000000000000..3915aef646de16316322c8a766ecf31dc798f5fb --- /dev/null +++ b/all_outputs/app_renders/chunk01_036.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:692d8d82321030dbfcd06b6a6bec457955c2df196926e9d777d3550dc9b5431b +size 206633 diff --git a/all_outputs/app_renders/chunk01_037.png b/all_outputs/app_renders/chunk01_037.png new file mode 100644 index 0000000000000000000000000000000000000000..9ce536f387b54af80a180faf6a92c1b6b2c03fec --- /dev/null +++ b/all_outputs/app_renders/chunk01_037.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdbf69ff1659eca395312aa62bf7c5f77a20c583d4ef902ef3476c2a14f62246 +size 247972 diff --git a/all_outputs/app_renders/chunk01_038.png b/all_outputs/app_renders/chunk01_038.png new file mode 100644 index 0000000000000000000000000000000000000000..574759514842b38397a71356558a92a502c146dc --- /dev/null +++ b/all_outputs/app_renders/chunk01_038.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:824a1d5bec7cadfafe4a78d1d36f70cb4f51009b4313fcab47d5a148d58a8f24 +size 244879 diff --git a/all_outputs/app_renders/chunk01_039.png b/all_outputs/app_renders/chunk01_039.png new file mode 100644 index 
0000000000000000000000000000000000000000..ef9562481957dc05f2f4b8b862784f1d0fa1fe11 --- /dev/null +++ b/all_outputs/app_renders/chunk01_039.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5166614e7ddb890226d96678c607e7d193a36b5dd0a8681b173d5566d87994d6 +size 205800 diff --git a/all_outputs/app_renders/chunk01_040.png b/all_outputs/app_renders/chunk01_040.png new file mode 100644 index 0000000000000000000000000000000000000000..b840a73867d740035eadc7b919b6d77b99ae5d10 --- /dev/null +++ b/all_outputs/app_renders/chunk01_040.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5534d4ef9fe4d7819d064a0db7cbbf7c846eddd4d4103eede5eebd27a0eec51e +size 207183 diff --git a/all_outputs/app_renders/chunk01_041.png b/all_outputs/app_renders/chunk01_041.png new file mode 100644 index 0000000000000000000000000000000000000000..ad42871c96e25ba2116455797bc2c0f981b7789e --- /dev/null +++ b/all_outputs/app_renders/chunk01_041.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d145374ce809d21add4010393fcc57d3178067c27f0638ff39b1e9a42806966 +size 238878 diff --git a/all_outputs/app_renders/chunk01_042.png b/all_outputs/app_renders/chunk01_042.png new file mode 100644 index 0000000000000000000000000000000000000000..6a1e4ff3ac0190531713fa7d323249e899064679 --- /dev/null +++ b/all_outputs/app_renders/chunk01_042.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e06f1f84678cafd01d820f7770c3452c8072610934a446f75b5b8e0730a29e8d +size 233392 diff --git a/all_outputs/app_renders/chunk01_043.png b/all_outputs/app_renders/chunk01_043.png new file mode 100644 index 0000000000000000000000000000000000000000..1935da6010bcdd1bab715e62214d5b0e94776e5b --- /dev/null +++ b/all_outputs/app_renders/chunk01_043.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eef1320b4b13c2c1a984ca0b2b0865a67f8ce54970a8db5789c5f6438432fe76 +size 209567 diff --git a/all_outputs/app_renders/chunk01_044.png b/all_outputs/app_renders/chunk01_044.png new file mode 100644 index 0000000000000000000000000000000000000000..172d7d0eae1ba672f275d02205be7e6dd362bd50 --- /dev/null +++ b/all_outputs/app_renders/chunk01_044.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2010e767750c2b64d1e4d55e11cce95ae939069958b6801d9082a8071654c87 +size 202176 diff --git a/all_outputs/app_renders/chunk01_045.png b/all_outputs/app_renders/chunk01_045.png new file mode 100644 index 0000000000000000000000000000000000000000..03e078263e5e3dd94679e078e1d430664191ba65 --- /dev/null +++ b/all_outputs/app_renders/chunk01_045.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28b1c0510a4ca7b1c19c9a821003bb6b40947f515c090a06de043a89845597f9 +size 251012 diff --git a/all_outputs/app_renders/chunk01_046.png b/all_outputs/app_renders/chunk01_046.png new file mode 100644 index 0000000000000000000000000000000000000000..ff125e5f060a0285b44df07bfa006d86db232e3a --- /dev/null +++ b/all_outputs/app_renders/chunk01_046.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a921de45076910fbba6ca3936af93c5273d80bc14887385e5e948c387d547544 +size 246381 diff --git a/all_outputs/app_renders/chunk01_047.png b/all_outputs/app_renders/chunk01_047.png new file mode 100644 index 0000000000000000000000000000000000000000..e1379f55c79c041cfe8f229295e4a03a18a6208c --- /dev/null +++ b/all_outputs/app_renders/chunk01_047.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ca2b6945c250e915a1eb42430e5b7c17b48b17981d34c1a245649121253b60d5 +size 198675 diff --git a/all_outputs/app_renders/chunk01_048.png b/all_outputs/app_renders/chunk01_048.png new file mode 100644 index 0000000000000000000000000000000000000000..6a968c4b3fe9253be87785036a920015b9609a19 --- /dev/null +++ b/all_outputs/app_renders/chunk01_048.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf5f4655f950a29d0e8266541e48fcf9d7e48cbe244eed6746eae40750d72f88 +size 220356 diff --git a/all_outputs/app_renders/chunk01_049.png b/all_outputs/app_renders/chunk01_049.png new file mode 100644 index 0000000000000000000000000000000000000000..0d921ab1eafe8456347e7ac1fb8f8e513f06e23c --- /dev/null +++ b/all_outputs/app_renders/chunk01_049.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15f53fac8ada5f861a8077e1e89797bab918448daa473ddbbb6324bcb673310e +size 220029 diff --git a/all_outputs/app_renders/chunk02_000.png b/all_outputs/app_renders/chunk02_000.png new file mode 100644 index 0000000000000000000000000000000000000000..e72abbd96c8877806664daf0f21158c2d88a3572 --- /dev/null +++ b/all_outputs/app_renders/chunk02_000.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69afac603b21f332abd6a4fb859cb465ca2c0875bb6cde8d90ea2837bc4e00c0 +size 245526 diff --git a/all_outputs/app_renders/chunk02_001.png b/all_outputs/app_renders/chunk02_001.png new file mode 100644 index 0000000000000000000000000000000000000000..ad06f0b2d51d715dd0353fd6ff2e223a3cfaa334 --- /dev/null +++ b/all_outputs/app_renders/chunk02_001.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07f34dd0decba611fa314bade0639d40c353f6cf820bf35c46c1c7451eff12c7 +size 201164 diff --git a/all_outputs/app_renders/chunk02_002.png b/all_outputs/app_renders/chunk02_002.png new file mode 100644 index 0000000000000000000000000000000000000000..1ea44a221f9a3de438721680b256a901db5e3121 --- /dev/null +++ b/all_outputs/app_renders/chunk02_002.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49d48d04672cc1d6bc177c2ddcf2ce90b8e814d2369ecccca467e2be128b7e14 +size 203646 diff --git a/all_outputs/app_renders/chunk02_003.png b/all_outputs/app_renders/chunk02_003.png new file mode 100644 index 0000000000000000000000000000000000000000..35db292c79e2e5424cbf80e2a079f7017b2c95dd --- /dev/null +++ b/all_outputs/app_renders/chunk02_003.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3947109958cc9497b9f1aab98a580a99071041be5fe5eec8c7074e5d8e76209 +size 244974 diff --git a/all_outputs/app_renders/chunk02_004.png b/all_outputs/app_renders/chunk02_004.png new file mode 100644 index 0000000000000000000000000000000000000000..bc7219d10f422825f09f92d70d507505760ee9e7 --- /dev/null +++ b/all_outputs/app_renders/chunk02_004.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4129eac74bc2a5f63c590a2acdadbe3af0084c118a36701b1062668f30a93eaf +size 244757 diff --git a/all_outputs/app_renders/chunk02_005.png b/all_outputs/app_renders/chunk02_005.png new file mode 100644 index 0000000000000000000000000000000000000000..eb1a67625489597ed6cee1592473045a02bcded7 --- /dev/null +++ b/all_outputs/app_renders/chunk02_005.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa4d37eaba27e4ea6049cee4510a340877fd9332d34dff56e5410be59841e60a +size 195995 diff --git a/all_outputs/app_renders/chunk02_006.png b/all_outputs/app_renders/chunk02_006.png new file mode 100644 index 
0000000000000000000000000000000000000000..5f85028b4fcccc5e10b70b6ba93ca878b84ddbb6 --- /dev/null +++ b/all_outputs/app_renders/chunk02_006.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16542a21f446abc23194bc1e6fb0d97d20a13dc9f753d18e6ffec8cc4348a808 +size 209437 diff --git a/all_outputs/app_renders/chunk02_007.png b/all_outputs/app_renders/chunk02_007.png new file mode 100644 index 0000000000000000000000000000000000000000..757b28df66fc421ccf2b8e9f41221305ebe05d7b --- /dev/null +++ b/all_outputs/app_renders/chunk02_007.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da9bbafa9dd26af6cb8198879f1dcf380f24cd030d7dff6086e849602df1c5c5 +size 235660 diff --git a/all_outputs/app_renders/chunk02_008.png b/all_outputs/app_renders/chunk02_008.png new file mode 100644 index 0000000000000000000000000000000000000000..d4ac2496069f54993bec5f9fe56df351cb0594dc --- /dev/null +++ b/all_outputs/app_renders/chunk02_008.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dfc4376dcc53202f8218f5bde23e5ddc7a4607e16c04e4e2a14cb5626723481 +size 237630 diff --git a/all_outputs/app_renders/chunk02_009.png b/all_outputs/app_renders/chunk02_009.png new file mode 100644 index 0000000000000000000000000000000000000000..bbd73d57dc550edb78a3fbebc05ba9c1620f3d5e --- /dev/null +++ b/all_outputs/app_renders/chunk02_009.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cdf42341524036f0e2a5b0616414f3dff2941f70547df2cff6a3ed4739abfdf +size 206424 diff --git a/all_outputs/app_renders/chunk02_010.png b/all_outputs/app_renders/chunk02_010.png new file mode 100644 index 0000000000000000000000000000000000000000..c6d274e2e81defbdbe380e4ad918e06bf3efbb48 --- /dev/null +++ b/all_outputs/app_renders/chunk02_010.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f243d092ea9e189367f18e502087f44f48299a675222860c06c05b18d6b8a089 +size 195724 diff --git a/all_outputs/app_renders/chunk02_011.png b/all_outputs/app_renders/chunk02_011.png new file mode 100644 index 0000000000000000000000000000000000000000..cf8a5f30416ed4b5811383476c2c08b543400614 --- /dev/null +++ b/all_outputs/app_renders/chunk02_011.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0e2a185972d4321ea579853424fad0fc1365601711529735eb1e242816b79d3 +size 251755 diff --git a/all_outputs/app_renders/chunk02_012.png b/all_outputs/app_renders/chunk02_012.png new file mode 100644 index 0000000000000000000000000000000000000000..df6a6371713cd9814145a5d148fb278218680882 --- /dev/null +++ b/all_outputs/app_renders/chunk02_012.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:174f5848dcd14a010c0be879f5f284973f369eb1a98488fd19e5c04f03904631 +size 245706 diff --git a/all_outputs/app_renders/chunk02_013.png b/all_outputs/app_renders/chunk02_013.png new file mode 100644 index 0000000000000000000000000000000000000000..adbbe20a1edc33eeb009322b1d9a60311f55b5af --- /dev/null +++ b/all_outputs/app_renders/chunk02_013.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c45ef3003e92b9125b5ec6cc9437c261e4538cc529b2c0e57d5845175815d4d +size 184515 diff --git a/all_outputs/app_renders/chunk02_014.png b/all_outputs/app_renders/chunk02_014.png new file mode 100644 index 0000000000000000000000000000000000000000..ad07481802134797578db8c1fad35e8a5206e893 --- /dev/null +++ b/all_outputs/app_renders/chunk02_014.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:80d4328cb4454ac1223638d3a2662a99918a70868ccfe3394a8c6baf7a34640b +size 213248 diff --git a/all_outputs/app_renders/chunk02_015.png b/all_outputs/app_renders/chunk02_015.png new file mode 100644 index 0000000000000000000000000000000000000000..aaf4d6c47363b0d2f076ebea0049d1896b528b19 --- /dev/null +++ b/all_outputs/app_renders/chunk02_015.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ac6185abe50ac5b4b2f54bb11569564fc88e8b1559b6ee5aa24b0a82043cede +size 225724 diff --git a/all_outputs/app_renders/chunk02_016.png b/all_outputs/app_renders/chunk02_016.png new file mode 100644 index 0000000000000000000000000000000000000000..1a79448dae5682f8c5d799c24668633dc101a1d1 --- /dev/null +++ b/all_outputs/app_renders/chunk02_016.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18dc0f319684eba73d0102bb7e6063315a24dd85e686ba8343767ed439210d52 +size 241458 diff --git a/all_outputs/app_renders/chunk02_017.png b/all_outputs/app_renders/chunk02_017.png new file mode 100644 index 0000000000000000000000000000000000000000..a3c9862b83a4bdc5eb9d920a718993c60be0f221 --- /dev/null +++ b/all_outputs/app_renders/chunk02_017.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12898e096ed4998a66ce971455c9294085bd598d8def36af16f33fa6f7b3c124 +size 201587 diff --git a/all_outputs/app_renders/chunk02_018.png b/all_outputs/app_renders/chunk02_018.png new file mode 100644 index 0000000000000000000000000000000000000000..00d627742049106569c1d12538da5823ea2615cf --- /dev/null +++ b/all_outputs/app_renders/chunk02_018.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dc26a7fe0bdd406eeb3c56f2815442c0d9e1e0ff804d30502eb3087d27dab76 +size 198097 diff --git a/all_outputs/app_renders/chunk02_019.png b/all_outputs/app_renders/chunk02_019.png new file mode 100644 index 0000000000000000000000000000000000000000..312a691b5fb79a4b3f11a671f70685d1167512b5 --- /dev/null +++ b/all_outputs/app_renders/chunk02_019.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fbcd715eb12da2953b007ff37550fa2b6d9ec0289a2588825293848d69db843 +size 248487 diff --git a/all_outputs/app_renders/chunk02_020.png b/all_outputs/app_renders/chunk02_020.png new file mode 100644 index 0000000000000000000000000000000000000000..623c1f46d4c33d49c6a13b7c53ecef2c33ca0bf9 --- /dev/null +++ b/all_outputs/app_renders/chunk02_020.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51cb7811bb46f5f98836a97245bc6e6d0eebf775c75cabf809a0496837502153 +size 246061 diff --git a/all_outputs/app_renders/chunk02_021.png b/all_outputs/app_renders/chunk02_021.png new file mode 100644 index 0000000000000000000000000000000000000000..f2077a201085c2d7ba97e57b82487256129139e9 --- /dev/null +++ b/all_outputs/app_renders/chunk02_021.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60f9dba66325c08021cc43f67a460d8a2cb17aaf07311acf13525ad6daac8828 +size 193806 diff --git a/all_outputs/app_renders/chunk02_022.png b/all_outputs/app_renders/chunk02_022.png new file mode 100644 index 0000000000000000000000000000000000000000..351b032a148cfbdb863ef95cff72911b9e658f9b --- /dev/null +++ b/all_outputs/app_renders/chunk02_022.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cba3f2bdee1632a7f44c85b02a7c6abbbdc96e14c5f2642813ac697ad88e796 +size 205615 diff --git a/all_outputs/app_renders/chunk02_023.png b/all_outputs/app_renders/chunk02_023.png new file mode 100644 index 
0000000000000000000000000000000000000000..5d13f360d747e94986af20a536bf0b9b1d953133 --- /dev/null +++ b/all_outputs/app_renders/chunk02_023.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f459f7439287db5c5bfbc27450635b215af0d56b3de509e549d6a624330b334 +size 240047 diff --git a/all_outputs/app_renders/chunk02_024.png b/all_outputs/app_renders/chunk02_024.png new file mode 100644 index 0000000000000000000000000000000000000000..bf5923555cec250f04d75256ff1a3aff748b25af --- /dev/null +++ b/all_outputs/app_renders/chunk02_024.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c880dbdaec50c39ee25fb5d9b425fa98181c68cbf33d47c7278afc78f593a6f +size 229404 diff --git a/all_outputs/app_renders/chunk02_025.png b/all_outputs/app_renders/chunk02_025.png new file mode 100644 index 0000000000000000000000000000000000000000..c286a200afd2dce33c30d96f6241c5041a60bead --- /dev/null +++ b/all_outputs/app_renders/chunk02_025.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0b5e132acb076ed707d6277f0f15a712a97a44eb66e474d10af58ce28436820 +size 212306 diff --git a/all_outputs/app_renders/chunk02_026.png b/all_outputs/app_renders/chunk02_026.png new file mode 100644 index 0000000000000000000000000000000000000000..de7ac62e294925b038c823e43dad936a2ca9b5c1 --- /dev/null +++ b/all_outputs/app_renders/chunk02_026.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f24949d86a242db3bd899cfb7d91555f697ca09572ab477b7c0116531c24ab0 +size 190664 diff --git a/all_outputs/app_renders/chunk02_027.png b/all_outputs/app_renders/chunk02_027.png new file mode 100644 index 0000000000000000000000000000000000000000..49566e55126e311759bfc94726c06fbd89aa9a31 --- /dev/null +++ b/all_outputs/app_renders/chunk02_027.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b37843c6d59bca78a5e29b242166b6088549b721c67e92d85a25fe6f4b1ad47e +size 244739 diff --git a/all_outputs/app_renders/chunk02_028.png b/all_outputs/app_renders/chunk02_028.png new file mode 100644 index 0000000000000000000000000000000000000000..94e593f4d063da0986661fd0d5eab3c53b599901 --- /dev/null +++ b/all_outputs/app_renders/chunk02_028.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb94b5fea4b4aac8d95e7cbc9a2a0bc05347b5812e55dcd82f0fde9cc6df2bc6 +size 243845 diff --git a/all_outputs/app_renders/chunk02_029.png b/all_outputs/app_renders/chunk02_029.png new file mode 100644 index 0000000000000000000000000000000000000000..6aa5d1e6ec1e767daf1adbc344e4b7b10f1b2546 --- /dev/null +++ b/all_outputs/app_renders/chunk02_029.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58c668a2953b24477806a538248f9cc20bf5be509ecefc497c4dbb28daba300e +size 188388 diff --git a/all_outputs/app_renders/chunk02_030.png b/all_outputs/app_renders/chunk02_030.png new file mode 100644 index 0000000000000000000000000000000000000000..f5e1e7c21b7eb6593e5066348919466bb78fae83 --- /dev/null +++ b/all_outputs/app_renders/chunk02_030.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c067083474642a57e7ce966dcb12580183ec8ac4f1ecb0fb3d82cd3ab262f84d +size 225457 diff --git a/all_outputs/app_renders/chunk02_031.png b/all_outputs/app_renders/chunk02_031.png new file mode 100644 index 0000000000000000000000000000000000000000..cf9f208faa1b7c5277701a4fb90434cdc32f139f --- /dev/null +++ b/all_outputs/app_renders/chunk02_031.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:981007b66c950c0d19652b4e7b4b001d194d3e5f5a00407bf7a0751d52c19d93 +size 211872 diff --git a/all_outputs/app_renders/chunk02_032.png b/all_outputs/app_renders/chunk02_032.png new file mode 100644 index 0000000000000000000000000000000000000000..d50e8964428243dda81a4d12dff99294fd96a719 --- /dev/null +++ b/all_outputs/app_renders/chunk02_032.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c9a925e7aa4a6ee32954cfb01b880b3089b9b06c3899aeb8384fb8b0a93717c +size 241948 diff --git a/all_outputs/app_renders/chunk02_033.png b/all_outputs/app_renders/chunk02_033.png new file mode 100644 index 0000000000000000000000000000000000000000..28fabd5d30f0cd30f6a20dd35990c92970f48b82 --- /dev/null +++ b/all_outputs/app_renders/chunk02_033.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70b67725eb031c54058616f44d0479419b762237b8abf942ebeea76478fd7377 +size 195694 diff --git a/all_outputs/app_renders/chunk02_034.png b/all_outputs/app_renders/chunk02_034.png new file mode 100644 index 0000000000000000000000000000000000000000..ea07b2e10be4ef52e9e67565c088ca7d3d9e91ac --- /dev/null +++ b/all_outputs/app_renders/chunk02_034.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:165102138363d71af89190063355e739f453c9a719846b9ee0adcfa91a3ae6fd +size 202965 diff --git a/all_outputs/app_renders/chunk02_035.png b/all_outputs/app_renders/chunk02_035.png new file mode 100644 index 0000000000000000000000000000000000000000..ec2076da9339d10ddb97c924f8e0a42920e394fd --- /dev/null +++ b/all_outputs/app_renders/chunk02_035.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b72837c54728974f5fc6a06c602f8f8f4d71f78daf5b559b2f488c81ba603b7f +size 239567 diff --git a/all_outputs/app_renders/chunk02_036.png b/all_outputs/app_renders/chunk02_036.png new file mode 100644 index 0000000000000000000000000000000000000000..3eef3e5c2aedf8b49f2bc8432accfd23c4810eca --- /dev/null +++ b/all_outputs/app_renders/chunk02_036.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b257eaba3f038904fc7063841767ce603ce21146d467399f049d4cc072bc574 +size 238761 diff --git a/all_outputs/app_renders/chunk02_037.png b/all_outputs/app_renders/chunk02_037.png new file mode 100644 index 0000000000000000000000000000000000000000..83b5389cf31e39c771f029e452c82388d1146541 --- /dev/null +++ b/all_outputs/app_renders/chunk02_037.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90e16f4137d5d302bb9559bee782838eacdd4d50717f1ed05684de38e07e5456 +size 187503 diff --git a/all_outputs/app_renders/chunk02_038.png b/all_outputs/app_renders/chunk02_038.png new file mode 100644 index 0000000000000000000000000000000000000000..e896c241bff937b5400313558f20c21051dca17d --- /dev/null +++ b/all_outputs/app_renders/chunk02_038.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec6ccc2b1c82e443291a85f1bfa165e148e908c8c36fbe3eebbfe5055233d7a4 +size 206347 diff --git a/all_outputs/app_renders/chunk02_039.png b/all_outputs/app_renders/chunk02_039.png new file mode 100644 index 0000000000000000000000000000000000000000..007cbb3eea31e8877ccf7ba2ea318af224bd727e --- /dev/null +++ b/all_outputs/app_renders/chunk02_039.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53e180f252a87848883a672948236c9e1b678972900837ac9dd37c1868170790 +size 221845 diff --git a/all_outputs/app_renders/chunk02_040.png b/all_outputs/app_renders/chunk02_040.png new file mode 100644 index 
0000000000000000000000000000000000000000..8b493955db7d07843783a6a17a974406684438e4 --- /dev/null +++ b/all_outputs/app_renders/chunk02_040.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf6fb4561cda37648e27bc6830ce5511e44c134a1ba9324fd6325f8d2a2e64b7 +size 231230 diff --git a/all_outputs/app_renders/chunk02_041.png b/all_outputs/app_renders/chunk02_041.png new file mode 100644 index 0000000000000000000000000000000000000000..4eb1fef0ce357115e96f676ab1835df25a2af5d8 --- /dev/null +++ b/all_outputs/app_renders/chunk02_041.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5ddbc656e53584e2f788ccaa4721d408a73467d67d525b7b898946a1fc6ef56 +size 204949 diff --git a/all_outputs/app_renders/chunk02_042.png b/all_outputs/app_renders/chunk02_042.png new file mode 100644 index 0000000000000000000000000000000000000000..5ec719ad175efca596478368a4bb76154798da23 --- /dev/null +++ b/all_outputs/app_renders/chunk02_042.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:226331ec60aab882434fe62bb0ccf2ebd3ac9fed3028e5fc3940f3bea3de1463 +size 199140 diff --git a/all_outputs/app_renders/chunk02_043.png b/all_outputs/app_renders/chunk02_043.png new file mode 100644 index 0000000000000000000000000000000000000000..95d752dba0b6f951beb63d4d461dd1ba8be829ce --- /dev/null +++ b/all_outputs/app_renders/chunk02_043.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c9c5bdfbb41b07e1dc5998a6ad11e337593df3a7c65691e6ea9e6a318365496 +size 235916 diff --git a/all_outputs/app_renders/chunk02_044.png b/all_outputs/app_renders/chunk02_044.png new file mode 100644 index 0000000000000000000000000000000000000000..41614b955707393ade4e9068803485ada7bea3f1 --- /dev/null +++ b/all_outputs/app_renders/chunk02_044.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57e0ab417e068b93426694eb512b045c87906ff8cfeedc31832bdc4d02446a13 +size 229019 diff --git a/all_outputs/app_renders/chunk02_045.png b/all_outputs/app_renders/chunk02_045.png new file mode 100644 index 0000000000000000000000000000000000000000..cdf62d9792776caa372222a983c6de3c6481b1da --- /dev/null +++ b/all_outputs/app_renders/chunk02_045.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4bf6d8a73cc1fba1ca0e55d79c3e815796f83a6e49914ad5c8c753a46fd8933 +size 185162 diff --git a/all_outputs/app_renders/chunk02_046.png b/all_outputs/app_renders/chunk02_046.png new file mode 100644 index 0000000000000000000000000000000000000000..e943bedc8f111e485b8a0a5475c4cbc3b15c432a --- /dev/null +++ b/all_outputs/app_renders/chunk02_046.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f50cfedef079caec4c3f25c0f96eb8e602faa652c7878e12329a6c1c417ce54 +size 207303 diff --git a/all_outputs/app_renders/chunk02_047.png b/all_outputs/app_renders/chunk02_047.png new file mode 100644 index 0000000000000000000000000000000000000000..b98cdd39906d43c928a56720ea20e646af7777c5 --- /dev/null +++ b/all_outputs/app_renders/chunk02_047.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11e7227e1ad4d2b3429cca853ffc19d34b38da432b62fc62806f19e12abd4bc1 +size 205395 diff --git a/all_outputs/app_renders/chunk02_048.png b/all_outputs/app_renders/chunk02_048.png new file mode 100644 index 0000000000000000000000000000000000000000..e9a00c1bd8448e741a932344f1588751b0b2e141 --- /dev/null +++ b/all_outputs/app_renders/chunk02_048.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:165257e416b17461c8b3f45613293db2b204a3a7b06e6c1c291d6b381f166c43 +size 225269 diff --git a/all_outputs/app_renders/chunk02_049.png b/all_outputs/app_renders/chunk02_049.png new file mode 100644 index 0000000000000000000000000000000000000000..de7aedfa5e78cd98c25e0dc4358b62dc44e8dde8 --- /dev/null +++ b/all_outputs/app_renders/chunk02_049.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1ed266a84c9e2324e3c09c90e332dcbbef90aadb5f9462ff1eb55f4bdac32fb +size 214534 diff --git a/all_outputs/app_renders/mesh.ply b/all_outputs/app_renders/mesh.ply new file mode 100644 index 0000000000000000000000000000000000000000..bd9d2e6cf3d22a213ca4dd6e1d12f18ca9e14c3d --- /dev/null +++ b/all_outputs/app_renders/mesh.ply @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e677014177ed7dedcec26f7b2f2156d9bfc5418cf7176608a2a9877fad18dfd +size 9395009 diff --git a/all_outputs/app_renders/transforms.json b/all_outputs/app_renders/transforms.json new file mode 100644 index 0000000000000000000000000000000000000000..a2efe61686e1b3987aec5f35de082e0eb6504a5c --- /dev/null +++ b/all_outputs/app_renders/transforms.json @@ -0,0 +1,4504 @@ +{ + "frames": [ + { + "file_path": "chunk00_046.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.871155321598053, + -0.03949369490146637, + 0.4894167184829712, + 0.9788333773612976 + ], + [ + 0.49100762605667114, + -0.07007046043872833, + 0.8683327436447144, + 1.7366653680801392 + ], + [ + -2.9019565417343074e-08, + 0.9967599511146545, + 0.08043389767408371, + 0.16086779534816742 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_011.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.1731010228395462, + -0.6815847158432007, + -0.7109699845314026, + -1.4219398498535156 + ], + [ + -0.9849041104316711, + -0.11979138851165771, + -0.12495599687099457, + -0.24991197884082794 + ], + [ + 3.541380877436495e-08, + 0.7218672633171082, + -0.6920316219329834, + -1.3840632438659668 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_034.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.12025505304336548, + 0.07812711596488953, + 0.9896641373634338, + 1.9793280363082886 + ], + [ + 0.9927431344985962, + -0.009463910013437271, + -0.11988221108913422, + -0.23976434767246246 + ], + [ + -7.940588631072387e-08, + 0.9968985915184021, + -0.07869833707809448, + -0.1573966145515442 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_037.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.7870085835456848, + 0.00026766775408759713, + -0.6169421076774597, + -1.2338840961456299 + ], + [ + -0.6169421672821045, + -0.0003414764069020748, + 0.7870084643363953, + 1.5740169286727905 + ], + [ + 1.3563358436385897e-08, + 0.9999999403953552, + 0.0004337994323577732, + 0.0008677956066094339 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_042.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.26880496740341187, + -0.043226439505815506, + 0.9622240662574768, + 1.9244482517242432 + ], + [ + 0.9631944894790649, + -0.012063350528478622, + 0.26853424310684204, + 0.5370684266090393 + ], + [ + -4.6836930778226815e-08, + 0.998992383480072, + 0.0448782779276371, + 0.08975668251514435 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_019.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.0223702359944582, + 
-0.4785785675048828, + -0.8777598142623901, + -1.7555195093154907 + ], + [ + -0.9997498393058777, + 0.010708422400057316, + 0.019640572369098663, + 0.039281249046325684 + ], + [ + -6.683964670628484e-08, + 0.8779795169830322, + -0.4786984324455261, + -0.9573966264724731 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_036.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.7870085835456848, + 0.01564878784120083, + 0.6167436242103577, + 1.2334871292114258 + ], + [ + 0.6169421076774597, + -0.019962599501013756, + -0.786755383014679, + -1.5735106468200684 + ], + [ + -7.888079522899716e-09, + 0.9996783137321472, + -0.02536514773964882, + -0.05072994530200958 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_032.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9927430152893066, + -0.015877481549978256, + -0.1192023828625679, + -0.2384047508239746 + ], + [ + -0.12025515735149384, + -0.13107366859912872, + -0.9840521812438965, + -1.9681041240692139 + ], + [ + -6.352125936359698e-09, + 0.9912456274032593, + -0.13203181326389313, + -0.2640632688999176 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_002.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.2169816792011261, + 0.9098266959190369, + 0.3537435531616211, + 0.7074869871139526 + ], + [ + 0.9761757254600525, + -0.2022339254617691, + -0.07862922549247742, + -0.15725846588611603 + ], + [ + -4.4795996245738934e-08, + 0.36237695813179016, + -0.9320317506790161, + -1.8640632629394531 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_048.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9971283674240112, + -0.007437578868120909, + 0.07536396384239197, + 0.15072792768478394 + ], + [ + 0.07573007792234421, + 0.09792963415384293, + -0.9923077821731567, + -1.9846155643463135 + ], + [ + 7.882334784881095e-10, + 0.9951655864715576, + 0.09821166098117828, + 0.19642335176467896 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_004.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.8436897993087769, + 0.47171249985694885, + 0.25627103447914124, + 0.5125421285629272 + ], + [ + 0.5368310213088989, + -0.7413488626480103, + -0.40275856852531433, + -0.8055170774459839 + ], + [ + -2.2782268516152726e-08, + 0.4773775339126587, + -0.8786983489990234, + -1.7573965787887573 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_007.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.5368309020996094, + -0.6738536953926086, + -0.5076749324798584, + -1.0153497457504272 + ], + [ + -0.8436898589134216, + -0.4287659525871277, + -0.32302820682525635, + -0.6460564732551575 + ], + [ + 2.247926644827203e-08, + 0.6017317175865173, + -0.7986983060836792, + -1.5973966121673584 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_038.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.616942286491394, + -0.007337070535868406, + 0.7869744300842285, + 1.5739487409591675 + ], + [ + 0.7870086431503296, + -0.005751764867454767, + 0.6169153451919556, + 1.2338305711746216 + ], + [ + 3.8136018787326975e-08, + 0.9999566674232483, + 0.009322633035480976, + 0.018645573407411575 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_012.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.5740314722061157, + 0.5448229908943176, + 
0.6112738847732544, + 1.2225478887557983 + ], + [ + 0.8188332319259644, + -0.38194042444229126, + -0.428524911403656, + -0.8570497632026672 + ], + [ + 1.1855476600430848e-08, + 0.7465181946754456, + -0.6653649806976318, + -1.3307299613952637 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_045.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.491007536649704, + 0.06232677027583122, + -0.8689228892326355, + -1.7378456592559814 + ], + [ + -0.871155321598053, + -0.03512917831540108, + 0.4897492527961731, + 0.9794986248016357 + ], + [ + -4.999462177579517e-08, + 0.9974373579025269, + 0.07154497504234314, + 0.14309002459049225 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_047.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.8711552619934082, + 0.043858159333467484, + -0.4890449345111847, + -0.9780897498130798 + ], + [ + -0.49100762605667114, + 0.077814020216465, + -0.8676730394363403, + -1.7353460788726807 + ], + [ + 1.0856062715447479e-09, + 0.996002733707428, + 0.08932279050350189, + 0.1786455661058426 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_015.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.8188333511352539, + -0.3360178768634796, + -0.4654073119163513, + -0.9308147430419922 + ], + [ + -0.5740313529968262, + -0.4793164134025574, + -0.6638853549957275, + -1.3277708292007446 + ], + [ + -2.569166213106655e-09, + 0.8107698559761047, + -0.5853650569915771, + -1.1707299947738647 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_013.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.5740314722061157, + -0.5229872465133667, + -0.6300572156906128, + -1.2601144313812256 + ], + [ + -0.8188331723213196, + 0.36663293838500977, + 0.4416927397251129, + 0.8833852410316467 + ], + [ + 3.825975625204592e-08, + 0.7694573402404785, + -0.638698160648346, + -1.277396559715271 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_001.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9761756658554077, + 0.20802022516727448, + 0.06171507388353348, + 0.12343014776706696 + ], + [ + 0.2169819474220276, + 0.9358579516410828, + 0.2776486873626709, + 0.5552974343299866 + ], + [ + -1.1585714609907427e-09, + 0.28442493081092834, + -0.9586983919143677, + -1.9173966646194458 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_025.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9322091341018677, + -0.1153433546423912, + -0.3430483639240265, + -0.686096727848053 + ], + [ + -0.3619202673435211, + 0.2970934212207794, + 0.8836001753807068, + 1.767200231552124 + ], + [ + 1.8305106763705226e-08, + 0.9478562474250793, + -0.31869834661483765, + -0.6373966336250305 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_030.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9150875806808472, + 0.07474938035011292, + 0.39626654982566833, + 0.7925331592559814 + ], + [ + 0.40325507521629333, + 0.16962523758411407, + 0.8992289304733276, + 1.7984578609466553 + ], + [ + 1.716244923954946e-09, + 0.9826697111129761, + -0.1853650063276291, + -0.3707299530506134 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_017.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9997498393058777, + 0.011901685036718845, + 0.018941443413496017, + 0.037882883101701736 + ], + [ + 
0.02237025648355484, + 0.5318984985351562, + 0.8465126156806946, + 1.6930251121520996 + ], + [ + 8.493424297206786e-10, + 0.8467245101928711, + -0.5320317149162292, + -1.064063310623169 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_024.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9322090744972229, + 0.12499461323022842, + 0.3396507501602173, + 0.6793014407157898 + ], + [ + 0.3619202673435211, + -0.32195255160331726, + -0.8748487830162048, + -1.7496975660324097 + ], + [ + 2.4830260514363545e-08, + 0.9384684562683105, + -0.34536516666412354, + -0.6907299757003784 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_020.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.7227480411529541, + 0.3124043047428131, + 0.6164728403091431, + 1.2329457998275757 + ], + [ + 0.6911115646362305, + -0.32670503854751587, + -0.644692599773407, + -1.289385199546814 + ], + [ + 4.830169331171419e-08, + 0.8920018672943115, + -0.452031672000885, + -0.9040632843971252 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_008.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9849040508270264, + 0.13363951444625854, + 0.11002029478549957, + 0.22004058957099915 + ], + [ + 0.17310108244419098, + -0.7603772878646851, + -0.6259893774986267, + -1.2519787549972534 + ], + [ + 4.749894255695608e-09, + 0.6355841159820557, + -0.772031843662262, + -1.5440633296966553 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_028.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.4032551050186157, + 0.2184298038482666, + 0.8886358737945557, + 1.7772717475891113 + ], + [ + 0.9150876402854919, + -0.09625634551048279, + -0.39159855246543884, + -0.7831969857215881 + ], + [ + 3.2025262441948144e-08, + 0.9710938334465027, + -0.23869828879833221, + -0.4773966073989868 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_022.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.6911121010780334, + 0.28815844655036926, + 0.6628194451332092, + 1.3256385326385498 + ], + [ + 0.7227481603622437, + 0.2755449116230011, + 0.6338063478469849, + 1.267612338066101 + ], + [ + -2.2627428819532724e-08, + 0.9170823693275452, + -0.398698627948761, + -0.7973966002464294 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_021.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.7227480411529541, + -0.29397472739219666, + -0.6254712343215942, + -1.250942349433899 + ], + [ + -0.69111168384552, + 0.3074316680431366, + 0.6541028022766113, + 1.308205485343933 + ], + [ + -6.4104170860446175e-09, + 0.9050219058990479, + -0.42536503076553345, + -0.8507299423217773 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_029.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.40325504541397095, + -0.19402749836444855, + -0.8942811489105225, + -1.7885621786117554 + ], + [ + -0.9150876402854919, + 0.08550280332565308, + 0.3940861225128174, + 0.7881724238395691 + ], + [ + 9.464635297717905e-09, + 0.9772628545761108, + -0.21203161776065826, + -0.4240632653236389 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_033.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9927430748939514, + 0.012670684605836868, + 0.11958576738834381, + 0.23917151987552643 + ], + [ + 0.12025514990091324, + 0.10460034757852554, + 
0.9872170686721802, + 1.9744340181350708 + ], + [ + 2.5984103757537014e-10, + 0.9944336414337158, + -0.10536503791809082, + -0.2107299417257309 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_049.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9971283674240112, + 0.008110730908811092, + -0.07529449462890625, + -0.1505889892578125 + ], + [ + -0.07573007792234421, + -0.10679296404123306, + 0.9913930892944336, + 1.9827861785888672 + ], + [ + 6.4155347700989296e-12, + 0.9942482113838196, + 0.10710052400827408, + 0.21420112252235413 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_006.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.5368310213088989, + 0.6963520050048828, + 0.4763469696044922, + 0.95269376039505 + ], + [ + 0.8436897993087769, + 0.44308143854141235, + 0.3030945658683777, + 0.6061891317367554 + ], + [ + -6.281499209670471e-10, + 0.5645996332168579, + -0.8253649473190308, + -1.6507298946380615 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_016.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9997497797012329, + -0.012498226016759872, + -0.0185532383620739, + -0.037106480449438095 + ], + [ + -0.02237025462090969, + -0.5585586428642273, + -0.8291633725166321, + -1.6583268642425537 + ], + [ + 2.5864677066778086e-10, + 0.82937091588974, + -0.5586984753608704, + -1.117396593093872 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_005.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.8436897993087769, + -0.4573970139026642, + -0.281025767326355, + -0.5620516538619995 + ], + [ + -0.5368310213088989, + 0.7188504338264465, + 0.44166338443756104, + 0.8833268284797668 + ], + [ + -7.76289965642718e-09, + 0.5234902501106262, + -0.8520317077636719, + -1.7040632963180542 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_027.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.36192020773887634, + -0.24737566709518433, + -0.8987875580787659, + -1.7975751161575317 + ], + [ + -0.9322091341018677, + -0.09604101628065109, + -0.34894469380378723, + -0.6978893876075745 + ], + [ + 3.2009950245992513e-09, + 0.9641481041908264, + -0.2653650641441345, + -0.5307299494743347 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_035.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.12025521695613861, + -0.051653992384672165, + -0.9913984537124634, + -1.982796549797058 + ], + [ + -0.992743194103241, + 0.006256781052798033, + 0.12009219080209732, + 0.24018451571464539 + ], + [ + -1.3832058698426408e-07, + 0.9986456036567688, + -0.052031710743904114, + -0.10406327992677689 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_018.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.022370176389813423, + 0.5052384734153748, + 0.8626898527145386, + 1.725379467010498 + ], + [ + 0.9997498393058777, + -0.01130523532629013, + -0.019303403794765472, + -0.03860684111714363 + ], + [ + 2.516898689464142e-08, + 0.8629058003425598, + -0.5053650140762329, + -1.0107299089431763 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_014.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.8188334107398987, + 0.3513253927230835, + 0.45396310091018677, + 0.9079262614250183 + ], + [ + 0.574031412601471, + 0.501151978969574, + 0.6475605964660645, + 1.295121192932129 + 
], + [ + -6.104955474484086e-08, + 0.7908332347869873, + -0.6120318174362183, + -1.2240632772445679 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_043.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.26880496740341187, + 0.05178830772638321, + -0.9618014097213745, + -1.9236027002334595 + ], + [ + -0.9631946682929993, + 0.014452844858169556, + -0.268416166305542, + -0.5368323922157288 + ], + [ + -1.0946415329726733e-08, + 0.9985535144805908, + 0.05376718193292618, + 0.10753446072340012 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_044.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.49100756645202637, + -0.05458323657512665, + 0.8694436550140381, + 1.7388873100280762 + ], + [ + 0.871155321598053, + 0.030764596536755562, + -0.49004286527633667, + -0.9800856709480286 + ], + [ + 2.7613968001105604e-08, + 0.998035192489624, + 0.06265611201524734, + 0.12531223893165588 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_039.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.6169419884681702, + 0.014332816936075687, + -0.7868781089782715, + -1.5737560987472534 + ], + [ + -0.7870086431503296, + 0.011235497891902924, + -0.6168397665023804, + -1.2336795330047607 + ], + [ + -4.8207603242644836e-08, + 0.9998341798782349, + 0.01821168325841427, + 0.03642335161566734 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_041.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9631946682929993, + 0.009674138389527798, + -0.2686309516429901, + -0.5372617840766907 + ], + [ + -0.2688050866127014, + -0.03466471657156944, + 0.9625706076622009, + 1.9251412153244019 + ], + [ + 3.187987829278427e-08, + 0.999352216720581, + 0.03598928079009056, + 0.07197890430688858 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_026.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.36192038655281067, + 0.2722345292568207, + 0.891572892665863, + 1.7831456661224365 + ], + [ + 0.9322091341018677, + 0.10569204390048981, + 0.3461437225341797, + 0.6922873258590698 + ], + [ + 3.529831005266715e-08, + 0.9564087390899658, + -0.2920317053794861, + -0.5840632915496826 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_040.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9631946086883545, + -0.007284775376319885, + 0.2687063217163086, + 0.5374126434326172 + ], + [ + 0.26880502700805664, + 0.026103179901838303, + -0.9628408551216125, + -1.925681710243225 + ], + [ + -1.695989304550949e-08, + 0.999632716178894, + 0.02710062451660633, + 0.05420112982392311 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_000.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9761756062507629, + -0.2138063758611679, + -0.036986272782087326, + -0.07397253066301346 + ], + [ + -0.2169819176197052, + -0.9618893265724182, + -0.1663968414068222, + -0.33279353380203247 + ], + [ + 6.311709821460454e-09, + 0.1704578846693039, + -0.9853649735450745, + -1.970729947090149 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_003.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.21698197722434998, + -0.8837953209877014, + -0.4145176112651825, + -0.829035222530365 + ], + [ + -0.9761756658554077, + 0.19644777476787567, + 0.09213794022798538, + 0.18427592515945435 + ], + [ + 7.170520177623985e-09, + 
0.42463424801826477, + -0.9053651094436646, + -1.81072998046875 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_010.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.17310109734535217, + 0.7078489065170288, + 0.6848254799842834, + 1.369650959968567 + ], + [ + 0.9849040508270264, + 0.12440746277570724, + 0.12036097794771194, + 0.24072200059890747 + ], + [ + -2.5726871299980303e-08, + 0.6953220367431641, + -0.7186983227729797, + -1.4373966455459595 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_031.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9150875806808472, + -0.06399592012166977, + -0.3981447219848633, + -0.7962893843650818 + ], + [ + -0.4032551348209381, + -0.14522308111190796, + -0.9034909009933472, + -1.8069816827774048 + ], + [ + -3.7874336555887567e-08, + 0.9873271584510803, + -0.15869852900505066, + -0.3173966109752655 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_023.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.6911116242408752, + -0.2688852548599243, + -0.6708692312240601, + -1.3417383432388306 + ], + [ + -0.7227481007575989, + -0.2571156919002533, + -0.6415037512779236, + -1.2830073833465576 + ], + [ + -3.5878695570090713e-09, + 0.9282200932502747, + -0.3720319867134094, + -0.7440632581710815 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk00_009.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9849044680595398, + -0.1290234923362732, + -0.11539901047945023, + -0.23079797625541687 + ], + [ + -0.17310111224651337, + 0.7341129183769226, + 0.6565930247306824, + 1.313185691833496 + ], + [ + 1.2061724952161512e-09, + 0.6666567921638489, + -0.7453652620315552, + -1.4907299280166626 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_046.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9974478483200073, + 0.03747563064098358, + -0.06077295169234276, + -0.12154590338468552 + ], + [ + -0.07139869779348373, + 0.5235387682914734, + -0.8490049242973328, + -1.6980098485946655 + ], + [ + 4.518736496095244e-09, + 0.8511772751808167, + 0.5248783826828003, + 1.049756646156311 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_011.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.3116190433502197, + 0.20312319695949554, + -0.9282427430152893, + -1.8564854860305786 + ], + [ + -0.9502071142196655, + -0.06661402434110641, + 0.30441585183143616, + 0.6088317036628723 + ], + [ + -6.546089537096123e-08, + 0.9768846035003662, + 0.21376730501651764, + 0.42753446102142334 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_034.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.6879662275314331, + -0.3035140633583069, + 0.6592281460762024, + 1.3184562921524048 + ], + [ + 0.725742757320404, + 0.28771549463272095, + -0.6249138116836548, + -1.2498276233673096 + ], + [ + -7.176144123377526e-09, + 0.9083496332168579, + 0.4182116687297821, + 0.8364233374595642 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_037.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.725742757320404, + 0.3060612380504608, + -0.6161363124847412, + -1.2322726249694824 + ], + [ + -0.6879661679267883, + 0.32286718487739563, + -0.6499687433242798, + -1.29993736743927 + ], + [ + -2.3485830169533983e-08, + 0.8955910205841064, + 0.44487830996513367, + 
0.8897566795349121 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_042.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.35786816477775574, + -0.45691606402397156, + 0.8143451809883118, + 1.628690481185913 + ], + [ + 0.9337721467018127, + 0.17511309683322906, + -0.31209778785705566, + -0.6241955757141113 + ], + [ + -7.545573055267596e-09, + 0.872102677822113, + 0.48932287096977234, + 0.9786455631256104 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_019.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.8163325190544128, + 0.16454067826271057, + -0.5536494255065918, + -1.107298731803894 + ], + [ + -0.5775822997093201, + -0.23255543410778046, + 0.7825067043304443, + 1.5650132894515991 + ], + [ + 2.2985661374264055e-08, + 0.9585637450218201, + 0.28487828373908997, + 0.5697566866874695 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_036.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.7257428765296936, + -0.29994598031044006, + 0.619136393070221, + 1.238272786140442 + ], + [ + 0.6879661679267883, + -0.31641626358032227, + 0.6531335115432739, + 1.3062670230865479 + ], + [ + 1.395890336652883e-08, + 0.8999518156051636, + 0.4359894096851349, + 0.8719788789749146 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_032.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.026712018996477127, + -0.40029096603393555, + 0.9159986972808838, + 1.8319973945617676 + ], + [ + 0.9996431469917297, + -0.010696420446038246, + 0.024476900696754456, + 0.048953939229249954 + ], + [ + 2.6315266410392724e-08, + 0.9163256883621216, + 0.40043383836746216, + 0.8008677959442139 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_002.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.6515269875526428, + -0.10147920250892639, + 0.7518075704574585, + 1.5036150217056274 + ], + [ + 0.7586254477500916, + 0.08715302497148514, + -0.6456716060638428, + -1.291343092918396 + ], + [ + -8.811258567220648e-08, + 0.9910128116607666, + 0.133767232298851, + 0.267534464597702 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_048.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.07139849662780762, + -0.5412712693214417, + 0.8378113508224487, + 1.675622582435608 + ], + [ + 0.9974479675292969, + 0.03874471038579941, + -0.05997166410088539, + -0.11994338780641556 + ], + [ + 8.349418578745826e-08, + 0.8399550318717957, + 0.5426560640335083, + 1.0853122472763062 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_004.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.7586255073547363, + -0.0987357571721077, + 0.6440020799636841, + 1.2880041599273682 + ], + [ + 0.6515270471572876, + -0.11496599018573761, + 0.7498636245727539, + 1.4997272491455078 + ], + [ + -3.895905820172629e-08, + 0.9884504079818726, + 0.15154506266117096, + 0.30309000611305237 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_007.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.8922457695007324, + 0.08047144114971161, + -0.4443216621875763, + -0.8886432647705078 + ], + [ + -0.4515500068664551, + -0.15900854766368866, + 0.8779628872871399, + 1.7559257745742798 + ], + [ + -1.3520873309857961e-08, + 0.9839921593666077, + 0.17821162939071655, + 0.35642334818840027 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { 
+ "file_path": "chunk01_038.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9133275747299194, + -0.18478570878505707, + 0.3628869950771332, + 0.7257739901542664 + ], + [ + 0.407225638628006, + 0.4144381582736969, + -0.8138847351074219, + -1.6277694702148438 + ], + [ + 2.66239617019437e-08, + 0.8911202549934387, + 0.4537672698497772, + 0.9075344800949097 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_012.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9502070546150208, + -0.06938391178846359, + 0.3037964701652527, + 0.6075929403305054 + ], + [ + 0.3116190433502197, + -0.21156944334506989, + 0.9263540506362915, + 1.8527082204818726 + ], + [ + -5.67315083799258e-10, + 0.9748969674110413, + 0.2226562201976776, + 0.4453122317790985 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_045.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9337721467018127, + 0.18465620279312134, + -0.30654802918434143, + -0.6130960583686829 + ], + [ + -0.35786813497543335, + 0.4818166196346283, + -0.7998644709587097, + -1.5997289419174194 + ], + [ + 2.1623725032782204e-09, + 0.8565949201583862, + 0.5159894824028015, + 1.031978964805603 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_047.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9974478483200073, + -0.038110289722681046, + 0.06037699803709984, + 0.12075398117303848 + ], + [ + 0.07139871269464493, + -0.532404899597168, + 0.8434733152389526, + 1.6869465112686157 + ], + [ + -3.404947879204201e-09, + 0.84563148021698, + 0.5337671637535095, + 1.0675344467163086 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_015.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9856466054916382, + -0.04209115356206894, + 0.16349056363105774, + 0.3269811272621155 + ], + [ + 0.16882188618183136, + -0.2457442730665207, + 0.9545202851295471, + 1.9090405702590942 + ], + [ + 5.919534196152654e-09, + 0.9684204459190369, + 0.24932286143302917, + 0.4986455738544464 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_013.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9502071142196655, + 0.07215379923582077, + -0.30315056443214417, + -0.6063010692596436 + ], + [ + -0.3116190433502197, + 0.22001557052135468, + -0.924384593963623, + -1.848768949508667 + ], + [ + 4.352729732204352e-09, + 0.9728242754936218, + 0.2315448522567749, + 0.46309003233909607 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_001.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.07572991400957108, + 0.12451969087123871, + -0.9893230199813843, + -1.9786458015441895 + ], + [ + -0.9971284866333008, + 0.009456825442612171, + -0.07513722777366638, + -0.15027453005313873 + ], + [ + -1.0078598933205285e-07, + 0.9921721816062927, + 0.1248781755566597, + 0.24975667893886566 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_025.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.22121955454349518, + 0.32983216643333435, + -0.9177541136741638, + -1.8355083465576172 + ], + [ + -0.9752240180969238, + 0.07481908798217773, + -0.20818307995796204, + -0.4163661599159241 + ], + [ + 4.5388034664028964e-08, + 0.941070020198822, + 0.33821171522140503, + 0.6764233708381653 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_030.png", + "camera_angle_x": 
0.6981317007977318, + "transform_matrix": [ + [ + 0.9996431469917297, + -0.01022154837846756, + 0.02467905730009079, + 0.049358103424310684 + ], + [ + 0.026712093502283096, + 0.38251951336860657, + -0.9235613346099854, + -1.847122311592102 + ], + [ + 1.3583351110568742e-10, + 0.923891007900238, + 0.3826560378074646, + 0.7653122544288635 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_017.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.16882194578647614, + 0.26326683163642883, + -0.949836790561676, + -1.8996734619140625 + ], + [ + -0.985646665096283, + -0.045092519372701645, + 0.16268834471702576, + 0.3253767192363739 + ], + [ + -2.0046254434191724e-08, + 0.963668704032898, + 0.26710057258605957, + 0.5342011451721191 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_024.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.22121965885162354, + -0.3211635947227478, + 0.9208236932754517, + 1.8416470289230347 + ], + [ + 0.9752242565155029, + -0.0728527158498764, + 0.20887932181358337, + 0.41775864362716675 + ], + [ + -9.124337196908527e-08, + 0.9442175626754761, + 0.3293226957321167, + 0.6586455702781677 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_020.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.5775823593139648, + -0.23981177806854248, + 0.7803133726119995, + 1.5606268644332886 + ], + [ + 0.8163324594497681, + -0.1696748584508896, + 0.5520976781845093, + 1.104195237159729 + ], + [ + 2.4654394081835562e-08, + 0.9558769464492798, + 0.2937673032283783, + 0.587534487247467 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_008.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.4515500068664551, + -0.16693967580795288, + 0.876489520072937, + 1.7529789209365845 + ], + [ + 0.892245888710022, + -0.08448521047830582, + 0.4435759484767914, + 0.8871518969535828 + ], + [ + -6.554485310061864e-08, + 0.9823408126831055, + 0.18710047006607056, + 0.37420111894607544 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_028.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.8460137844085693, + -0.19453921914100647, + 0.4964030683040619, + 0.9928059577941895 + ], + [ + 0.5331618189811707, + -0.3086922764778137, + 0.7876853346824646, + 1.5753703117370605 + ], + [ + -6.205164160277832e-11, + 0.9310554265975952, + 0.36487823724746704, + 0.7297567129135132 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_022.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9752240180969238, + -0.068919837474823, + 0.21020974218845367, + 0.42041948437690735 + ], + [ + 0.2212195247411728, + 0.30382612347602844, + -0.9266884922981262, + -1.8533769845962524 + ], + [ + 3.9377026084252975e-09, + 0.9502314329147339, + 0.3115449547767639, + 0.6230900287628174 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_021.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.5775822401046753, + 0.24706798791885376, + -0.7780463695526123, + -1.5560927391052246 + ], + [ + -0.8163325190544128, + 0.17480875551700592, + -0.550493597984314, + -1.1009870767593384 + ], + [ + -8.043642196753353e-09, + 0.9530998468399048, + 0.30265605449676514, + 0.6053122282028198 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_029.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 
0.8460133671760559, + 0.19927838444709778, + -0.4945194721221924, + -0.9890390038490295 + ], + [ + -0.5331616997718811, + 0.31621211767196655, + -0.7846964001655579, + -1.5693929195404053 + ], + [ + 1.114261394263849e-08, + 0.9275224804878235, + 0.37376728653907776, + 0.747534453868866 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_033.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.026712097227573395, + 0.4091767966747284, + -0.9120640158653259, + -1.8241281509399414 + ], + [ + -0.9996431469917297, + 0.010933931916952133, + -0.0243717972189188, + -0.04874366149306297 + ], + [ + 5.070664244044565e-08, + 0.9123895764350891, + 0.4093228876590729, + 0.8186455965042114 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_049.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.07139888405799866, + 0.550137460231781, + -0.8320164084434509, + -1.6640325784683228 + ], + [ + -0.9974480271339417, + -0.0393797904253006, + 0.05955687537789345, + 0.11911376565694809 + ], + [ + -1.1674700317598763e-08, + 0.8341453075408936, + 0.5515449047088623, + 1.1030900478363037 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_006.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.8922458291053772, + -0.0764576643705368, + 0.4450298547744751, + 0.890059769153595 + ], + [ + 0.4515499472618103, + 0.1510774940252304, + -0.8793624639511108, + -1.7587248086929321 + ], + [ + -9.420890734190834e-09, + 0.9855606555938721, + 0.16932271420955658, + 0.3386455774307251 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_016.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.1688218116760254, + -0.2545055150985718, + 0.9522217512130737, + 1.904443383216858 + ], + [ + 0.9856466054916382, + 0.043591730296611786, + -0.1630968302488327, + -0.3261937201023102 + ], + [ + 6.736630808745758e-09, + 0.9660884141921997, + 0.2582117021083832, + 0.5164233446121216 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_005.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.7586255073547363, + 0.10452700406312943, + -0.6430874466896057, + -1.2861748933792114 + ], + [ + -0.6515269875526428, + 0.12170925736427307, + -0.7487987279891968, + -1.497597336769104 + ], + [ + 2.8265223050993882e-08, + 0.9870465993881226, + 0.16043390333652496, + 0.3208678066730499 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_027.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.5331616997718811, + 0.3011718988418579, + -0.7905909419059753, + -1.5811820030212402 + ], + [ + -0.8460133671760559, + -0.1897999793291092, + 0.49823427200317383, + 0.9964685440063477 + ], + [ + 9.413696488991263e-09, + 0.9344899654388428, + 0.3559895157814026, + 0.7119789123535156 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_035.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.6879660487174988, + 0.3099651634693146, + -0.6562196612358093, + -1.312439203262329 + ], + [ + -0.7257428169250488, + -0.29383066296577454, + 0.6220617890357971, + 1.2441236972808838 + ], + [ + 2.4681849453145333e-08, + 0.9042041301727295, + 0.4271005690097809, + 0.8542011380195618 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_018.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.8163324594497681, + -0.1594066023826599, + 0.5551493763923645, + 
1.110298752784729 + ], + [ + 0.5775822997093201, + 0.22529909014701843, + -0.7846266627311707, + -1.5692533254623413 + ], + [ + 2.156687095578036e-08, + 0.9611606597900391, + 0.2759893834590912, + 0.5519788861274719 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_014.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9856465458869934, + 0.040590520948171616, + -0.16386963427066803, + -0.3277391791343689 + ], + [ + -0.16882193088531494, + 0.23698273301124573, + -0.9567333459854126, + -1.9134663343429565 + ], + [ + 6.8691305976642525e-09, + 0.9706657528877258, + 0.24043378233909607, + 0.48086780309677124 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_043.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.3578681945800781, + 0.4652162194252014, + -0.8096321821212769, + -1.6192643642425537 + ], + [ + -0.9337721467018127, + -0.1782941073179245, + 0.3102915585041046, + 0.6205829977989197 + ], + [ + 3.050529784331957e-08, + 0.8670554161071777, + 0.49821168184280396, + 0.9964233636856079 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_044.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9337722063064575, + -0.18147513270378113, + 0.3084418475627899, + 0.6168836951255798 + ], + [ + 0.35786813497543335, + -0.47351640462875366, + 0.8048059940338135, + 1.609611988067627 + ], + [ + -8.332278866873821e-09, + 0.8618869185447693, + 0.507100522518158, + 1.0142011642456055 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_039.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9133276343345642, + 0.18840540945529938, + -0.36102092266082764, + -0.7220418453216553 + ], + [ + -0.4072256088256836, + -0.42255666851997375, + 0.8096994757652283, + 1.6193989515304565 + ], + [ + -1.69695777429979e-08, + 0.8865378499031067, + 0.4626561105251312, + 0.9253122210502625 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_041.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.4072255790233612, + 0.43879351019859314, + -0.801016628742218, + -1.6020331382751465 + ], + [ + -0.9133276343345642, + 0.19564493000507355, + -0.35714948177337646, + -0.7142989635467529 + ], + [ + -9.750104723593722e-09, + 0.8770310282707214, + 0.4804338216781616, + 0.9608678221702576 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_026.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.5331616401672363, + -0.2936517298221588, + 0.7934149503707886, + 1.5868299007415771 + ], + [ + 0.8460133671760559, + 0.18506069481372833, + -0.5000139474868774, + -1.0000278949737549 + ], + [ + -3.0723654731446004e-09, + 0.9378279447555542, + 0.34710055589675903, + 0.6942011117935181 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_040.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.4072256088256836, + -0.43067508935928345, + 0.805410623550415, + 1.61082124710083 + ], + [ + 0.9133276343345642, + -0.19202519953250885, + 0.35910865664482117, + 0.7182173132896423 + ], + [ + -2.8639375315719917e-08, + 0.8818420171737671, + 0.47154501080513, + 0.94309002161026 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_000.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.07573004812002182, + -0.11565639078617096, + 0.9903981685638428, + 1.9807963371276855 + ], + [ + 0.9971283674240112, + 
-0.008783889934420586, + 0.07521891593933105, + 0.1504378616809845 + ], + [ + 1.1444480918498812e-08, + 0.9932504296302795, + 0.11598948389291763, + 0.2319789081811905 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_003.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.6515271067619324, + 0.10822252184152603, + -0.7508665919303894, + -1.5017329454421997 + ], + [ + -0.7586255669593811, + -0.09294436126947403, + 0.6448633670806885, + 1.289726734161377 + ], + [ + 9.634071318487258e-10, + 0.9897724390029907, + 0.14265595376491547, + 0.2853122353553772 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_010.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.3116190731525421, + -0.1946769505739212, + 0.9300507307052612, + 1.860101580619812 + ], + [ + 0.9502071142196655, + 0.06384404748678207, + -0.30500879883766174, + -0.6100175976753235 + ], + [ + 1.0254495919070905e-08, + 0.978787362575531, + 0.20487846434116364, + 0.40975669026374817 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_031.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9996432662010193, + 0.010458986274898052, + -0.0245793666690588, + -0.0491587333381176 + ], + [ + -0.02671208791434765, + -0.39140546321868896, + 0.9198306202888489, + 1.8396612405776978 + ], + [ + -1.3625008898898727e-09, + 0.9201589822769165, + 0.3915450870990753, + 0.7830899953842163 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_023.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9752240180969238, + 0.07088623195886612, + -0.20955482125282288, + -0.41910964250564575 + ], + [ + -0.2212195247411728, + -0.31249478459358215, + 0.9238013029098511, + 1.8476026058197021 + ], + [ + 2.298907592068744e-10, + 0.9472708702087402, + 0.3204338848590851, + 0.6408677697181702 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk01_009.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.4515499472618103, + 0.17487074434757233, + -0.8749416470527649, + -1.7498832941055298 + ], + [ + -0.892245888710022, + 0.08849891275167465, + -0.4427926540374756, + -0.8855852484703064 + ], + [ + -6.184133383158041e-08, + 0.9806060791015625, + 0.19598935544490814, + 0.391978919506073 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_046.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.0021715061739087105, + -0.9693204760551453, + 0.24579058587551117, + 0.4915813207626343 + ], + [ + 0.999997615814209, + -0.0021048723720014095, + 0.0005338113405741751, + 0.001067505800165236 + ], + [ + -7.169570892529009e-08, + 0.24579116702079773, + 0.9693228006362915, + 1.938645601272583 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_011.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.8941985368728638, + 0.2946620285511017, + -0.337021142244339, + -0.674042284488678 + ], + [ + -0.4476706087589264, + 0.5885719060897827, + -0.6731821298599243, + -1.3463642597198486 + ], + [ + -4.13156513445756e-08, + 0.7528329491615295, + 0.6582116484642029, + 1.3164232969284058 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_034.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.5573745369911194, + -0.7162299156188965, + 0.41993850469589233, + 0.8398770689964294 + ], + [ + 0.8302611708641052, + -0.4808225631713867, + 0.28191497921943665, + 
0.5638298988342285 + ], + [ + -2.7109798850233346e-08, + 0.5057908892631531, + 0.8626561164855957, + 1.7253122329711914 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_037.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9803593158721924, + 0.1753920018672943, + -0.09018462896347046, + -0.18036924302577972 + ], + [ + -0.19721972942352295, + -0.8718558549880981, + 0.44829875230789185, + 0.8965973854064941 + ], + [ + 1.3634604556500562e-08, + 0.4572800397872925, + 0.8893227577209473, + 1.7786455154418945 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_042.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.8326744437217712, + -0.5170861482620239, + 0.19818134605884552, + 0.39636266231536865 + ], + [ + 0.5537634491920471, + -0.7775241136550903, + 0.2979981601238251, + 0.5959963798522949 + ], + [ + 9.671700773594694e-09, + 0.35788092017173767, + 0.9337672591209412, + 1.8675345182418823 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_019.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.789680540561676, + 0.4474528431892395, + -0.4197506010532379, + -0.8395012021064758 + ], + [ + -0.613518238067627, + 0.5759320259094238, + -0.5402755737304688, + -1.0805511474609375 + ], + [ + -2.2834598212284618e-08, + 0.6841697692871094, + 0.7293227910995483, + 1.4586455821990967 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_036.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9803593158721924, + -0.17363891005516052, + 0.09351544827222824, + 0.18703091144561768 + ], + [ + 0.19721969962120056, + 0.8631415963172913, + -0.4648558497428894, + -0.9297118186950684 + ], + [ + -1.1569128766097947e-08, + 0.4741688370704651, + 0.8804339170455933, + 1.7608678340911865 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_032.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.8302611708641052, + -0.47091370820999146, + 0.2981722354888916, + 0.5963444709777832 + ], + [ + 0.5573745369911194, + 0.7014696598052979, + -0.4441552758216858, + -0.8883105516433716 + ], + [ + 7.955815561899726e-09, + 0.5349584817886353, + 0.8448783755302429, + 1.6897566318511963 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_002.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.6548156142234802, + -0.43700578808784485, + 0.6166380643844604, + 1.2332760095596313 + ], + [ + 0.7557886838912964, + -0.37862199544906616, + 0.5342554450035095, + 1.068510890007019 + ], + [ + -1.3972071499779304e-08, + 0.8158868551254272, + 0.5782116055488586, + 1.1564233303070068 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_048.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.7055695652961731, + -0.6994997262954712, + 0.11345444619655609, + 0.22690880298614502 + ], + [ + 0.7086408138275146, + 0.6964680552482605, + -0.11296280473470688, + -0.22592543065547943 + ], + [ + 2.175198154930058e-08, + 0.16010154783725739, + 0.9871005415916443, + 1.9742010831832886 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_004.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9488447904586792, + -0.18817944824695587, + 0.25353914499282837, + 0.5070782899856567 + ], + [ + 0.31574293971061707, + 0.5655014514923096, + -0.7619150280952454, + -1.5238300561904907 + ], + [ + -5.70235414443232e-09, + 
0.8029922842979431, + 0.5959894061088562, + 1.191978931427002 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_007.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.31574302911758423, + 0.5908039808273315, + -0.7424668073654175, + -1.4849337339401245 + ], + [ + -0.9488447308540344, + 0.19659936428070068, + -0.24706749618053436, + -0.49413496255874634 + ], + [ + 1.9968233289091586e-08, + 0.7824955582618713, + 0.6226561665534973, + 1.245312213897705 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_038.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.19721996784210205, + -0.8805703520774841, + 0.4309300184249878, + 0.8618597388267517 + ], + [ + 0.9803595542907715, + -0.17714530229568481, + 0.08669053018093109, + 0.173381045460701 + ], + [ + 6.4376006747579595e-09, + 0.4395633339881897, + 0.8982115983963013, + 1.796423316001892 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_012.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9922113418579102, + -0.08309782296419144, + 0.09279739111661911, + 0.18559472262859344 + ], + [ + 0.12456566095352173, + 0.6619046330451965, + -0.7391656041145325, + -1.4783308506011963 + ], + [ + 1.3344986449226326e-08, + 0.7449678778648376, + 0.6671004295349121, + 1.3342010974884033 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_045.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9999976754188538, + 0.0020856494084000587, + -0.0006047997740097344, + -0.001209599431604147 + ], + [ + -0.0021715699695050716, + -0.9604316353797913, + 0.27850741147994995, + 0.5570147633552551 + ], + [ + -3.217276445255379e-11, + 0.2785080671310425, + 0.9604339003562927, + 1.9208678007125854 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_047.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.0021716379560530186, + 0.978209376335144, + -0.207609623670578, + -0.4152190387248993 + ], + [ + -0.9999976754188538, + 0.0021242971997708082, + -0.00045096935355104506, + -0.0009016793337650597 + ], + [ + -1.1631762930619516e-07, + 0.2076101154088974, + 0.9782116413116455, + 1.9564234018325806 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_015.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.12456566840410233, + 0.6883637309074402, + -0.7145898938179016, + -1.4291799068450928 + ], + [ + -0.9922113418579102, + 0.08641958981752396, + -0.08971208333969116, + -0.17942415177822113 + ], + [ + 5.379689582696301e-09, + 0.7201992869377136, + 0.693767249584198, + 1.387534499168396 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_013.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9922114014625549, + 0.08420506119728088, + -0.09179381281137466, + -0.18358762562274933 + ], + [ + -0.12456563115119934, + -0.6707244515419006, + 0.7311717867851257, + 1.4623435735702515 + ], + [ + 8.864133427266552e-09, + 0.7369112968444824, + 0.6759894490242004, + 1.3519788980484009 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_001.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.7557885646820068, + 0.37280142307281494, + -0.5383332371711731, + -1.0766663551330566 + ], + [ + -0.654815673828125, + -0.43028756976127625, + 0.6213445067405701, + 1.2426890134811401 + ], + [ + 1.098294255541532e-08, + 0.8221141695976257, + 0.5693227052688599, + 
1.1386455297470093 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_025.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.26461926102638245, + 0.7547568082809448, + -0.600265622138977, + -1.2005313634872437 + ], + [ + -0.9643529653549194, + -0.2071058750152588, + 0.16471339762210846, + 0.32942676544189453 + ], + [ + 2.4348643989924312e-08, + 0.6224542856216431, + 0.7826561331748962, + 1.5653122663497925 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_030.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.19295991957187653, + -0.8115566372871399, + 0.5514910817146301, + 1.1029821634292603 + ], + [ + 0.9812067151069641, + 0.1595972627401352, + -0.1084539070725441, + -0.21690788865089417 + ], + [ + -1.7773217564354127e-08, + 0.562053918838501, + 0.8271005749702454, + 1.6542011499404907 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_017.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.6135181784629822, + 0.5618932843208313, + -0.5548616051673889, + -1.1097232103347778 + ], + [ + -0.7896806001663208, + -0.43654584884643555, + 0.43108272552490234, + 0.8621654510498047 + ], + [ + -2.1958413753964123e-08, + 0.7026405334472656, + 0.7115449905395508, + 1.4230899810791016 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_024.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.26461926102638245, + -0.7461847066879272, + 0.6108887195587158, + 1.2217774391174316 + ], + [ + 0.9643529653549194, + 0.20475371181964874, + -0.16762834787368774, + -0.3352566957473755 + ], + [ + -2.0823939905767475e-08, + 0.6334700584411621, + 0.7737672328948975, + 1.547534465789795 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_020.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.8690145611763, + -0.3652571439743042, + 0.33376768231391907, + 0.6675353646278381 + ], + [ + 0.49478647112846375, + 0.6415166854858398, + -0.5862104296684265, + -1.172420859336853 + ], + [ + 2.2729853554892543e-09, + 0.6745691299438477, + 0.7382116913795471, + 1.4764233827590942 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_008.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.4476705491542816, + -0.5647266507148743, + 0.6933072209358215, + 1.386614441871643 + ], + [ + 0.8941985964775085, + 0.2827240824699402, + -0.3470965623855591, + -0.6941931247711182 + ], + [ + 1.8510100119328854e-09, + 0.7753392457962036, + 0.6315450072288513, + 1.2630900144577026 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_028.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9812065958976746, + 0.1561669260263443, + -0.11333779245615005, + -0.2266755849123001 + ], + [ + -0.1929600089788437, + 0.7941128611564636, + -0.5763255953788757, + -1.1526511907577515 + ], + [ + 6.790858542160549e-09, + 0.587364137172699, + 0.8093227744102478, + 1.6186455488204956 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_022.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.49478644132614136, + -0.6569659113883972, + 0.5688429474830627, + 1.137685775756836 + ], + [ + 0.8690146803855896, + -0.37405329942703247, + 0.323879212141037, + 0.647758424282074 + ], + [ + -2.917397878832162e-08, + 0.6545838713645935, + 0.7559893727302551, + 1.5119788646697998 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": 
"chunk02_021.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.8690145611763, + 0.3696552813053131, + -0.32888999581336975, + -0.6577800512313843 + ], + [ + -0.49478647112846375, + -0.6492412686347961, + 0.5776435136795044, + 1.1552871465682983 + ], + [ + -4.871466785516532e-09, + 0.6647109389305115, + 0.7471005916595459, + 1.4942011833190918 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_029.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9812065362930298, + -0.15788215398788452, + 0.11093602329492569, + 0.221872016787529 + ], + [ + 0.19296003878116608, + -0.8028346300125122, + 0.5641124844551086, + 1.1282248497009277 + ], + [ + -9.954787216770455e-09, + 0.5749170780181885, + 0.8182116746902466, + 1.6364233493804932 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_033.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.8302612900733948, + 0.4758680760860443, + -0.29019978642463684, + -0.5803995132446289 + ], + [ + -0.5573744773864746, + -0.7088499069213867, + 0.4322795867919922, + 0.8645591139793396 + ], + [ + -1.924470538483547e-08, + 0.5206549167633057, + 0.8537672162055969, + 1.7075344324111938 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_049.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.7055697441101074, + 0.7057986855506897, + -0.06340262293815613, + -0.1268051713705063 + ], + [ + -0.7086406946182251, + -0.7027400135993958, + 0.06312789767980576, + 0.12625563144683838 + ], + [ + 3.3815034328199545e-08, + 0.08947079628705978, + 0.9959894418716431, + 1.9919788837432861 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_006.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.3157431185245514, + -0.5823699831962585, + 0.7491007447242737, + 1.4982012510299683 + ], + [ + 0.9488449096679688, + -0.19379277527332306, + 0.24927501380443573, + 0.4985499680042267 + ], + [ + -8.877297830167663e-08, + 0.7894871234893799, + 0.6137672066688538, + 1.2275344133377075 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_016.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.6135181784629822, + -0.5548739433288574, + 0.5618811845779419, + 1.1237622499465942 + ], + [ + 0.7896806597709656, + 0.4310922920703888, + -0.43653634190559387, + -0.8730726838111877 + ], + [ + -2.708064883449879e-08, + 0.7115296125411987, + 0.702656090259552, + 1.405312180519104 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_005.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9488449096679688, + 0.19098611176013947, + -0.25143173336982727, + -0.5028635263442993 + ], + [ + -0.31574296951293945, + -0.573935866355896, + 0.7555820345878601, + 1.5111640691757202 + ], + [ + -5.848949768960665e-09, + 0.7963178753852844, + 0.6048784255981445, + 1.2097567319869995 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_027.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.9643529653549194, + 0.2118101865053177, + -0.15861831605434418, + -0.31723666191101074 + ], + [ + -0.26461920142173767, + 0.7719008326530457, + -0.5780534148216248, + -1.156106948852539 + ], + [ + 3.1994433769000352e-09, + 0.5994210243225098, + 0.8004339337348938, + 1.600867748260498 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_035.png", + "camera_angle_x": 0.6981317007977318, + 
"transform_matrix": [ + [ + 0.5573745369911194, + 0.7236100435256958, + -0.4070899188518524, + -0.8141798973083496 + ], + [ + -0.83026123046875, + 0.48577702045440674, + -0.2732893228530884, + -0.5465787649154663 + ], + [ + 4.9952152636478786e-08, + 0.4903154671192169, + 0.8715450167655945, + 1.743090033531189 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_018.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.7896807789802551, + -0.44199928641319275, + 0.4254893362522125, + 0.850978672504425 + ], + [ + 0.6135181784629822, + -0.5689128637313843, + 0.5476621985435486, + 1.0953242778778076 + ], + [ + 2.7610346009510067e-08, + 0.693523645401001, + 0.7204338908195496, + 1.4408677816390991 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_014.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.12456594407558441, + -0.6795441508293152, + 0.7229822874069214, + 1.4459643363952637 + ], + [ + 0.9922114610671997, + -0.0853126123547554, + 0.09076575189828873, + 0.18153133988380432 + ], + [ + 7.589785866457532e-08, + 0.7286575436592102, + 0.6848782896995544, + 1.3697566986083984 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_043.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.8326740860939026, + 0.52200847864151, + -0.18482716381549835, + -0.36965423822402954 + ], + [ + -0.5537634491920471, + 0.7849252820014954, + -0.2779180109500885, + -0.5558358430862427 + ], + [ + -3.9174199883973415e-08, + 0.3337656259536743, + 0.9426560997962952, + 1.8853121995925903 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_044.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.999997615814209, + -0.0020663468167185783, + 0.000667778542265296, + 0.0013355568516999483 + ], + [ + 0.0021715702023357153, + 0.9515427350997925, + -0.3075088560581207, + -0.6150175333023071 + ], + [ + -6.308249755893058e-11, + 0.30750957131385803, + 0.951545000076294, + 1.903090000152588 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_039.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.19721969962120056, + 0.8892844915390015, + -0.4126468896865845, + -0.8252938389778137 + ], + [ + -0.9803593158721924, + 0.17889811098575592, + -0.08301252126693726, + -0.1660250574350357 + ], + [ + 9.366249997810883e-10, + 0.4209139347076416, + 0.9071005582809448, + 1.8142011165618896 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_041.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.5537631511688232, + 0.7701222896575928, + -0.31663525104522705, + -0.6332705616950989 + ], + [ + -0.8326741456985474, + -0.5121635794639587, + 0.21057571470737457, + 0.4211515486240387 + ], + [ + -1.0297616981347346e-08, + 0.380263090133667, + 0.9248783588409424, + 1.8497567176818848 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_026.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9643530249595642, + -0.20945799350738525, + 0.1617116630077362, + 0.3234233558177948 + ], + [ + 0.26461920142173767, + -0.7633287906646729, + 0.5893266201019287, + 1.1786532402038574 + ], + [ + 8.772249593391734e-10, + 0.6111109256744385, + 0.7915449738502502, + 1.58309006690979 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_040.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.5537632703781128, + -0.7627207636833191, 
+ 0.3340708017349243, + 0.6681416034698486 + ], + [ + 0.8326741456985474, + 0.5072413086891174, + -0.2221711128950119, + -0.4443422853946686 + ], + [ + 1.180316200333209e-08, + 0.40120232105255127, + 0.9159894585609436, + 1.8319789171218872 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_000.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.7557886242866516, + -0.3669809103012085, + 0.542317807674408, + 1.0846354961395264 + ], + [ + 0.654815673828125, + 0.4235695004463196, + -0.6259435415267944, + -1.2518870830535889 + ], + [ + 2.319525194138805e-08, + 0.8281992077827454, + 0.5604338645935059, + 1.1208678483963013 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_003.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.6548155546188354, + 0.44372400641441345, + -0.6118215322494507, + -1.2236429452896118 + ], + [ + -0.7557887434959412, + 0.3844425082206726, + -0.5300824046134949, + -1.0601648092269897 + ], + [ + -6.902338611780579e-08, + 0.809514045715332, + 0.5871005058288574, + 1.1742011308670044 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_010.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.8941985368728638, + -0.29068276286125183, + 0.34045925736427307, + 0.6809185147285461 + ], + [ + 0.4476706087589264, + -0.5806235074996948, + 0.6800495386123657, + 1.3600990772247314 + ], + [ + -1.5843827583239545e-08, + 0.7605128884315491, + 0.6493228077888489, + 1.2986456155776978 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_031.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.19296003878116608, + 0.8202784657478333, + -0.5384326577186584, + -1.0768654346466064 + ], + [ + -0.9812066555023193, + -0.16131256520748138, + 0.10588594526052475, + 0.2117718905210495 + ], + [ + 1.964443363533519e-08, + 0.5487454533576965, + 0.8359894752502441, + 1.6719789505004883 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_023.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.4947863221168518, + 0.6646906137466431, + -0.559797465801239, + -1.1195944547653198 + ], + [ + -0.8690148591995239, + 0.37845125794410706, + -0.31872907280921936, + -0.6374579071998596 + ], + [ + -1.0347328327497962e-08, + 0.6441749930381775, + 0.7648782730102539, + 1.5297566652297974 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "chunk02_009.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.4476705491542816, + 0.5726751089096069, + -0.686756432056427, + -1.3735127449035645 + ], + [ + -0.8941985964775085, + -0.28670334815979004, + 0.34381696581840515, + 0.6876338720321655 + ], + [ + 5.3712501113523103e-08, + 0.7680133581161499, + 0.6404338479042053, + 1.2808678150177002 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + } + ] +} \ No newline at end of file diff --git a/all_outputs/features/dinov2_vitl14_reg/appearance.npz b/all_outputs/features/dinov2_vitl14_reg/appearance.npz new file mode 100644 index 0000000000000000000000000000000000000000..330f984cb396906c8586698b009f04a770e02325 --- /dev/null +++ b/all_outputs/features/dinov2_vitl14_reg/appearance.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e22a23f05b0e653fe08da059c872191a55f0254be9a96eca3c6b7678a45a701 +size 20816831 diff --git a/all_outputs/latents/dinov2_vitl14_reg_slat_enc_swin8_B_64l8_fp16/appearance.npz 
b/all_outputs/latents/dinov2_vitl14_reg_slat_enc_swin8_B_64l8_fp16/appearance.npz new file mode 100644 index 0000000000000000000000000000000000000000..5f2eaf574af10ad818a4920aef56496ef7a674fe --- /dev/null +++ b/all_outputs/latents/dinov2_vitl14_reg_slat_enc_swin8_B_64l8_fp16/appearance.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb528775124556743c91760cc63c5cebfbff7432bfef5deed75877175828c2b5 +size 370956 diff --git a/all_outputs/out_app.glb b/all_outputs/out_app.glb new file mode 100644 index 0000000000000000000000000000000000000000..98a45cb4d5571b8c8345b48e22b43e2bbd8b5685 --- /dev/null +++ b/all_outputs/out_app.glb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05bcc49a07d356841706927f3f80db62d4112e7d8b8d2f78c7f77ae62f1078c4 +size 1423344 diff --git a/all_outputs/out_gaussian_app.mp4 b/all_outputs/out_gaussian_app.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..a025e5aefba10e4c5c2bef3938bc0edbe90e2981 --- /dev/null +++ b/all_outputs/out_gaussian_app.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf3ae17fe2050b0273b84e3a29b156673feee5c2844ee4aada09baade7bcafd8 +size 684047 diff --git a/all_outputs/out_gaussian_sim.mp4 b/all_outputs/out_gaussian_sim.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..edbca8dc219e4f138edc850a3e8c105c9d393573 --- /dev/null +++ b/all_outputs/out_gaussian_sim.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c82ca472cda4d49988950cc2f5e016f31cafea2f8f6b54e6e28c71e2838ae421 +size 495367 diff --git a/all_outputs/out_sim.glb b/all_outputs/out_sim.glb new file mode 100644 index 0000000000000000000000000000000000000000..cff35c24a95da6f8d2b946cd6a22d4af312dcba6 --- /dev/null +++ b/all_outputs/out_sim.glb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:656c40f9d69b5ed7cb52419ce6341a08b6d58507e03ccd65eb4655094a71d60a +size 1187180 diff --git a/all_outputs/partfield/part_feat_app_mesh_zup_batch_part_plane.npy b/all_outputs/partfield/part_feat_app_mesh_zup_batch_part_plane.npy new file mode 100644 index 0000000000000000000000000000000000000000..954f764e377a9b29dbd15ae4c16cff2188a155f7 --- /dev/null +++ b/all_outputs/partfield/part_feat_app_mesh_zup_batch_part_plane.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ccec77e42b5b1f5d1d7861615c3896833f52ea08c1cbb6ab55bb8073864243a +size 44040320 diff --git a/all_outputs/partfield/part_feat_struct_mesh_zup_batch_part_plane.npy b/all_outputs/partfield/part_feat_struct_mesh_zup_batch_part_plane.npy new file mode 100644 index 0000000000000000000000000000000000000000..a107cbb802334ccfe96443d8b01611818005bf79 --- /dev/null +++ b/all_outputs/partfield/part_feat_struct_mesh_zup_batch_part_plane.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65357407ed6fdb70a5b90c0da6702aee65a1cbd1f94f23cf563459443e99ab90 +size 44040320 diff --git a/all_outputs/struct_mesh.hash b/all_outputs/struct_mesh.hash new file mode 100644 index 0000000000000000000000000000000000000000..b010d7459b0e5c4bd21c29b7e078b8057ed7ff75 --- /dev/null +++ b/all_outputs/struct_mesh.hash @@ -0,0 +1 @@ +c08f4a29ff18ca006ddc16b8aa2c495e7ef2dce51248a40bacc15294d4534f8b \ No newline at end of file diff --git a/all_outputs/struct_mesh_zup.glb b/all_outputs/struct_mesh_zup.glb new file mode 100644 index 0000000000000000000000000000000000000000..e60f9f9b38ad31c05a2da5d1110958919057e3a6 --- /dev/null +++ b/all_outputs/struct_mesh_zup.glb @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c08f4a29ff18ca006ddc16b8aa2c495e7ef2dce51248a40bacc15294d4534f8b +size 128216 diff --git a/all_outputs/struct_renders/000.png b/all_outputs/struct_renders/000.png new file mode 100644 index 0000000000000000000000000000000000000000..bc076c933589212cb44b5da33a4cca767d6a3308 --- /dev/null +++ b/all_outputs/struct_renders/000.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6977ce3e98492bca36c7792fc99716763bae420e9d29462d8b19c5d959c14540 +size 173051 diff --git a/all_outputs/struct_renders/001.png b/all_outputs/struct_renders/001.png new file mode 100644 index 0000000000000000000000000000000000000000..2d0f3d768998b7fe31a93e67fc614900d3db82a8 --- /dev/null +++ b/all_outputs/struct_renders/001.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdc28a949ad06533fdf22fb25dbcadf4044e3e33d65ce4804e02f34adee30439 +size 186009 diff --git a/all_outputs/struct_renders/002.png b/all_outputs/struct_renders/002.png new file mode 100644 index 0000000000000000000000000000000000000000..7c71b16aa4af50788a931bb18fedddd1230df6a5 --- /dev/null +++ b/all_outputs/struct_renders/002.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8070b9470a486419c7246b98c9a8e74c4e32d82fb6111ea3bf70abd2c5eecbe +size 170398 diff --git a/all_outputs/struct_renders/003.png b/all_outputs/struct_renders/003.png new file mode 100644 index 0000000000000000000000000000000000000000..80a44ab11dd042f614b44cada7155550f5e34fd9 --- /dev/null +++ b/all_outputs/struct_renders/003.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8715302c293fc775428667e74567842717ac5d3705c2920002f6f8e1fab0ebb2 +size 170744 diff --git a/all_outputs/struct_renders/004.png b/all_outputs/struct_renders/004.png new file mode 100644 index 0000000000000000000000000000000000000000..98aeafe80147c76f2e673a9a8e0bc4facf9515a6 --- /dev/null +++ b/all_outputs/struct_renders/004.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49d9a9f8887fc1581ff0170371f33d5a14002d972d39b8c94165bc7807e2cf4a +size 180259 diff --git a/all_outputs/struct_renders/005.png b/all_outputs/struct_renders/005.png new file mode 100644 index 0000000000000000000000000000000000000000..0c02427492804bae200f3d428416c495bf4ca91b --- /dev/null +++ b/all_outputs/struct_renders/005.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:797357eef57985adfb991485d5ff1c6e8a8b58e6b633096aa6dd11766744c407 +size 184665 diff --git a/all_outputs/struct_renders/006.png b/all_outputs/struct_renders/006.png new file mode 100644 index 0000000000000000000000000000000000000000..485a4a227cb54d963cb4c6d01c9d7f7df78450b2 --- /dev/null +++ b/all_outputs/struct_renders/006.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbc574591b6adee0822ff9b5a32fb6756a5bd554ce91586bf07f84fb0e5ff6fb +size 187714 diff --git a/all_outputs/struct_renders/007.png b/all_outputs/struct_renders/007.png new file mode 100644 index 0000000000000000000000000000000000000000..969246ee36b3068c72b13032f65076591d9b7b48 --- /dev/null +++ b/all_outputs/struct_renders/007.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a48489d225780b4ebf89ba88c91f802bd0b74c83c350ba76ece1275e2bae78d +size 189020 diff --git a/all_outputs/struct_renders/008.png b/all_outputs/struct_renders/008.png new file mode 100644 index 0000000000000000000000000000000000000000..59b6cb34285811ed00486f0e36ee9ad26954edae --- /dev/null +++ 
b/all_outputs/struct_renders/008.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86fdd62d0a2de8fa3af2750cc5926f1c8feffc7c950c4998d57c8f8f7307b2ef +size 189678 diff --git a/all_outputs/struct_renders/009.png b/all_outputs/struct_renders/009.png new file mode 100644 index 0000000000000000000000000000000000000000..932416571e406ef7aadb81b452ce29ecb9127cbe --- /dev/null +++ b/all_outputs/struct_renders/009.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3182a218d0c80be24c0894ccf975bec4433d212433ce9051eb616ee3f5033766 +size 184121 diff --git a/all_outputs/struct_renders/010.png b/all_outputs/struct_renders/010.png new file mode 100644 index 0000000000000000000000000000000000000000..27c7e2079307e6e031204d3de94caef26029d24f --- /dev/null +++ b/all_outputs/struct_renders/010.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52631b4c074d8a064e7dba679d3089c7463917978e605260b6ddf2d70e845175 +size 178743 diff --git a/all_outputs/struct_renders/011.png b/all_outputs/struct_renders/011.png new file mode 100644 index 0000000000000000000000000000000000000000..42dff17eec14bae8f1197672dcf7be417f78d654 --- /dev/null +++ b/all_outputs/struct_renders/011.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d7ed3b682f4e43f39f87dfb869c020a5b1eedb118417371b4e7eb5771e0a62e +size 182479 diff --git a/all_outputs/struct_renders/012.png b/all_outputs/struct_renders/012.png new file mode 100644 index 0000000000000000000000000000000000000000..ac8a4970f77ae76b64730a79a6228e3ddc27034e --- /dev/null +++ b/all_outputs/struct_renders/012.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fff302e7f185e2954649a260daf25331c4b2f03b4bc557f314dcfaebed96b879 +size 163309 diff --git a/all_outputs/struct_renders/013.png b/all_outputs/struct_renders/013.png new file mode 100644 index 0000000000000000000000000000000000000000..7687990e227422d7f246b1764084d3183d6a1226 --- /dev/null +++ b/all_outputs/struct_renders/013.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d0213fd34aa958e5017411df971c53e216a8616856643d37b28b5116b6ee012 +size 153201 diff --git a/all_outputs/struct_renders/014.png b/all_outputs/struct_renders/014.png new file mode 100644 index 0000000000000000000000000000000000000000..33a23427485e162275267130eb286fdaf61c8418 --- /dev/null +++ b/all_outputs/struct_renders/014.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d0fef5cc985fab3e643b825a3d29a56636526ed4283e16e4652d5f11153037b +size 140605 diff --git a/all_outputs/struct_renders/mesh.ply b/all_outputs/struct_renders/mesh.ply new file mode 100644 index 0000000000000000000000000000000000000000..6f6441e3b49b628ca9eb65c536d77bf7250d0002 --- /dev/null +++ b/all_outputs/struct_renders/mesh.ply @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ce7d77c3707eb344fe5b0dcbae202c55563322e568845f55221f4fe3e2846c7 +size 202313 diff --git a/all_outputs/struct_renders/transforms.json b/all_outputs/struct_renders/transforms.json new file mode 100644 index 0000000000000000000000000000000000000000..ccba49b7a9771708eb9f077aed25edf2b5a63330 --- /dev/null +++ b/all_outputs/struct_renders/transforms.json @@ -0,0 +1,472 @@ +{ + "aabb": [ + [ + -0.5, + -0.5, + -0.5 + ], + [ + 0.5, + 0.5, + 0.5 + ] + ], + "scale": 0.9852216887622752, + "offset": [ + -0.0, + -0.02463054656982422, + -0.5 + ], + "frames": [ + { + "file_path": "000.png", + "camera_angle_x": 0.6981317007977318, + 
"transform_matrix": [ + [ + 0.9862522482872009, + 0.1334693729877472, + 0.0974293053150177, + 0.1948586106300354 + ], + [ + 0.16524691879749298, + -0.796592652797699, + -0.5814926624298096, + -1.1629853248596191 + ], + [ + 2.4402453391303425e-09, + 0.5895982980728149, + -0.8076966404914856, + -1.6153932809829712 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "001.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9862522482872009, + -0.08940353244543076, + -0.1389731913805008, + -0.277946412563324 + ], + [ + -0.1652469038963318, + 0.5335920453071594, + 0.8294413685798645, + 1.6588828563690186 + ], + [ + -2.296408396773586e-09, + 0.8410032987594604, + -0.5410299897193909, + -1.0820598602294922 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "002.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.16524693369865417, + 0.2705914378166199, + 0.948405921459198, + 1.896811842918396 + ], + [ + 0.9862522482872009, + 0.04533767327666283, + 0.1589057445526123, + 0.317811518907547 + ], + [ + -1.3578528523794375e-08, + 0.9616261124610901, + -0.27436336874961853, + -0.5487265586853027 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "003.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.16524682939052582, + -0.007590848952531815, + -0.9862231612205505, + -1.972446084022522 + ], + [ + -0.9862523674964905, + -0.0012720405356958508, + -0.1652420610189438, + -0.3304840624332428 + ], + [ + -5.984040996054318e-08, + 0.9999704957008362, + -0.007696801330894232, + -0.015393242239952087 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "004.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.5805383324623108, + -0.070287324488163, + 0.811193585395813, + 1.6223869323730469 + ], + [ + 0.8142329454421997, + 0.050113897770643234, + -0.5783713459968567, + -1.1567426919937134 + ], + [ + 5.3817579726000986e-08, + 0.9962672591209412, + 0.08632326871156693, + 0.17264670133590698 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "005.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.5805386304855347, + 0.14266347885131836, + -0.8016373515129089, + -1.6032744646072388 + ], + [ + -0.8142329454421997, + -0.10171753168106079, + 0.5715579986572266, + 1.143115758895874 + ], + [ + -2.5670495062968257e-08, + 0.9845308065414429, + 0.17521202564239502, + 0.35042446851730347 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "006.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.8142330050468445, + -0.1533208042383194, + 0.559926450252533, + 1.1198527812957764 + ], + [ + 0.5805384516716003, + -0.21503981947898865, + 0.7853236198425293, + 1.5706470012664795 + ], + [ + -1.1269913002820431e-08, + 0.9644951224327087, + 0.26410096883773804, + 0.5282022356987 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "007.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.8142328262329102, + 0.20492421090602875, + -0.5431675314903259, + -1.0863349437713623 + ], + [ + -0.5805385112762451, + 0.28741589188575745, + -0.7618184089660645, + -1.5236365795135498 + ], + [ + -1.7190155077173586e-08, + 0.9356272220611572, + 0.3529898226261139, + 0.7059800028800964 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "008.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.847940981388092, + -0.23423588275909424, + 0.4755308926105499, + 0.9510617852210999 + ], + 
[ + 0.5300906300544739, + 0.37468716502189636, + -0.7606664896011353, + -1.521332859992981 + ], + [ + 3.494987765861879e-08, + 0.8970747590065002, + 0.4418788254261017, + 0.8837578296661377 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "009.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.8479413986206055, + 0.2813551127910614, + -0.4492610692977905, + -0.8985219597816467 + ], + [ + -0.5300907492637634, + -0.4500601291656494, + 0.7186447381973267, + 1.4372892379760742 + ], + [ + -2.5484467869318905e-09, + 0.8475174307823181, + 0.5307677388191223, + 1.0615355968475342 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "010.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.5300906896591187, + -0.525432288646698, + 0.6655259132385254, + 1.3310519456863403 + ], + [ + 0.8479409217834473, + -0.3284742832183838, + 0.416053831577301, + 0.832107663154602 + ], + [ + 4.015531729351096e-08, + 0.7848729491233826, + 0.6196567416191101, + 1.2393133640289307 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "011.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.5300907492637634, + 0.6008047461509705, + -0.5983622670173645, + -1.1967246532440186 + ], + [ + -0.8479409217834473, + 0.37559348344802856, + -0.37406647205352783, + -0.7481328845024109 + ], + [ + 4.3913011893437215e-08, + 0.7056650519371033, + 0.7085455656051636, + 1.4170911312103271 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "012.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + 0.22475412487983704, + -0.7770324945449829, + 0.5879677534103394, + 1.1759355068206787 + ], + [ + 0.9744155406951904, + 0.17922666668891907, + -0.1356179118156433, + -0.27123579382896423 + ], + [ + 1.4260791658671224e-08, + 0.6034055948257446, + 0.7974344491958618, + 1.5948688983917236 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "013.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.2247542440891266, + 0.8636472225189209, + -0.4512195289134979, + -0.9024390578269958 + ], + [ + -0.9744154810905457, + -0.1992049366235733, + 0.10407621413469315, + 0.2081523835659027 + ], + [ + -2.093431028526993e-08, + 0.46306687593460083, + 0.8863233327865601, + 1.7726466655731201 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + }, + { + "file_path": "014.png", + "camera_angle_x": 0.6981317007977318, + "transform_matrix": [ + [ + -0.9744155406951904, + -0.21918298304080963, + 0.049731720238924026, + 0.09946338832378387 + ], + [ + 0.22475413978099823, + -0.9502618908882141, + 0.2156105786561966, + 0.4312208294868469 + ], + [ + -1.3513196783776493e-08, + 0.22127170860767365, + 0.9752122163772583, + 1.9504244327545166 + ], + [ + 0, + 0, + 0, + 1 + ] + ] + } + ] +} \ No newline at end of file diff --git a/all_outputs/voxels/app_voxels.ply b/all_outputs/voxels/app_voxels.ply new file mode 100644 index 0000000000000000000000000000000000000000..988b1986acf73f3cfcb0499ea9b85d3a43376f6b --- /dev/null +++ b/all_outputs/voxels/app_voxels.ply @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bde820c214972c8af12c9af25fc8920df5aeeb32bf336084be7bb6673291409f +size 138383 diff --git a/all_outputs/voxels/struct_voxels.ply b/all_outputs/voxels/struct_voxels.ply new file mode 100644 index 0000000000000000000000000000000000000000..abcd30baecd3962fec8917e60da716598c031b1e --- /dev/null +++ b/all_outputs/voxels/struct_voxels.ply @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ae235191f122d3223b724451807ea6fdbd20c0dae155218d60e50ad6f0785a17
+size 79414
diff --git a/bash/run.sh b/bash/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7fdf78ef366ac9db1a87105be7f37227f1ef4cba
--- /dev/null
+++ b/bash/run.sh
@@ -0,0 +1,48 @@
+export PYTHONWARNINGS="ignore"
+
+
+# Blender installation configuration
+BLENDER_LINK='https://download.blender.org/release/Blender3.0/blender-3.0.1-linux-x64.tar.xz'
+BLENDER_INSTALLATION_PATH='/tmp'
+export BLENDER_HOME="${BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64/blender"
+
+# Function to install Blender
+install_blender() {
+    if [ ! -f "$BLENDER_HOME" ]; then
+        echo "Installing Blender..."
+        sudo apt-get update
+        sudo apt-get install -y libxrender1 libxi6 libxkbcommon-x11-0 libsm6
+        wget "$BLENDER_LINK" -P "$BLENDER_INSTALLATION_PATH"
+        tar -xvf "${BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64.tar.xz" -C "$BLENDER_INSTALLATION_PATH"
+        echo "Blender installed at $BLENDER_HOME"
+    else
+        echo "Blender already installed at $BLENDER_HOME"
+    fi
+}
+
+install_blender
+
+# Appearance Guidance (with rendered image)
+# python run.py \
+#     --guidance_mode appearance \
+#     --appearance_mesh examples/B07QC84LP1.glb \
+#     --structure_mesh examples/example1.glb \
+#     --output_dir outputs/experiment1 \
+#     --convert_yup_to_zup \
+
+# # Appearance Guidance
+# python run.py \
+#     --guidance_mode appearance \
+#     --appearance_mesh examples/B07QC84LP1.glb \
+#     --structure_mesh examples/example1.glb \
+#     --output_dir outputs/experiment2 \
+#     --appearance_image examples/B07QC84LP1_orig.png \
+#     --convert_yup_to_zup
+
+# Similarity Guidance
+python run.py \
+    --guidance_mode similarity \
+    --structure_mesh examples/example1.glb \
+    --output_dir outputs/experiment3 \
+    --appearance_text "A light-colored wooden chair with a straight-back design, cushioned rectangular backrest and seat in light beige, slightly outward back legs, and tapered front legs." \
+    --convert_yup_to_zup
\ No newline at end of file
diff --git a/config/default.yaml b/config/default.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e1866c6c926423387bfac64cada3a0c4439ba2df
--- /dev/null
+++ b/config/default.yaml
@@ -0,0 +1,37 @@
+# dinov2
+dinov2_repo: "facebookresearch/dinov2"
+feature_name: "dinov2_vitl14_reg"
+
+# slat
+enc_pretrained: "JeffreyXiang/TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16"
+latent_name: "dinov2_vitl14_reg_slat_enc_swin8_B_64l8_fp16"
+
+# render
+num_views: 150
+
+# trellis
+trellis_img_model_name: "JeffreyXiang/TRELLIS-image-large"
+trellis_text_model_name: "JeffreyXiang/TRELLIS-text-large"
+flow_model_in_channels: 8
+
+# appearance guidance
+app_guidance:
+  num_part_clusters: 30
+  steps: 300
+  learning_rate: 5e-4
+  loss_weight: 10.0
+  rescale_t: 3.0
+  cfg_strength: 5.0
+  cfg_interval: [0.5, 1.0]
+
+# self-similarity guidance
+sim_guidance:
+  num_part_clusters: 10
+  steps: 300
+  learning_rate: 5e-4
+  loss_weight: 1.0
+  rescale_t: 3.0
+  cfg_strength: 7.5
+  cfg_interval: [0.5, 9.5]
+
+log_every: 100
\ No newline at end of file
diff --git a/demos/__init__.py b/demos/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Bold.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-Bold.eot new file mode 100644 index 0000000000000000000000000000000000000000..3f0d9bc944256556490c48834d68f35163546940 --- /dev/null +++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Bold.eot @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc5e5286943351eaab153aaec1e7d46e2f03401b77c66c0a28d1ae54fb96c8fc +size 88752 diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Bold.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-Bold.ttf new file mode 100644 index 0000000000000000000000000000000000000000..ae974ec7cdbf0dda2457e504aac6d062cba148ec --- /dev/null +++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Bold.ttf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b61ecc5bdef8da523481a59d121e9efce8ae8847028ebe2ced25af9dab36ba57 +size 88540 diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Bold.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-Bold.woff new file mode 100644 index 0000000000000000000000000000000000000000..6b1698f51cfb7cde5f8af6d676a29526fe9816e4 Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Bold.woff differ diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Bold.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-Bold.woff2 new file mode 100644 index 0000000000000000000000000000000000000000..37097149f0a539d4b09f8459e056ba765b71db90 Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Bold.woff2 differ diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-BoldItalic.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-BoldItalic.eot new file mode 100644 index 0000000000000000000000000000000000000000..fe86dca3f8522fc76c91ed50c89089ee9d2057f7 --- /dev/null +++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-BoldItalic.eot @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85e982336b2ed45e0cfdde0ba41ae7cda28b0e516a5365f3d34f168539d779bb +size 92698 diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-BoldItalic.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-BoldItalic.ttf new file mode 100644 index
0000000000000000000000000000000000000000..3194d1e213f84662530d71492545a480cf035814
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-BoldItalic.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7c5b2d8cb1a1ffebdb8a656794bc1b26ebde9457910fe68938123488e3ef705
+size 92460
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-BoldItalic.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-BoldItalic.woff
new file mode 100644
index 0000000000000000000000000000000000000000..a1cbd300355a5191ab6d67b91f77d34efe957a85
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-BoldItalic.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-BoldItalic.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-BoldItalic.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..db639d0626ffee7a1c35375417824074c014aec7
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-BoldItalic.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Demi.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-Demi.eot
new file mode 100644
index 0000000000000000000000000000000000000000..25c18f1ea2dda7b583a606e4aaa5170b467dec91
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Demi.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1e06391ad01bc5b84a2f1a17551b57fc869c6a6d9561534e27089ad4f123ae4
+size 88930
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Demi.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-Demi.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..a348df682b37f36004d68f45e4933f27ce294226
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Demi.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73a9281f99050f0d17112ea434c10a3842adbdb9152f21a56a011cf2f10d16c4
+size 88732
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Demi.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-Demi.woff
new file mode 100644
index 0000000000000000000000000000000000000000..743cf30b014e270e26ff1fc1b482babea1701c90
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Demi.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Demi.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-Demi.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..324b7c5f82f0ac22441c5e7e1da6818bc022abc0
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Demi.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-DemiItalic.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-DemiItalic.eot
new file mode 100644
index 0000000000000000000000000000000000000000..e79a13d1d6d796edcc21a7c07540181e54cb3e85
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-DemiItalic.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f1e6e6bdcc58a53e20fe86c5cb54490e014dc2db648223bb41b5787c707fc04
+size 92112
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-DemiItalic.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-DemiItalic.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..2766e1ebb2cfbe8dd7ed0b76e0df22a0a89a1a6e
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-DemiItalic.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2775a0dd9e0f0d39d94436438c4311a643b0b454169af9f4d4f04b288d515bc7
+size 91888
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-DemiItalic.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-DemiItalic.woff
new file mode 100644
index 0000000000000000000000000000000000000000..7b755cb3dd5692453f9a1b5c63aabf2baeca09da
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-DemiItalic.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-DemiItalic.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-DemiItalic.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..80a04c45e9ee40f8da9208b76bde46aa10db23bd
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-DemiItalic.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Heavy.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-Heavy.eot
new file mode 100644
index 0000000000000000000000000000000000000000..c0e751b50d9a6f21d4bb2ce7c69cf1d82643f5a1
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Heavy.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:579c85417e232a7f0c49f97dba6525dae66ff306fce66e4269e603c5d885c48d
+size 89038
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Heavy.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-Heavy.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..d1949baa205938aed9ed48b8a6a75b64791f7478
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Heavy.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9b68dea7215b31cc60ea2f99f2014151f9416f1951ed3aca7b6888a053a55da
+size 88820
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Heavy.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-Heavy.woff
new file mode 100644
index 0000000000000000000000000000000000000000..82da6b2c559b6dcbe644c80ea7847f4880b10deb
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Heavy.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Heavy.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-Heavy.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..8d4ec8484e9995750e011420ae18276eeff1acdf
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Heavy.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-HeavyItalic.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-HeavyItalic.eot
new file mode 100644
index 0000000000000000000000000000000000000000..a8f148cda6d3eba20d465c43daaae5206ecdd216
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-HeavyItalic.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb89897beb45a4def9aebb77375065f6231b8898ffa86d99ed5135ce15e43d73
+size 91896
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-HeavyItalic.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-HeavyItalic.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..401811aa67cd74efcd4c60b4c131112c02992454
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-HeavyItalic.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7c27c014a411ffdfc10c7d7b0cc28f2280a0a4b8dd1a4d0eca59b4c4a19484f
+size 91668
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-HeavyItalic.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-HeavyItalic.woff
new file mode 100644
index 0000000000000000000000000000000000000000..fd2ee0e583208e3c4cdbdc94f70ca62315b376f3
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-HeavyItalic.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-HeavyItalic.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-HeavyItalic.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..95cc874642a4edd0498afe6016066a04f8cf4ca8
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-HeavyItalic.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Italic.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-Italic.eot
new file mode 100644
index 0000000000000000000000000000000000000000..55468c96e61ff69b2a5f801c1a9912a2cb94d50d
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Italic.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5858aa48c593a7f42a817b957ca585492302f11697b8306c183b8bb84409a0ee
+size 92950
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Italic.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-Italic.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..64d9f2b64f6c71e6d5fcf06c248a4efd5e9f0a3b
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Italic.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:745c79278f1a696ab690b1da412cccdf4404efceb98136390c78eab5fd1c8a21
+size 92744
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Italic.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-Italic.woff
new file mode 100644
index 0000000000000000000000000000000000000000..3ea2ba5dc92cb7d232225a87fa146f75f2b2a36a
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Italic.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Italic.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-Italic.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..fe3705ee1cb45de8226ed35f766aaa06c364af01
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Italic.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Light.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-Light.eot
new file mode 100644
index 0000000000000000000000000000000000000000..50c67ffc08641194e34199fff56210ddc934dac7
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Light.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb7fa72732463bc8d87b6b99902b2d8f6d2cae50100b47c7a5e057b2c066cb44
+size 98658
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Light.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-Light.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..f874e278408846ec51af7f9e467675a78a51fef6
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Light.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bd436eb54379f23bbc0f6fb2437b1785fc03afe5162f0daddd33ca5d9bcf404
+size 98440
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Light.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-Light.woff
new file mode 100644
index 0000000000000000000000000000000000000000..687c4fa7c2a6c3db9b1dec331745ed85667e981b
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Light.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Light.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-Light.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..1a403901fc6d3011836532aca38267bd4267a3e9
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Light.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-LightItalic.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-LightItalic.eot
new file mode 100644
index 0000000000000000000000000000000000000000..6066230a7f1350fcb338961d1d22f408667e7a8e
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-LightItalic.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fa1037760944d6f45f89ac894ea8a9d2a93be46d586f7182328b23bdb2484d0
+size 102644
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-LightItalic.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-LightItalic.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..02589bd4c89087dd61d7d3a53c687d5ef9d90441
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-LightItalic.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22772661816f742c9b3dfc03b5d6a2c59606a90f943d419d88031b024729f8c8
+size 102416
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-LightItalic.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-LightItalic.woff
new file mode 100644
index 0000000000000000000000000000000000000000..0f9dd9f67fa22363a60981a4faf07e8b35047432
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-LightItalic.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-LightItalic.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-LightItalic.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..8bd9634e209bf39cd5768e2db340bc02cdf576a6
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-LightItalic.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Medium.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-Medium.eot
new file mode 100644
index 0000000000000000000000000000000000000000..2b89508da99d510f666f09900010a9cdd0597465
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Medium.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4aa089a6c7b85c1afa469029ccfe04d3e447a83755448d598944947fd9368447
+size 89158
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Medium.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-Medium.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..4bfc775823061e75817931d57f524feec9d4c763
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Medium.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b9478f9169e6cccf44efa01b8ae8cfdb44d9600113aefb6174e86816cc5e44c
+size 88936
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Medium.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-Medium.woff
new file mode 100644
index 0000000000000000000000000000000000000000..71a785eb995608968da42b40a89e1c2c82e888dd
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Medium.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Medium.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-Medium.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..32fa03f6a1d11beb352a34d154eb49d3a3fe8507
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Medium.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-MediumItalic.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-MediumItalic.eot
new file mode 100644
index 0000000000000000000000000000000000000000..54bdb57dbedf87a819735ed73f1f478867a22839
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-MediumItalic.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59f460030e58008ea4338fe95514c678c0b79dbf81b45148b302ef6c9cd500e2
+size 92756
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-MediumItalic.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-MediumItalic.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..ccd1b88133f23705a8c0d7d46a79e780c10f10bb
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-MediumItalic.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e27b92b592080753144bf7f5489c9a5e753e662d1b2002fe2d388eb5a894b39
+size 92524
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-MediumItalic.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-MediumItalic.woff
new file mode 100644
index 0000000000000000000000000000000000000000..51a761842cec9a14e128018df94fac6113d57445
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-MediumItalic.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-MediumItalic.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-MediumItalic.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..b52d511478d628d5fd9513b5df1017cc184c5c01
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-MediumItalic.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Regular.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-Regular.eot
new file mode 100644
index 0000000000000000000000000000000000000000..b00dd67d15f6dfb52c94c55b03545f4584f6901b
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Regular.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22d580c16d5d3b4c6cf229d5a675f9c8f0d49238ad497f6b242557acd9ea6fea
+size 89230
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Regular.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-Regular.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..00fa500f3ebcabbb4e2444252049f573213e7f08
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Regular.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db85d89cd481bb438d6b0301cadc4ed4bddbfe89768cdccd442c4b2f59a04be4
+size 89020
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Regular.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-Regular.woff
new file mode 100644
index 0000000000000000000000000000000000000000..7fca550ea30e5f1119c60f11630d9125b723fc78
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Regular.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Regular.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-Regular.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..0c50b6a1f45e980073740479197fe98dd7dfdc0d
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Regular.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Thin.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-Thin.eot
new file mode 100644
index 0000000000000000000000000000000000000000..e9971845a0f687f4a8751207b0f9c7dec69b8d59
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Thin.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bf2a1678fc6c6e2632f55a1ae42ee1dc2c0d5c5cf5e7e77696adfffcb35904d
+size 99742
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Thin.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-Thin.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..c5b91fa9f785dc4aab7935d55d76c35899b0141d
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-Thin.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faad5d29a7f5c1b6a332d33ee35a36bb6504f4d10f706e7f29672987dbfa05cf
+size 99528
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Thin.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-Thin.woff
new file mode 100644
index 0000000000000000000000000000000000000000..99d9e381f2c9bb9396551f2b4b4c6436614ae45d
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Thin.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-Thin.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-Thin.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..57cbc78ff35826e82088d49b1710a0fe6cd2afb0
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-Thin.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-ThinItalic.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-ThinItalic.eot
new file mode 100644
index 0000000000000000000000000000000000000000..0d2e1678cf59bda455b0a72ef7d980c58bb94aac
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-ThinItalic.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5515955a727881b216f5ecb7cce19aeb0f43c6a2e9f9772190a7fb513c809348
+size 102784
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-ThinItalic.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-ThinItalic.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..7f7c1c8675d06388a340c0d362856de0cb4bf1fe
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-ThinItalic.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54872ac929ae240e73ddf1b5ad1018dc78eea236a7ae0ca57501118f559d4e17
+size 102560
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-ThinItalic.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-ThinItalic.woff
new file mode 100644
index 0000000000000000000000000000000000000000..33bdc445b162d7d296d281e6cec287ea60417229
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-ThinItalic.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-ThinItalic.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-ThinItalic.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..868443fb30e9e32dcf24624891835d542df38d33
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-ThinItalic.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLight.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLight.eot
new file mode 100644
index 0000000000000000000000000000000000000000..6e89684499d95a4163e3a4d4b4651a2ee04fc5f0
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLight.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44bafedc060b3509378ecd8aa52f5226d084c2b5ae224b87372591eba57cbbb1
+size 90116
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLight.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLight.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..ed18373397fe2178d7d64394685818079b64f053
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLight.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a010909eb5e40525236f0f1e226fc79f21875ef15c42fd364db7bf37f4e20c3
+size 89876
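The *.eot and *.ttf entries in this directory are Git LFS pointers rather than the font binaries themselves: each pointer is just three lines naming the LFS spec version, the object's sha256, and its size in bytes. After cloning, the actual binaries can be fetched selectively, for example:

    git lfs install
    git lfs pull --include="demos/assets/fonts/**"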
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLight.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLight.woff
new file mode 100644
index 0000000000000000000000000000000000000000..008b182e0e9ac9b7eb5eff1cd0ea8279aee0701a
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLight.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLight.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLight.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..50e2d877c703d3861595d925e9e31b313b116cdb
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLight.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLightIt.eot b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLightIt.eot
new file mode 100644
index 0000000000000000000000000000000000000000..27cdaaf0292cf4a50af4a8d7817ba343da67ca23
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLightIt.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e7fcbb7332a831f63e6e527faba903c8c89bb83e78e653e3dff15ce84434ced
+size 94074
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLightIt.ttf b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLightIt.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..a5399fc339c4fe80b7077f53af8ba7a3a00da587
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLightIt.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8415be8bf86943cca34c2c8842b822e96b42637997767c28e14fda81d3ce702b
+size 93832
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLightIt.woff b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLightIt.woff
new file mode 100644
index 0000000000000000000000000000000000000000..efc8443fbdac4d3f8d143a49d5475cf143710849
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLightIt.woff differ
diff --git a/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLightIt.woff2 b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLightIt.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..be6507d1937eaaaeeaa1c8039bbd705feee1ce31
Binary files /dev/null and b/demos/assets/fonts/avenir-next/AvenirNextCyr-UltraLightIt.woff2 differ
diff --git a/demos/assets/fonts/avenir-next/demo.html b/demos/assets/fonts/avenir-next/demo.html
new file mode 100644
index 0000000000000000000000000000000000000000..d2b585b851541eb209e399991b0f1d9c058f077e
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/demo.html
@@ -0,0 +1,629 @@
+
+
+
+
+
+
+
+
Transfonter demo
+
+
+
+
+
+

Avenir Next Cyr Bold

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: bold;
+    font-style: normal;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Thin Italic

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: 100;
+    font-style: italic;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Medium Italic

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: 500;
+    font-style: italic;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Ultra Light Italic

+
.your-style {
+    font-family: 'Avenir Next Cyr Ultra';
+    font-weight: 200;
+    font-style: italic;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Light

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: 300;
+    font-style: normal;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Italic

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: normal;
+    font-style: italic;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Bold Italic

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: bold;
+    font-style: italic;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Heavy

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: 900;
+    font-style: normal;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Heavy Italic

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: 900;
+    font-style: italic;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Thin

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: 100;
+    font-style: normal;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Medium

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: 500;
+    font-style: normal;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Demi Italic

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: bold;
+    font-style: italic;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Demi

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: bold;
+    font-style: normal;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Regular

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: normal;
+    font-style: normal;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Light Italic

+
.your-style {
+    font-family: 'Avenir Next Cyr';
+    font-weight: 300;
+    font-style: italic;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+

Avenir Next Cyr Ultra Light

+
.your-style {
+    font-family: 'Avenir Next Cyr Ultra';
+    font-weight: 200;
+    font-style: normal;
+}
+
+

+ ะฐะฑะฒะณะดะตั‘ะถะทะธะนะบะปะผะฝะพะฟั€ัั‚ัƒั„ั…ั†ั‡ัˆั‰ัŠั‹ัŒััŽั
+ะะ‘ะ’ะ“ะ”ะ•ะะ–ะ—ะ˜ะ™ะšะ›ะœะะžะŸะ ะกะขะฃะคะฅะฆะงะจะฉะชะซะฌะญะฎะฏ
+abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+

ะกัŠะตัˆัŒ ะถะต ะตั‰ั‘ ัั‚ะธั… ะผัะณะบะธั… ั„ั€ะฐะฝั†ัƒะทัะบะธั… ะฑัƒะปะพะบ, ะดะฐ ะฒั‹ะฟะตะน ั‡ะฐัŽ.

+
+
+
+
+
\ No newline at end of file
diff --git a/demos/assets/fonts/avenir-next/stylesheet.css b/demos/assets/fonts/avenir-next/stylesheet.css
new file mode 100644
index 0000000000000000000000000000000000000000..ea5d92d4d39e2656985aaa685327bf49b3ae3e5a
--- /dev/null
+++ b/demos/assets/fonts/avenir-next/stylesheet.css
@@ -0,0 +1,192 @@
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-UltraLight.eot');
+    src: local('Avenir Next Cyr Ultra Light'), local('AvenirNextCyr-UltraLight'),
+        url('AvenirNextCyr-UltraLight.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-UltraLight.woff2') format('woff2'),
+        url('AvenirNextCyr-UltraLight.woff') format('woff'),
+        url('AvenirNextCyr-UltraLight.ttf') format('truetype');
+    font-weight: 100;
+    font-style: normal;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-UltraLightIt.eot');
+    src: local('Avenir Next Cyr Ultra Light Italic'), local('AvenirNextCyr-UltraLightIt'),
+        url('AvenirNextCyr-UltraLightIt.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-UltraLightIt.woff2') format('woff2'),
+        url('AvenirNextCyr-UltraLightIt.woff') format('woff'),
+        url('AvenirNextCyr-UltraLightIt.ttf') format('truetype');
+    font-weight: 100;
+    font-style: italic;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-Thin.eot');
+    src: local('Avenir Next Cyr Thin'), local('AvenirNextCyr-Thin'),
+        url('AvenirNextCyr-Thin.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-Thin.woff2') format('woff2'),
+        url('AvenirNextCyr-Thin.woff') format('woff'),
+        url('AvenirNextCyr-Thin.ttf') format('truetype');
+    font-weight: 200;
+    font-style: normal;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-ThinItalic.eot');
+    src: local('Avenir Next Cyr Thin Italic'), local('AvenirNextCyr-ThinItalic'),
+        url('AvenirNextCyr-ThinItalic.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-ThinItalic.woff2') format('woff2'),
+        url('AvenirNextCyr-ThinItalic.woff') format('woff'),
+        url('AvenirNextCyr-ThinItalic.ttf') format('truetype');
+    font-weight: 200;
+    font-style: italic;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-Light.eot');
+    src: local('Avenir Next Cyr Light'), local('AvenirNextCyr-Light'),
+        url('AvenirNextCyr-Light.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-Light.woff2') format('woff2'),
+        url('AvenirNextCyr-Light.woff') format('woff'),
+        url('AvenirNextCyr-Light.ttf') format('truetype');
+    font-weight: 300;
+    font-style: normal;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-LightItalic.eot');
+    src: local('Avenir Next Cyr Light Italic'), local('AvenirNextCyr-LightItalic'),
+        url('AvenirNextCyr-LightItalic.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-LightItalic.woff2') format('woff2'),
+        url('AvenirNextCyr-LightItalic.woff') format('woff'),
+        url('AvenirNextCyr-LightItalic.ttf') format('truetype');
+    font-weight: 300;
+    font-style: italic;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-Regular.eot');
+    src: local('Avenir Next Cyr Regular'), local('AvenirNextCyr-Regular'),
+        url('AvenirNextCyr-Regular.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-Regular.woff2') format('woff2'),
+        url('AvenirNextCyr-Regular.woff') format('woff'),
+        url('AvenirNextCyr-Regular.ttf') format('truetype');
+    font-weight: 400;
+    font-style: normal;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-Italic.eot');
+    src: local('Avenir Next Cyr Italic'), local('AvenirNextCyr-Italic'),
+        url('AvenirNextCyr-Italic.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-Italic.woff2') format('woff2'),
+        url('AvenirNextCyr-Italic.woff') format('woff'),
+        url('AvenirNextCyr-Italic.ttf') format('truetype');
+    font-weight: 400;
+    font-style: italic;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-Medium.eot');
+    src: local('Avenir Next Cyr Medium'), local('AvenirNextCyr-Medium'),
+        url('AvenirNextCyr-Medium.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-Medium.woff2') format('woff2'),
+        url('AvenirNextCyr-Medium.woff') format('woff'),
+        url('AvenirNextCyr-Medium.ttf') format('truetype');
+    font-weight: 500;
+    font-style: normal;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-MediumItalic.eot');
+    src: local('Avenir Next Cyr Medium Italic'), local('AvenirNextCyr-MediumItalic'),
+        url('AvenirNextCyr-MediumItalic.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-MediumItalic.woff2') format('woff2'),
+        url('AvenirNextCyr-MediumItalic.woff') format('woff'),
+        url('AvenirNextCyr-MediumItalic.ttf') format('truetype');
+    font-weight: 500;
+    font-style: italic;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-Demi.eot');
+    src: local('Avenir Next Cyr Demi'), local('AvenirNextCyr-Demi'),
+        url('AvenirNextCyr-Demi.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-Demi.woff2') format('woff2'),
+        url('AvenirNextCyr-Demi.woff') format('woff'),
+        url('AvenirNextCyr-Demi.ttf') format('truetype');
+    font-weight: 600;
+    font-style: normal;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-DemiItalic.eot');
+    src: local('Avenir Next Cyr Demi Italic'), local('AvenirNextCyr-DemiItalic'),
+        url('AvenirNextCyr-DemiItalic.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-DemiItalic.woff2') format('woff2'),
+        url('AvenirNextCyr-DemiItalic.woff') format('woff'),
+        url('AvenirNextCyr-DemiItalic.ttf') format('truetype');
+    font-weight: 600;
+    font-style: italic;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-Bold.eot');
+    src: local('Avenir Next Cyr Bold'), local('AvenirNextCyr-Bold'),
+        url('AvenirNextCyr-Bold.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-Bold.woff2') format('woff2'),
+        url('AvenirNextCyr-Bold.woff') format('woff'),
+        url('AvenirNextCyr-Bold.ttf') format('truetype');
+    font-weight: 700;
+    font-style: normal;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-BoldItalic.eot');
+    src: local('Avenir Next Cyr Bold Italic'), local('AvenirNextCyr-BoldItalic'),
+        url('AvenirNextCyr-BoldItalic.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-BoldItalic.woff2') format('woff2'),
+        url('AvenirNextCyr-BoldItalic.woff') format('woff'),
+        url('AvenirNextCyr-BoldItalic.ttf') format('truetype');
+    font-weight: 700;
+    font-style: italic;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-Heavy.eot');
+    src: local('Avenir Next Cyr Heavy'), local('AvenirNextCyr-Heavy'),
+        url('AvenirNextCyr-Heavy.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-Heavy.woff2') format('woff2'),
+        url('AvenirNextCyr-Heavy.woff') format('woff'),
+        url('AvenirNextCyr-Heavy.ttf') format('truetype');
+    font-weight: 900;
+    font-style: normal;
+}
+
+@font-face {
+    font-family: 'Avenir Next Cyr';
+    src: url('AvenirNextCyr-HeavyItalic.eot');
+    src: local('Avenir Next Cyr Heavy Italic'), local('AvenirNextCyr-HeavyItalic'),
+        url('AvenirNextCyr-HeavyItalic.eot?#iefix') format('embedded-opentype'),
+        url('AvenirNextCyr-HeavyItalic.woff2') format('woff2'),
+        url('AvenirNextCyr-HeavyItalic.woff') format('woff'),
+        url('AvenirNextCyr-HeavyItalic.ttf') format('truetype');
+    font-weight: 900;
+    font-style: italic;
+}
+
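Because all sixteen faces above register under the single 'Avenir Next Cyr' family, a consuming stylesheet selects a face purely through font-weight and font-style. A minimal usage sketch (the selectors are illustrative; the weight-to-face mapping follows the rules above):

    body {
        font-family: 'Avenir Next Cyr', sans-serif;
        font-weight: 400;   /* AvenirNextCyr-Regular */
    }
    em {
        font-style: italic; /* 400 italic resolves to AvenirNextCyr-Italic */
    }
    h1 {
        font-weight: 600;   /* AvenirNextCyr-Demi */
    }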
diff --git a/demos/assets/fonts/segoe-print/SegoePrint.eot b/demos/assets/fonts/segoe-print/SegoePrint.eot
new file mode 100644
index 0000000000000000000000000000000000000000..93ba239c099f9174cead05adf6cb51094f91d11f
--- /dev/null
+++ b/demos/assets/fonts/segoe-print/SegoePrint.eot
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41515f66ec1d28b7915c129ac4660b28a609ea9d5ddd1decda8ee5904cb0bc12
+size 151690
diff --git a/demos/assets/fonts/segoe-print/SegoePrint.ttf b/demos/assets/fonts/segoe-print/SegoePrint.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..5b5ae571bf0008ebdcd2620f90fdc894031d534e
--- /dev/null
+++ b/demos/assets/fonts/segoe-print/SegoePrint.ttf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e2f005e531418162fc0759a7d634823f679d7c9bf57271b2ea44226bcde6d56
+size 151508
diff --git a/demos/assets/fonts/segoe-print/SegoePrint.woff b/demos/assets/fonts/segoe-print/SegoePrint.woff
new file mode 100644
index 0000000000000000000000000000000000000000..60646cad28ad9d3410e7700a4d2e55a57abb4e5f
Binary files /dev/null and b/demos/assets/fonts/segoe-print/SegoePrint.woff differ
diff --git a/demos/assets/fonts/segoe-print/SegoePrint.woff2 b/demos/assets/fonts/segoe-print/SegoePrint.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..1cebd5a571e1190c74cc3044081850897e13e6d9
Binary files /dev/null and b/demos/assets/fonts/segoe-print/SegoePrint.woff2 differ
diff --git a/demos/assets/fonts/segoe-print/demo.html b/demos/assets/fonts/segoe-print/demo.html
new file mode 100644
index 0000000000000000000000000000000000000000..6ba4bf351df0bb506bd1e48aa131be39f5658fea
--- /dev/null
+++ b/demos/assets/fonts/segoe-print/demo.html
@@ -0,0 +1,192 @@
+
+
+
+
+
+
+
+
Transfonter demo
+
+
+
+
+
+

Segoe Print

+
.your-style {
+    font-family: 'Segoe Print';
+    font-weight: normal;
+    font-style: normal;
+}
+
+<link rel="preload" href="SegoePrint.woff2" as="font" type="font/woff2" crossorigin>
+
+

+ abcdefghijklmnopqrstuvwxyz
+ABCDEFGHIJKLMNOPQRSTUVWXYZ
+ 0123456789.:,;()*!?'@#<>$%&^+-=~ +

+

The quick brown fox jumps over the lazy dog.

+

The quick brown fox jumps over the lazy dog.

+

The quick brown fox jumps over the lazy dog.

+

The quick brown fox jumps over the lazy dog.

+

The quick brown fox jumps over the lazy dog.

+

The quick brown fox jumps over the lazy dog.

+

The quick brown fox jumps over the lazy dog.

+

The quick brown fox jumps over the lazy dog.

+

The quick brown fox jumps over the lazy dog.

+

The quick brown fox jumps over the lazy dog.

+

The quick brown fox jumps over the lazy dog.

+
+
+
+
+
+
diff --git a/demos/assets/fonts/segoe-print/stylesheet.css b/demos/assets/fonts/segoe-print/stylesheet.css
new file mode 100644
index 0000000000000000000000000000000000000000..0ab8af07742195dd1eb5ff50ffb02d817c68baca
--- /dev/null
+++ b/demos/assets/fonts/segoe-print/stylesheet.css
@@ -0,0 +1,12 @@
+@font-face {
+    font-family: 'Segoe Print';
+    src: url('SegoePrint.eot');
+    src: url('SegoePrint.eot?#iefix') format('embedded-opentype'),
+        url('SegoePrint.woff2') format('woff2'),
+        url('SegoePrint.woff') format('woff'),
+        url('SegoePrint.ttf') format('truetype');
+    font-weight: normal;
+    font-style: normal;
+    font-display: swap;
+}
+
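Unlike the Avenir Next Cyr rules, this face opts into font-display: swap, so text paints immediately in a fallback font and is swapped once SegoePrint loads. If the same behavior were wanted for the Avenir faces, each rule would gain one declaration; a sketch for the Regular face, reusing the URLs from the stylesheet above:

    @font-face {
        font-family: 'Avenir Next Cyr';
        src: url('AvenirNextCyr-Regular.woff2') format('woff2'),
            url('AvenirNextCyr-Regular.woff') format('woff');
        font-weight: 400;
        font-style: normal;
        font-display: swap; /* avoid invisible text while the font loads */
    }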
diff --git a/demos/assets/logo.png b/demos/assets/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..a471115690bef9f0bf7e40d0b4413fd5a4d494dd
Binary files /dev/null and b/demos/assets/logo.png differ
diff --git a/demos/assets/wheels/diff_gaussian_rasterization-0.0.0-cp312-cp312-linux_x86_64.whl b/demos/assets/wheels/diff_gaussian_rasterization-0.0.0-cp312-cp312-linux_x86_64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..5409b9c44f3478398095e527d1f4e3c67eeb8436
--- /dev/null
+++ b/demos/assets/wheels/diff_gaussian_rasterization-0.0.0-cp312-cp312-linux_x86_64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22c4a9041472bec33a0f0edc9910e3ff9d4bf2e310171c9c447b34578dd76fd5
+size 3321750
diff --git a/demos/assets/wheels/diffoctreerast-0.0.0-cp312-cp312-linux_x86_64.whl b/demos/assets/wheels/diffoctreerast-0.0.0-cp312-cp312-linux_x86_64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..3cbc3ba3fb56ebecf96f36ac9fcb785b217a5334
--- /dev/null
+++ b/demos/assets/wheels/diffoctreerast-0.0.0-cp312-cp312-linux_x86_64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d7fe2cca4409bdbbe4c6b5be7047ff2c5330e5b68fd599d0090fc6a034362ea
+size 10956508
diff --git a/demos/assets/wheels/nvdiffrast-0.3.5-py3-none-any.whl b/demos/assets/wheels/nvdiffrast-0.3.5-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..3d5e79bc95b41d7dd7c45aa86def56b021499d12
--- /dev/null
+++ b/demos/assets/wheels/nvdiffrast-0.3.5-py3-none-any.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67bc78cf66a7acec42f123988c5e03b6376158729a78842e4015282514cbe797
+size 140181
diff --git a/demos/assets/wheels/open3d_pycg_cpu-0.19.0-cp312-cp312-manylinux_2_31_x86_64.whl b/demos/assets/wheels/open3d_pycg_cpu-0.19.0-cp312-cp312-manylinux_2_31_x86_64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..04381378cf965fb0e5d6c41f0760c73ac74a6eff
--- /dev/null
+++ b/demos/assets/wheels/open3d_pycg_cpu-0.19.0-cp312-cp312-manylinux_2_31_x86_64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fde859ab7b79caccb86bbc126cb808ee500008db5318e2b374353c7668ba835d
+size 97219416
diff --git a/demos/assets/wheels/python_pycg-1.0.1-py3-none-any.whl b/demos/assets/wheels/python_pycg-1.0.1-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..394aaf5ba0278f639ccbfe85f83ce35b9e28eacb
--- /dev/null
+++ b/demos/assets/wheels/python_pycg-1.0.1-py3-none-any.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a5ace3138b681c8b8cb77b806704eab97ef0c670a3b2b4099a775e023984a00
+size 31388336
diff --git a/demos/assets/wheels/torch_scatter-2.1.2+pt25cu124-cp312-cp312-linux_x86_64.whl b/demos/assets/wheels/torch_scatter-2.1.2+pt25cu124-cp312-cp312-linux_x86_64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..81fd4d60afe93454aed2ac565e29291b384d16fa
--- /dev/null
+++ b/demos/assets/wheels/torch_scatter-2.1.2+pt25cu124-cp312-cp312-linux_x86_64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47887eb05578ffefee4eac15fb1d16f53e2601f4300f9c459bca4a74b7c6d4d3
+size 10759014
diff --git a/demos/build_wheels.sh b/demos/build_wheels.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ea2777e0f8057332c9569c4ef61738cf94c6e238
--- /dev/null
+++ b/demos/build_wheels.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+# Create a directory to store wheels
+mkdir -p ./wheels
+
+# Update system packages
+apt-get update -y
+apt-get install -y xvfb libx11-6 libgl1 libxrender1
+
+# 1. Basic Dependencies
+# We use 'pip wheel' to build/download wheels instead of installing
+pip wheel --wheel-dir=./wheels \
+    torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 \
+    --index-url https://download.pytorch.org/whl/cu124
+
+pip wheel --wheel-dir=./wheels \
+    pyvirtualdisplay \
+    pillow imageio imageio-ffmpeg tqdm easydict opencv-python-headless \
+    scipy ninja rembg onnxruntime trimesh open3d xatlas pyvista \
+    pymeshfix igraph transformers tensorview psutil \
+    lightning==2.2 h5py yacs scikit-image loguru boto3 \
+    mesh2sdf tetgen==0.6.4 pymeshlab plyfile einops libigl \
+    polyscope potpourri3d simple_parsing arrgh vtk numpy==1.26.4
+
+# 2. Git Repositories
+# pip wheel handles git urls perfectly
+pip wheel --wheel-dir=./wheels \
+    git+https://github.com/EasternJournalist/utils3d.git@9a4eb15e4021b67b12c460c7057d642626897ec8
+
+# 3. Extensions with Custom Build Steps (nvdiffrast, diffoctreerast, mip-splatting)
+# These often require cloning first if they have submodules or complex setups
+
+# nvdiffrast
+mkdir -p /tmp/extensions
+if [ ! -d "/tmp/extensions/nvdiffrast" ]; then
+    git clone https://github.com/NVlabs/nvdiffrast.git /tmp/extensions/nvdiffrast
+fi
+pip wheel --wheel-dir=./wheels /tmp/extensions/nvdiffrast
+
+# diffoctreerast
+if [ ! -d "/tmp/extensions/diffoctreerast" ]; then
+    git clone --recurse-submodules https://github.com/JeffreyXiang/diffoctreerast.git /tmp/extensions/diffoctreerast
+fi
+pip wheel --wheel-dir=./wheels /tmp/extensions/diffoctreerast
+
+# mip-splatting (diff-gaussian-rasterization)
+if [ ! -d "/tmp/extensions/mip-splatting" ]; then
+    git clone https://github.com/autonomousvision/mip-splatting.git /tmp/extensions/mip-splatting
+fi
+pip wheel --wheel-dir=./wheels /tmp/extensions/mip-splatting/submodules/diff-gaussian-rasterization/
+
+# 4. Pre-built Wheels (Kaolin, torch-scatter, spconv)
+# These are already wheels, so we just download them to the folder
+pip download --dest ./wheels \
+    kaolin==0.16.0 -f https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-2.5.0_cu124.html
+
+pip download --dest ./wheels \
+    spconv-cu124
+
+pip download --dest ./wheels \
+    torch-scatter -f https://data.pyg.org/whl/torch-2.5.0+cu124.html
+
+# 5. Python-PyCG
+pip wheel --wheel-dir=./wheels 'python-pycg[all]'
+
+echo "All wheels built in ./wheels"
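The wheels mirrored under demos/assets/wheels/ above appear to be the output of this script. On a machine with the same Python/CUDA ABI (cp312, cu124), installation can then run fully offline; a sketch, with the package list abbreviated:

    pip install --no-index --find-links=./wheels \
        torch==2.5.0 nvdiffrast diffoctreerast diff-gaussian-rasterization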
Python-PyCG +pip wheel --wheel-dir=./wheels 'python-pycg[all]' + +echo "All wheels built in ./wheels" diff --git a/demos/custom_utils.py b/demos/custom_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a145767a34e43a1456473e12a0d2ff15a2752ef3 --- /dev/null +++ b/demos/custom_utils.py @@ -0,0 +1,268 @@ +import os +import json +from subprocess import call, DEVNULL +import numpy as np +import shutil +import multiprocessing as mp +from lib.util.render import _install_blender, sphere_hammersley_sequence, BLENDER_PATH + +try: + mp.set_start_method("spawn", force=False) +except RuntimeError: + pass + +def _get_optimal_threads(num_workers): + """Calculate optimal CPU threads per Blender instance.""" + total_cores = os.cpu_count() or 4 + # Reserve 1 core for system/orchestration if possible + available_cores = max(1, total_cores - 1) + # Distribute remaining cores among workers + threads = max(1, available_cores // num_workers) + # Cap at 4 threads per instance since we are GPU bound anyway + # and too many threads just adds contention + return min(threads, 4) + +def _render_views_chunk(file_path, chunk_output_folder, views_chunk, blender_render_engine, cuda_device_id=None, threads=None): + """Render a subset of views into a chunk-specific folder.""" + os.makedirs(chunk_output_folder, exist_ok=True) + + # Prepare environment with GPU selection if provided + env = os.environ.copy() + if cuda_device_id is not None: + env["CUDA_VISIBLE_DEVICES"] = str(cuda_device_id) + + blender_exec = env.get('BLENDER_HOME', BLENDER_PATH) + if not os.path.exists(blender_exec) and blender_exec == BLENDER_PATH: + blender_exec = 'blender' # Fallback if specific path missing + + output_root = os.path.dirname(os.path.dirname(chunk_output_folder)) + blender_cache_dir = os.path.join(output_root, "blender_cache") + os.makedirs(blender_cache_dir, exist_ok=True) + env["XDG_CACHE_HOME"] = blender_cache_dir + + args = [ + blender_exec, '-b', + '-P', os.path.join(os.getcwd(), 'third_party/TRELLIS/dataset_toolkits', 'blender_script', 'render.py'), + '--', + '--views', json.dumps(views_chunk), + '--object', os.path.expanduser(file_path), + '--resolution', '512', + '--output_folder', chunk_output_folder, + '--engine', blender_render_engine, + '--save_mesh', + ] + + if threads: + args.extend(['--threads', str(threads)]) + + if file_path.endswith('.blend'): + args.insert(1, file_path) + + call(args, stdout=DEVNULL, stderr=DEVNULL, env=env) + +def _merge_blender_chunks(output_folder, chunk_infos, file_path, blender_render_engine): + """Merge chunk_* folders into the main output_folder and write transforms.json.""" + frames = [] + mesh_copied = False + + for i, (chunk_path, chunk_views) in enumerate(chunk_infos): + if not os.path.isdir(chunk_path): + continue + + # Copy mesh.ply once (from first chunk that has it) + mesh_src = os.path.join(chunk_path, "mesh.ply") + mesh_dst = os.path.join(output_folder, "mesh.ply") + if not mesh_copied and os.path.exists(mesh_src): + shutil.copy2(mesh_src, mesh_dst) + mesh_copied = True + + chunk_transforms_path = os.path.join(chunk_path, "transforms.json") + + # Simple retry logic if chunk failed + if not os.path.exists(chunk_transforms_path): + print(f"[merge_chunks] Warning: missing transforms.json in {chunk_path}, re-rendering chunk.") + shutil.rmtree(chunk_path, ignore_errors=True) + # Use default 1 thread for retry to be safe + _render_views_chunk(file_path, chunk_path, chunk_views, blender_render_engine, threads=2) + + if not os.path.exists(chunk_transforms_path): + # 
If still missing, raise error + raise RuntimeError(f"Unable to generate transforms.json for {chunk_path}") + + with open(chunk_transforms_path, "r") as f: + chunk_data = json.load(f) + chunk_frames = chunk_data.get("frames", []) + + if not chunk_frames: + # Empty frames could mean render failure + raise RuntimeError(f"No frames found in {chunk_transforms_path}") + + frame_lookup = { + os.path.basename(frame.get("file_path", "")): frame for frame in chunk_frames + } + + for img_name in os.listdir(chunk_path): + if not img_name.lower().endswith((".png", ".jpg", ".jpeg")): + continue + + src = os.path.join(chunk_path, img_name) + if img_name not in frame_lookup: + print(f"[merge_chunks] Warning: no metadata for {img_name} in {chunk_transforms_path}, skipping image.") + os.remove(src) + continue + + # Rename to avoid collisions if needed, though chunks are distinct + # Use chunk index prefix + dst_name = f"chunk{i:02d}_{img_name}" + dst = os.path.join(output_folder, dst_name) + shutil.move(src, dst) + + frame = frame_lookup[img_name].copy() + frame["file_path"] = dst_name + frames.append(frame) + + shutil.rmtree(chunk_path) + + if not frames: + raise RuntimeError("No frames were merged when building transforms.json") + + transforms_path = os.path.join(output_folder, "transforms.json") + with open(transforms_path, "w") as f: + json.dump({"frames": frames}, f, indent=4) + +def _run_single_render(file_path, output_folder, views, blender_render_engine): + # For single render, we can use more CPU threads since we are the only process + threads = min(os.cpu_count() or 4, 8) + + output_root = os.path.dirname(output_folder) + blender_cache_dir = os.path.join(output_root, "blender_cache") + os.makedirs(blender_cache_dir, exist_ok=True) + env = os.environ.copy() + env["XDG_CACHE_HOME"] = blender_cache_dir + + blender_exec = os.environ.get('BLENDER_HOME', BLENDER_PATH) + if not os.path.exists(blender_exec) and blender_exec == BLENDER_PATH: + blender_exec = 'blender' # Fallback + + args = [ + # 'xvfb-run', + # "-s", "-screen 0 1920x1080x24", + blender_exec, '-b', + '-P', os.path.join(os.getcwd(), 'third_party/TRELLIS/dataset_toolkits', 'blender_script', 'render.py'), + '--', + '--views', json.dumps(views), + '--object', os.path.expanduser(file_path), + '--resolution', '512', + '--output_folder', output_folder, + '--engine', blender_render_engine, + '--save_mesh', + '--threads', str(threads) + ] + if file_path.endswith('.blend'): + args.insert(1, file_path) + + # call(args, stdout=DEVNULL, stderr=DEVNULL) + call(args, env=env) + + +def render_all_views(file_path, output_folder, num_views=150, blender_render_engine="CYCLES", num_workers=None): + _install_blender() + # Build camera {yaw, pitch, radius, fov} + yaws = [] + pitchs = [] + offset = (np.random.rand(), np.random.rand()) + for i in range(num_views): + y, p = sphere_hammersley_sequence(i, num_views, offset) + yaws.append(y) + pitchs.append(p) + radius = [2] * num_views + fov = [40 / 180 * np.pi] * num_views + views = [{'yaw': y, 'pitch': p, 'radius': r, 'fov': f} for y, p, r, f in zip(yaws, pitchs, radius, fov)] + + # Determine GPU availability using torch if available (safe check) + num_gpus = 0 + try: + import torch + if torch.cuda.is_available(): + num_gpus = torch.cuda.device_count() + except ImportError: + pass + + # Smart worker count logic + if num_workers is None: + if blender_render_engine == 'CYCLES': + if num_gpus > 0: + # To maximize VRAM usage and overlap CPU preparation with GPU rendering, + # we can run multiple concurrent Blender 
instances per GPU. + # For object-level scenes, 2-3 workers per GPU is usually the sweet spot. + # Too many will cause context thrashing; too few leaves VRAM idle. + WORKERS_PER_GPU = 3 + num_workers = num_gpus * WORKERS_PER_GPU + else: + # No GPU found: fallback to CPU. Parallelizing CPU might help if RAM permits. + # Cap at 4 to be safe. + num_workers = min(os.cpu_count() or 4, 4) + else: + # For non-cycles (e.g. Eevee), we can be slightly more aggressive but still bound by GPU + if num_gpus > 0: + num_workers = num_gpus + else: + num_workers = min(os.cpu_count() or 4, 8) + + # Override: Force serial for small batches to avoid startup overhead + # 15 views is small enough that overhead of 2+ processes > gain + if len(views) < 30: + num_workers = 1 + + if num_workers > 1: + print(f"[render_all_views] Running with {num_workers} workers (GPUs detected: {num_gpus}).") + else: + print(f"[render_all_views] Running serially (GPUs detected: {num_gpus}).") + + if num_workers <= 1: + _run_single_render(file_path, output_folder, views, blender_render_engine) + else: + # Multi-process: split views into chunks and render in parallel + num_workers = min(num_workers, num_views) + view_chunks = np.array_split(views, num_workers) + + # Convert numpy arrays back to plain lists (json-serializable) + view_chunks = [list(chunk) for chunk in view_chunks] + chunk_infos = [] + + # Calculate optimal threads per worker + threads_per_worker = _get_optimal_threads(num_workers) + + with mp.Pool(processes=num_workers) as pool: + jobs = [] + for idx, chunk in enumerate(view_chunks): + chunk_output_folder = os.path.join(output_folder, f"chunk_{idx}") + chunk_infos.append((chunk_output_folder, chunk)) + + # Assign GPU ID round-robin if GPUs are available + assigned_gpu = None + if num_gpus > 0: + assigned_gpu = idx % num_gpus + + jobs.append( + pool.apply_async( + _render_views_chunk, + (file_path, chunk_output_folder, chunk, blender_render_engine, assigned_gpu, threads_per_worker), + ) + ) + for j in jobs: + j.get() + + _merge_blender_chunks(output_folder, chunk_infos, file_path, blender_render_engine) + + if os.path.exists(os.path.join(output_folder, 'transforms.json')): + # Return list of rendered image paths + out_renderviews = sorted( + [ + os.path.join(output_folder, f) + for f in os.listdir(output_folder) + if f.lower().endswith((".png", ".jpg", ".jpeg")) + ] + ) + return out_renderviews if out_renderviews else None + return None diff --git a/demos/demo_setup_colab.sh b/demos/demo_setup_colab.sh new file mode 100644 index 0000000000000000000000000000000000000000..d5bcfbbc75b92f352c44f97b704f0c38f83fbd04 --- /dev/null +++ b/demos/demo_setup_colab.sh @@ -0,0 +1,59 @@ +apt-get update -y +apt-get install -y xvfb +pip install pyvirtualdisplay + +pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/cu124 +pip install pillow imageio imageio-ffmpeg tqdm easydict opencv-python-headless scipy ninja rembg onnxruntime trimesh open3d xatlas pyvista pymeshfix igraph transformers tensorview -qq +pip install git+https://github.com/EasternJournalist/utils3d.git@9a4eb15e4021b67b12c460c7057d642626897ec8 -qq +pip install flash-attn + +mkdir -p /tmp/extensions +git clone https://github.com/NVlabs/nvdiffrast.git /tmp/extensions/nvdiffrast +pip install /tmp/extensions/nvdiffrast + +mkdir -p /tmp/extensions +git clone --recurse-submodules https://github.com/JeffreyXiang/diffoctreerast.git /tmp/extensions/diffoctreerast +pip install /tmp/extensions/diffoctreerast + +# pip install 
kaolin==0.18.0 -f https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-2.8.0_cu128.html # CHECK CUDA VERSION BEFORE INSTALLING +pip install kaolin -f https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-2.5.0_cu124.html + +mkdir -p /tmp/extensions +git clone https://github.com/autonomousvision/mip-splatting.git /tmp/extensions/mip-splatting +pip install /tmp/extensions/mip-splatting/submodules/diff-gaussian-rasterization/ + +pip install spconv-cu124 + +pip install -U 'python-pycg[all]' +pip install psutil +pip install lightning==2.2 h5py yacs trimesh scikit-image loguru boto3 +pip install mesh2sdf tetgen pymeshlab plyfile einops libigl polyscope potpourri3d simple_parsing arrgh open3d +# pip install torch-scatter -f https://data.pyg.org/whl/torch-2.8.0+cu128.html +pip install torch-scatter -f https://data.pyg.org/whl/torch-2.5.0+cu124.html +sudo apt install libx11-6 libgl1 libxrender1 +pip install vtk + +pip install tetgen==0.6.4 +pip install numpy==1.26.4 + +mkdir -p ./models +wget https://huggingface.co/mikaelaangel/partfield-ckpt/resolve/main/model_objaverse.ckpt -O ../models/model_objaverse.ckpt + +export BLENDER_LINK='https://download.blender.org/release/Blender3.0/blender-3.0.1-linux-x64.tar.xz' +export BLENDER_INSTALLATION_PATH='/tmp' +export BLENDER_HOME="${BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64/blender" + +install_blender() { + if [ ! -f "$BLENDER_HOME" ]; then + echo "Installing Blender..." + sudo apt-get update + sudo apt-get install -y libxrender1 libxi6 libxkbcommon-x11-0 libsm6 + wget "$BLENDER_LINK" -P "$BLENDER_INSTALLATION_PATH" + tar -xvf "${BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64.tar.xz" -C "$BLENDER_INSTALLATION_PATH" + echo "Blender installed at $BLENDER_HOME" + else + echo "Blender already installed at $BLENDER_HOME" + fi +} + +install_blender \ No newline at end of file diff --git a/demos/pipeline_fn.py b/demos/pipeline_fn.py new file mode 100644 index 0000000000000000000000000000000000000000..eb872c1c53befe279886e5bb50aaae8b27f8fdf2 --- /dev/null +++ b/demos/pipeline_fn.py @@ -0,0 +1,460 @@ +import os +import os.path as osp +import gc +import trimesh +from PIL import Image +import logging as log +from omegaconf import OmegaConf +import random +import numpy as np +import hashlib +from typing import Optional + +import torch +from torchvision import transforms +from pycg import vis, image +from pycg import render as pycg_render + +import sys +sys.path.append('.') + +from lib.util.render import BLENDER_PATH +from third_party.PartField.partfield.model_trainer_pvcnn_only_demo import Model +from lib.opt import appearance, self_similarity +from lib.util import generation, common, pointcloud +import third_party.TRELLIS.trellis.models as models +from demos.custom_utils import render_all_views + +# Set BLENDER_HOME for pycg if not set +if "BLENDER_HOME" not in os.environ: + if osp.exists(BLENDER_PATH): + os.environ["BLENDER_HOME"] = BLENDER_PATH + else: + # Fallback to just 'blender' if path invalid, though this likely fails too if not in PATH + os.environ["BLENDER_HOME"] = "blender" + +log.getLogger().setLevel(log.INFO) +log.basicConfig(level=log.INFO, + format='%(asctime)s - %(levelname)s - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S') + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +partfield_config = 'third_party/PartField/config.yaml' +partfield_cfg = OmegaConf.load(partfield_config) + +def file_sha256(path: str, chunk_size: int = 1 << 20) -> str: + h = hashlib.sha256() + with open(path, "rb") as f: + for chunk in 
iter(lambda: f.read(chunk_size), b""):
+            h.update(chunk)
+    return h.hexdigest()
+
+def init_partfield(obj_path):
+    torch.manual_seed(0)
+    random.seed(0)
+    np.random.seed(0)
+
+    partfield_model = Model(partfield_cfg, obj_path)
+    partfield_model = partfield_model.to(device)
+
+    ckpt = torch.load(partfield_cfg.continue_ckpt, map_location=device, weights_only=False)
+
+    state_dict = ckpt.get("state_dict", ckpt)
+    state_dict = {k.replace("model.", ""): v for k, v in state_dict.items()}
+    missing, unexpected = partfield_model.load_state_dict(state_dict, strict=False)
+
+    if missing:
+        print("[load_partfield_model] Missing keys:", missing)
+    if unexpected:
+        print("[load_partfield_model] Unexpected keys:", unexpected)
+
+    partfield_model.eval()
+    return partfield_model
+
+def partfield_pipeline_predict(obj_path, output_dir):
+    log.info("Extracting PartField feature planes...")
+
+    seed = int(partfield_cfg.seed)
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+
+    partfield_model = init_partfield(obj_path)
+    dataloader = partfield_model.predict_dataloader()
+    batch = next(iter(dataloader))
+
+    with torch.no_grad():
+        with torch.autocast(device_type="cuda", dtype=torch.float16):
+            batch = {
+                k: (v.to(device) if torch.is_tensor(v) else v)
+                for k, v in batch.items()
+            }
+            part_planes, uid = partfield_model.predict_step(batch, batch_idx=0)
+
+    os.makedirs(output_dir, exist_ok=True)
+    partfield_save_path = f'{output_dir}/part_feat_{uid}_batch_part_plane.npy'
+    log.info(f"Saving PartField feature planes (uid {uid}) to {partfield_save_path}")
+    np.save(partfield_save_path, part_planes)
+
+    del partfield_model
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+    gc.collect()
+
+    return partfield_save_path
+
+class GuideFlow3dPipeline:
+    def __init__(self):
+        self.cfg = None
+
+    def from_pretrained(self, config):
+        self.cfg = config
+        return self
+
+    def preprocess(
+        self,
+        structure_mesh: str,
+        convert_yup_to_zup: bool,
+        output_dir: str,
+    ) -> Optional[dict]:
+        log.info("Loading structure mesh...")
+
+        if not structure_mesh.endswith('.glb'):
+            log.error("Meshes must be in .glb format")
+            return None
+
+        struct_hash_path = osp.join(output_dir, "struct_mesh.hash")
+        current_struct_hash = file_sha256(structure_mesh)
+        cached_struct_hash = None
+        if osp.exists(struct_hash_path):
+            with open(struct_hash_path, "r") as f:
+                cached_struct_hash = f.read().strip()
+
+        use_struct_cache = (cached_struct_hash == current_struct_hash)
+
+        struct_mesh_zup_path = osp.join(output_dir, "struct_mesh_zup.glb")
+
+        if use_struct_cache and osp.exists(struct_mesh_zup_path):
+            log.info("Using cached structure mesh (z-up).")
+            struct_mesh = trimesh.load(struct_mesh_zup_path, force="mesh")
+        else:
+            struct_mesh = trimesh.load(structure_mesh, force='mesh')
+            if convert_yup_to_zup:
+                struct_mesh = pointcloud.convert_mesh_yup_to_zup(struct_mesh)
+            # Convert at most once and always write the file: rendering,
+            # voxelization and PartField below all read struct_mesh_zup.glb.
+            struct_mesh.export(struct_mesh_zup_path)
+
+            with open(struct_hash_path, "w") as f:
+                f.write(current_struct_hash)
+
+        log.info(f"Rendering structure mesh for {self.cfg.num_views // 10} views...")
+        struct_render_dir = osp.join(output_dir, 'struct_renders')
+        common.ensure_dir(struct_render_dir)
+
+        struct_mesh_ply_path = osp.join(struct_render_dir, "mesh.ply")
+
+        if
use_struct_cache and osp.exists(struct_mesh_ply_path): + log.info("Using cached structure renders.") + out_renderviews = sorted( + [ + osp.join(struct_render_dir, f) + for f in os.listdir(struct_render_dir) + if f.lower().endswith((".png", ".jpg", ".jpeg")) + ] + ) + else: + out_renderviews = render_all_views( + struct_mesh_zup_path, + struct_render_dir, + num_views=self.cfg.num_views // 10, + num_workers=None # Let custom_utils decide best worker count + ) + + if not out_renderviews: + log.error("Structure rendering failed! Aborting pipeline.") + return None + + voxel_dir = osp.join(output_dir, 'voxels') + common.ensure_dir(voxel_dir) + log.info("Voxelizing structure mesh...") + struct_voxels_path = osp.join(voxel_dir, "struct_voxels.ply") + + if use_struct_cache and osp.exists(struct_voxels_path): + log.info("Using cached structure voxels.") + else: + pointcloud.voxelize_mesh( + struct_mesh_ply_path, + save_path=struct_voxels_path, + ) + + log.info("Extracting Structure Mesh PartField feature planes...") + partfield_dir = osp.join(output_dir, 'partfield') + common.ensure_dir(partfield_dir) + + existing = [ + f for f in os.listdir(partfield_dir) + if f.startswith("part_feat_struct_mesh_zup") and f.endswith("_batch_part_plane.npy") + ] + if use_struct_cache and existing: + partfield_save_path = osp.join(partfield_dir, existing[0]) + log.info(f"Using cached Structure PartField at {partfield_save_path}") + else: + print("PREDICTING STRUCTURE PART FIELD...") + partfield_save_path = partfield_pipeline_predict( + struct_mesh_zup_path, + partfield_dir, + ) + + if not out_renderviews: + log.info("Structure rendering failed!") + + return { + "struct_mesh": struct_mesh, + "render_out": out_renderviews, + "partfield_structure_predictions_save_path": partfield_save_path, + "voxel_dir": voxel_dir + } + + def run_appearance( + self, + structure_mesh: str, + convert_target_yup_to_zup: bool, + convert_appearance_yup_to_zup: bool, + output_dir: str, + appearance_mesh: str, + appearance_image: str, + ) -> Optional[str]: + _ = self.preprocess( + structure_mesh=structure_mesh, + convert_yup_to_zup=convert_target_yup_to_zup, + output_dir=output_dir, + ) + + app_hash_path = osp.join(output_dir, "app_mesh.hash") + current_app_hash = file_sha256(appearance_mesh) + cached_app_hash = None + if osp.exists(app_hash_path): + with open(app_hash_path, "r") as f: + cached_app_hash = f.read().strip() + use_app_cache = (cached_app_hash == current_app_hash) + + blender_cache_dir = osp.join(output_dir, "blender_cache") + os.makedirs(blender_cache_dir, exist_ok=True) + os.environ["XDG_CACHE_HOME"] = blender_cache_dir + + log.info("Running appearance-guided optimization...") + + # Load appearance mesh + log.info("Loading appearance mesh...") + + if not appearance_mesh.endswith('.glb'): + log.error("Meshes must be in .glb format") + return None + + if not osp.exists(appearance_mesh): + log.error(f"Appearance mesh not found: {appearance_mesh}") + return None + + app_mesh_path = osp.join(output_dir, "app_mesh.glb") + app_mesh_zup_path = osp.join(output_dir, "app_mesh_zup.glb") + + if use_app_cache and osp.exists(app_mesh_zup_path): + log.info("Using cached appearance mesh (z-up).") + app_mesh = trimesh.load(app_mesh_zup_path, force="mesh") + else: + app_mesh = trimesh.load(appearance_mesh, force="mesh") + app_mesh.export(app_mesh_path) + + if convert_appearance_yup_to_zup: + app_mesh = pointcloud.convert_mesh_yup_to_zup(app_mesh) + app_mesh.export(app_mesh_zup_path) + + with open(app_hash_path, "w") as f: + 
f.write(current_app_hash) + + # Load appearance image + log.info("Loading appearance image...") + if appearance_image: + app_image = Image.open(appearance_image).convert('RGB') + app_image.save(osp.join(output_dir, 'app_image.png')) + else: + mesh = vis.from_file(osp.join(output_dir, 'app_mesh.glb'), load_obj_textures=True) + mesh.paint_uniform_color([0.5, 0.5, 0.5]) + scene = pycg_render.Scene(up_axis='+Y') + scene.add_object(mesh) + scene.quick_camera(w=512, h=512, pitch_angle=30, plane_angle=-45.0, fov=40) + pycg_render.ThemeDiffuseShadow(None, sun_tilt_right=0.0, sun_tilt_back=0.0, sun_angle=60.0).apply_to(scene) + rendering = scene.render_blender(quality=512) + rendering = image.alpha_compositing(rendering, image.solid(rendering.shape[1], rendering.shape[0])) + image.write(osp.join(output_dir, 'app_image.png'), rendering) + + # Render views for DinoV2 feature extraction + log.info(f"Rendering appearance mesh for {self.cfg.num_views} views...") + app_render_dir = osp.join(output_dir, 'app_renders') + common.ensure_dir(app_render_dir) + app_mesh_ply_path = osp.join(app_render_dir, "mesh.ply") + + if use_app_cache and osp.exists(app_mesh_ply_path): + log.info("Using cached appearance renders.") + out_renderviews = sorted( + [ + osp.join(app_render_dir, f) + for f in os.listdir(app_render_dir) + if f.lower().endswith((".png", ".jpg", ".jpeg")) + ] + ) + else: + out_renderviews = render_all_views( + app_mesh_zup_path, + app_render_dir, + num_views=self.cfg.num_views, + num_workers=None # Let custom_utils decide best worker count + ) + + if not out_renderviews: + log.info("Appearance rendering failed!") + return None + + # Voxelise mesh + log.info("Voxelizing appearance mesh...") + app_voxel_dir = osp.join(output_dir, "voxels") + common.ensure_dir(app_voxel_dir) + app_voxels_path = osp.join(app_voxel_dir, "app_voxels.ply") + + if use_app_cache and osp.exists(app_voxels_path): + log.info("Using cached appearance voxels.") + else: + pointcloud.voxelize_mesh( + app_mesh_ply_path, + save_path=app_voxels_path, + ) + + # Extract DinoV2 Features + log.info("Extracting DinoV2 features...") + features_dir = osp.join(output_dir, "features", self.cfg.feature_name) + common.ensure_dir(features_dir) + + if use_app_cache and os.listdir(features_dir): + log.info("Using cached DINOv2 features.") + else: + log.info("Extracting DinoV2 features...") + + dinov2_model = torch.hub.load(self.cfg.dinov2_repo, self.cfg.feature_name) + dinov2_model.eval().cuda() + transform = transforms.Compose([transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) + + generation.extract_feature(output_dir, dinov2_model, transform) + torch.cuda.empty_cache() + + del dinov2_model + gc.collect() # Free up memory + + # Extract SLAT Latent + log.info("Extracting SLAT latent...") + latents_dir = osp.join(output_dir, "latents", self.cfg.latent_name) + common.ensure_dir(latents_dir) + + if use_app_cache and os.listdir(latents_dir): + log.info("Using cached SLAT latent.") + else: + log.info("Extracting SLAT latent...") + encoder = models.from_pretrained(self.cfg.enc_pretrained).eval().cuda() + + generation.get_latent(output_dir, self.cfg.feature_name, self.cfg.latent_name, encoder) + + del encoder + gc.collect() # Free up memory + + # Extract PartField features for appearance mesh + log.info("Extracting Appearance Mesh PartField feature planes...") + app_partfield_dir = osp.join(output_dir, "partfield") + common.ensure_dir(app_partfield_dir) + + existing_app_pf = [ + f for f in os.listdir(app_partfield_dir) + if 
f.startswith("part_feat_app_mesh_zup") and f.endswith("_batch_part_plane.npy") + ] + if use_app_cache and existing_app_pf: + appearance_partfield_save_path = osp.join( + app_partfield_dir, existing_app_pf[0] + ) + log.info( + f"Using cached Appearance PartField at {appearance_partfield_save_path}" + ) + else: + appearance_partfield_save_path = partfield_pipeline_predict( + app_mesh_zup_path, + app_partfield_dir, + ) + + # Appearance Optimization + appearance.optimize_appearance(self.cfg, output_dir) + + # Return the output mesh path + output_mesh_path = osp.join(output_dir, 'out_app.glb') + output_video_path = osp.join(output_dir, 'out_gaussian_app.mp4') + if not osp.exists(output_mesh_path) or not osp.exists(output_video_path): + log.error(f"Output mesh or video not found at {output_mesh_path} or {output_video_path}") + return None, None + return output_mesh_path, output_video_path + + def run_self_similarity( + self, + structure_mesh: str, + convert_target_yup_to_zup: bool, + output_dir: str, + appearance_text: str, + ) -> Optional[str]: + _ = self.preprocess( + structure_mesh=structure_mesh, + convert_yup_to_zup=convert_target_yup_to_zup, + output_dir=output_dir, + ) + log.info("Running similarity-guided optimization...") + + # Self-Similarity Optimization + self_similarity.optimize_self_similarity(self.cfg, appearance_text, output_dir) + + # Return the output mesh path + output_mesh_path = osp.join(output_dir, 'out_sim.glb') + output_video_path = osp.join(output_dir, 'out_gaussian_sim.mp4') + if not osp.exists(output_mesh_path) or not osp.exists(output_video_path): + log.error(f"Output mesh or video not found at {output_mesh_path} or {output_video_path}") + return None, None + return output_mesh_path, output_video_path + +def main(): + args = { + "structure_mesh": os.path.join(os.getcwd(), "structure_mesh.glb"), + "output_dir": os.path.join(os.getcwd(), "all_outputs", "pipeline_outputs"), + "convert_target_yup_to_zup": True, + "convert_appearance_yup_to_zup": True, + "appearance_mesh": os.path.join(os.getcwd(), "appearance_mesh.glb"), + "appearance_image": os.path.join(os.getcwd(), "appearance_image.jpg"), + "appearance_text": "", + } + + cfg = OmegaConf.load('config/default.yaml') + + common.ensure_dir(args["output_dir"]) + + pipe = GuideFlow3dPipeline.from_pretrained(cfg) + + if args["guidance_mode"] == "appearance": + out = pipe.run_appearance( + **args + ) + else: + out = pipe.run_self_similarity( + **args + ) diff --git a/demos/run_gradio_demo.py b/demos/run_gradio_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..06a731818095c58fee1a739885d5008b414daea1 --- /dev/null +++ b/demos/run_gradio_demo.py @@ -0,0 +1,441 @@ +import os +import sys +import spaces +import base64 +import tempfile +from omegaconf import OmegaConf +from typing import Optional, Union, Tuple + +import gradio as gr + +GUIDEFLOW_YELLOW = "#ccad57" +GUIDEFLOW_BLUE = "#2459c2" +GUIDEFLOW_GREEN = "#8edf9f" + +os.environ["CUMM_DISABLE_JIT"] = "1" +os.environ["SPCONV_DISABLE_JIT"] = "1" +os.environ["TOKENIZERS_PARALLELISM"] = "false" + +# Add project root to Python path +project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +if project_root not in sys.path: + sys.path.insert(0, project_root) + +# --- START XVFB GLOBALLY --- +# Check if we are in a headless environment and DISPLAY is not set +if os.environ.get("DISPLAY") is None: + print("[INFO] Starting Xvfb for headless rendering...") + from pyvirtualdisplay import Display + + # Start Xvfb. visible=0 means headless. 
+ # size=(1920, 1080) matches your previous xvfb-run settings. + display = Display(visible=0, size=(1920, 1080)) + display.start() + + # Ensure DISPLAY env var is set for subprocesses + if os.environ.get("DISPLAY") is None: + # PyVirtualDisplay usually sets this, but fallback if needed + os.environ["DISPLAY"] = f":{display.display}" + + print(f"[INFO] Xvfb started on {os.environ['DISPLAY']}") + +# --- LOGO SETUP (BASE64) --- +def image_to_base64(image_path): + """Encodes an image to a base64 string for direct HTML embedding.""" + if not os.path.exists(image_path): + return "" + with open(image_path, "rb") as img_file: + encoded_string = base64.b64encode(img_file.read()).decode('utf-8') + return f"data:image/png;base64,{encoded_string}" + +logo_rel_path = os.path.join("demos", "assets", "logo.png") +logo_abs_path = os.path.join(project_root, logo_rel_path) +logo_src = image_to_base64(logo_abs_path) + +BLENDER_LINK = 'https://download.blender.org/release/Blender3.0/blender-3.0.1-linux-x64.tar.xz' +BLENDER_INSTALLATION_PATH = '/tmp' +BLENDER_PATH = f'{BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64/blender' + +def _install_blender(): + if not os.path.exists(BLENDER_PATH): + os.system('sudo apt-get update') + os.system('sudo apt-get install -y libxrender1 libxi6 libxkbcommon-x11-0 libsm6') + os.system(f'wget {BLENDER_LINK} -P {BLENDER_INSTALLATION_PATH}') + os.system(f'tar -xvf {BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64.tar.xz -C {BLENDER_INSTALLATION_PATH}') + +_install_blender() + +# Attempt import, handle failure gracefully for the demo shell +try: + from demos.pipeline_fn import GuideFlow3dPipeline +except ImportError: + GuideFlow3dPipeline = None + +pipe = None +cfg = None + +# Initialize Pipeline +try: + cfg_path = os.path.join(project_root, 'config', 'default.yaml') + if os.path.exists(cfg_path): + cfg = OmegaConf.load(cfg_path) + if GuideFlow3dPipeline: + pipe = GuideFlow3dPipeline().from_pretrained(cfg) +except Exception as e: + print(f"Error initializing pipeline: {e}") + pass + +output_dir = os.path.join(os.getcwd(), "all_outputs") +os.makedirs(output_dir, exist_ok=True) + +# --- MAPPING HELPERS --- + +# Dictionary mapping static thumbnail images to actual GLB files +THUMB_TO_GLB = { + # Structure Mesh Examples + "example_data/thumbs/example1_thumb.png": "example_data/example1.glb", + # Reference Appearance Mesh Examples + "example_data/thumbs/B07QC84LP1_thumb.png": "example_data/B07QC84LP1.glb" +} + +# Create a lookup based on basename to be robust against Gradio temp paths +THUMB_BASENAME_TO_GLB = {os.path.basename(k): v for k, v in THUMB_TO_GLB.items()} + +def load_mesh_from_thumb(thumb_path: str) -> Optional[str]: + """Callback to return the GLB path associated with a thumbnail.""" + if not thumb_path: + return None + basename = os.path.basename(thumb_path) + return THUMB_BASENAME_TO_GLB.get(basename, None) + +def _ensure_glb_path(result: Union[str, bytes, os.PathLike]) -> str: + """Normalize various return types from fn() to a .glb file path.""" + if isinstance(result, (str, os.PathLike)): + path = os.fspath(result) + if not os.path.exists(path): + raise gr.Error("Returned mesh path does not exist.") + return path + if isinstance(result, (bytes, bytearray)): + tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".glb") + tmp.write(result) + tmp.flush() + tmp.close() + return tmp.name + +@spaces.GPU(duration=360) +def on_run( + guidance_mode_state: str, + app_struct_mesh: Optional[str], + app_ref_mesh: Optional[str], + app_ref_image: Optional[str], + 
sim_struct_mesh: Optional[str], + sim_ref_text: Optional[str], + sim_ref_image: Optional[str], + target_up_label: str, + reference_up_label: str, + cfg_strength: float, + num_steps: int, + learning_rate: float, +) -> Tuple[str, Optional[str]]: + + current_mode = guidance_mode_state.lower() + + if current_mode == "appearance": + target_mesh_path = app_struct_mesh + reference_mesh_path = app_ref_mesh + reference_image_path = app_ref_image + reference_text = None + else: + target_mesh_path = sim_struct_mesh + reference_text = sim_ref_text + reference_image_path = sim_ref_image + reference_mesh_path = None + + if not target_mesh_path: + raise gr.Error(f"Target Structure mesh is required for {current_mode} mode.") + + if pipe is None: + raise gr.Error("Pipeline not initialized. Check logs.") + + args = { + "structure_mesh": target_mesh_path, + "output_dir": output_dir, + "convert_target_yup_to_zup": target_up_label == "Z-up", + "convert_appearance_yup_to_zup": reference_up_label == "Z-up", + "appearance_mesh": reference_mesh_path, + "appearance_image": reference_image_path, + "appearance_text": (reference_text or "").strip(), + } + + fn = None + if current_mode == "appearance": + if not reference_mesh_path: + raise gr.Error("Appearance mode requires a reference mesh.") + fn = pipe.run_appearance + args.pop("appearance_text", None) + else: # similarity + if not reference_text: + raise gr.Error("Similarity mode requires a text prompt.") + fn = pipe.run_self_similarity + args.pop("appearance_mesh", None) + args.pop("appearance_image", None) + args.pop("convert_appearance_yup_to_zup", None) + + if cfg: + updated_cfg = cfg # OmegaConf.load(cfg) + updated_cfg.cfg_strength = cfg_strength + updated_cfg.steps = num_steps + updated_cfg.learning_rate = learning_rate + pipe.cfg = updated_cfg + + try: + result_mesh, result_video = fn(**args) + mesh_path = _ensure_glb_path(result_mesh) + video_path = _ensure_glb_path(result_video) + return mesh_path, video_path + except Exception as e: + raise gr.Error(f"Generation failed: {str(e)}") + +# --- UI Styling & Header --- + +css = f""" +body, .gradio-container {{ + background-color: #ffffff !important; + color: #1f2937 !important; +}} +.dark body, .dark .gradio-container {{ + background-color: #ffffff !important; + color: #1f2937 !important; +}} +h1, h2, h3, span, p {{ + font-family: 'Inter', 'Roboto', sans-serif; +}} +.guideflow-header {{ + display: flex; + flex-direction: column; + align-items: center; + margin-bottom: 1rem; +}} +.logo-row {{ + display: flex; + align-items: baseline; + gap: 0.2rem; +}} +.logo-img {{ + height: 4rem; + width: auto; + transform: translateY(0.5rem); +}} +.gradient-title {{ + font-size: 3.5rem; + font-weight: 800; + background: linear-gradient(90deg, {GUIDEFLOW_GREEN}, {GUIDEFLOW_BLUE}, {GUIDEFLOW_YELLOW}); + -webkit-background-clip: text; + background-clip: text; + color: transparent; + line-height: 1.2; +}} +.subtitle {{ + font-size: 1.5rem; + font-weight: 600; + color: {GUIDEFLOW_YELLOW}; + margin-top: 0.5rem; + text-align: center; +}} +.authors {{ + font-size: 1rem; + color: #334155; + margin-top: 0.5rem; +}} +.affiliations {{ + font-size: 0.9rem; + color: #6b7280; + margin-top: 0.2rem; +}} +.venue {{ + font-size: 1.1rem; + font-weight: 700; + color: #111827; + margin-top: 0.5rem; +}} +.links a {{ + color: {GUIDEFLOW_BLUE}; + text-decoration: none; + margin: 0 0.5rem; + font-weight: 500; +}} +.links a:hover {{ + text-decoration: underline; +}} +.demo-credit {{ + font-size: 0.9rem; + color: #64748b; + margin-top: 0.5rem; +}} 
+.instructions-container {{ + max-width: 800px; + margin: 0 auto 2rem auto; + text-align: left; + padding: 0 1rem; +}} +.input-row {{ align-items: flex-start; margin-bottom: 1rem; }} +""" + +HEADER_HTML = f""" +
+<div class="guideflow-header">
+    <div class="logo-row">
+        <img src="{logo_src}" alt="GuideFlow3D Logo" class="logo-img"/>
+        <span class="gradient-title">uideFlow3D</span>
+    </div>
+    <div class="subtitle">Optimization-Guided Rectified Flow For Appearance Transfer</div>
+    <div class="authors">
+        Sayan Deb Sarkar<sup>1</sup>&nbsp;&nbsp;&nbsp;
+        Sinisa Stekovic<sup>2</sup>&nbsp;&nbsp;&nbsp;
+        Vincent Lepetit<sup>2</sup>&nbsp;&nbsp;&nbsp;
+        Iro Armeni<sup>1</sup>
+    </div>
+    <div class="affiliations">
+        <sup>1</sup>Stanford University&nbsp;&nbsp;&nbsp;<sup>2</sup>ENPC, IP Paris
+    </div>
+    <div class="venue">NeurIPS 2025</div>
+    <div class="demo-credit">Demo made by Suvaditya Mukherjee</div>
+</div>
+"""
+
+INSTRUCTIONS_MD = """
+<div class="instructions-container">
+    <h3>Instructions</h3>
+    <ol>
+        <li><b>Upload a Structure Mesh (.glb):</b> This defines the shape of your 3D object.</li>
+        <li><b>Choose Guidance Mode:</b> Select "Self-Similarity" (Text) or "Appearance" (Mesh/Image) using the tabs.</li>
+        <li><b>Provide Reference:</b> Enter a text prompt or upload a reference image/mesh.</li>
+        <li><b>Run:</b> Click "Generate 3D Asset" to create the result.</li>
+    </ol>
+</div>
+""" + +# Example Data +EX_STRUCT_THUMBS = [["example_data/thumbs/example1_thumb.png"]] +EX_MESH_THUMBS = [["example_data/thumbs/B07QC84LP1_thumb.png"]] + +EX_IMG = ["example_data/B07QC84LP1_orig.png"] +EX_TEXT = ["a wooden chair", "a marble statue", "a golden trophy"] + +with gr.Blocks( + title="GuideFlow3D", + css=css, + theme=gr.themes.Default(primary_hue="sky", secondary_hue="lime").set( + body_background_fill="white", + background_fill_primary="white", + block_background_fill="white", + input_background_fill="#f9fafb" + ) +) as demo: + + gr.HTML(HEADER_HTML) + gr.HTML(INSTRUCTIONS_MD) + + guidance_mode_state = gr.State(value="Similarity") + + with gr.Tabs() as guidance_tabs: + + # --- TAB 1: SELF-SIMILARITY (LEFT) --- + with gr.TabItem("Self-Similarity", id="tab_similarity") as tab_sim: + gr.Markdown("### Similarity Editing Inputs") + + with gr.Row(elem_classes="input-row"): + with gr.Column(scale=3): + sim_struct_mesh = gr.Model3D(label="Structure Mesh (.glb)", interactive=True, height=300) + with gr.Column(scale=2): + sim_struct_hidden = gr.Image(type="filepath", visible=False) + sim_struct_mesh_examples = gr.Examples(examples=EX_STRUCT_THUMBS, inputs=sim_struct_hidden, label="Structure Examples") + + with gr.Row(elem_classes="input-row"): + with gr.Column(scale=3): + sim_ref_text = gr.Textbox(label="Reference Text Prompt", placeholder="Describe the appearance...", lines=2) + with gr.Column(scale=2): + gr.Examples(examples=EX_TEXT, inputs=sim_ref_text, label="Prompt Examples") + + with gr.Row(elem_classes="input-row"): + with gr.Column(scale=3): + sim_ref_image = gr.Image(label="Reference Appearance Image (Optional)", type="filepath", height=250) + with gr.Column(scale=2): + gr.Examples(examples=EX_IMG, inputs=sim_ref_image, label="Image Examples") + + # --- TAB 2: APPEARANCE (RIGHT) --- + with gr.TabItem("Appearance", id="tab_appearance") as tab_app: + gr.Markdown("### Appearance Transfer Inputs") + + with gr.Row(elem_classes="input-row"): + with gr.Column(scale=3): + app_struct_mesh = gr.Model3D(label="Structure Mesh (.glb)", interactive=True, height=300) + with gr.Column(scale=2): + app_struct_hidden = gr.Image(type="filepath", visible=False) + app_struct_mesh_examples = gr.Examples(examples=EX_STRUCT_THUMBS, inputs=app_struct_hidden, label="Structure Examples") + + with gr.Row(elem_classes="input-row"): + with gr.Column(scale=3): + app_ref_image = gr.Image(label="Reference Appearance Image", type="filepath", height=250) + with gr.Column(scale=2): + gr.Examples(examples=EX_IMG, inputs=app_ref_image, label="Image Examples") + + with gr.Row(elem_classes="input-row"): + with gr.Column(scale=3): + app_ref_mesh = gr.Model3D(label="Reference Appearance Mesh (.glb)", interactive=True, height=300) + with gr.Column(scale=2): + app_ref_mesh_hidden = gr.Image(type="filepath", visible=False) + app_ref_mesh_examples = gr.Examples(examples=EX_MESH_THUMBS, inputs=app_ref_mesh_hidden, label="Mesh Examples") + + # --- ADVANCED SETTINGS --- + with gr.Accordion("Advanced Settings", open=False): + with gr.Row(): + target_up = gr.Radio(["Y-up", "Z-up"], value="Y-up", label="Target Mesh Up-Axis") + reference_up = gr.Radio(["Y-up", "Z-up"], value="Y-up", label="Ref Mesh Up-Axis") + + with gr.Row(): + cfg_strength = gr.Slider(0.1, 10.0, value=5.0, step=0.1, label="CFG Strength") + num_steps = gr.Slider(50, 1000, value=300, step=50, label="Diffusion Steps") + learning_rate = gr.Number(value=5e-4, label="Learning Rate") + + # --- RUN BUTTON --- + with gr.Row(): + run_btn = gr.Button("Generate 3D Asset", 
variant="primary", size="lg") + + # --- OUTPUTS --- + gr.Markdown("### Results") + with gr.Row(): + with gr.Column(): + output_model = gr.Model3D(label="Output Mesh", interactive=False, clear_color=[1.0, 1.0, 1.0, 0.0]) + with gr.Column(): + output_video = gr.Video(label="Output Video", autoplay=True, loop=True, interactive=False) + + # --- EVENT BINDING --- + sim_struct_hidden.change(fn=load_mesh_from_thumb, inputs=sim_struct_hidden, outputs=sim_struct_mesh) + app_struct_hidden.change(fn=load_mesh_from_thumb, inputs=app_struct_hidden, outputs=app_struct_mesh) + app_ref_mesh_hidden.change(fn=load_mesh_from_thumb, inputs=app_ref_mesh_hidden, outputs=app_ref_mesh) + + tab_sim.select(lambda: "Similarity", outputs=guidance_mode_state) + tab_app.select(lambda: "Appearance", outputs=guidance_mode_state) + + run_btn.click( + fn=on_run, + inputs=[ + guidance_mode_state, + app_struct_mesh, app_ref_mesh, app_ref_image, + sim_struct_mesh, sim_ref_text, sim_ref_image, + target_up, reference_up, cfg_strength, num_steps, learning_rate + ], + outputs=[output_model, output_video] + ) + + demo.load(None, None, None, js="() => { document.body.classList.remove('dark'); }") + +if __name__ == "__main__": + demo.queue().launch(share=True, allowed_paths=[project_root]) \ No newline at end of file diff --git a/example_data/B07QC84LP1.glb b/example_data/B07QC84LP1.glb new file mode 100755 index 0000000000000000000000000000000000000000..0d58637865336408babb15deefd691e5066a20f3 --- /dev/null +++ b/example_data/B07QC84LP1.glb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01d9494d0538727f6423c7ceb6f1f8e32185e99f0c3c5086a01114ee2ed7977f +size 40350988 diff --git a/example_data/B07QC84LP1_orig.png b/example_data/B07QC84LP1_orig.png new file mode 100644 index 0000000000000000000000000000000000000000..b885c66a90e76f0ec3dc8b01f097acde430482ef --- /dev/null +++ b/example_data/B07QC84LP1_orig.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd392925312f8fd4320249ce368fc290d9c13b1e4ec04de2bbc49626402006b9 +size 2818576 diff --git a/example_data/example1.glb b/example_data/example1.glb new file mode 100644 index 0000000000000000000000000000000000000000..e7e8e8f8a30b320b608d33bc5a99de6d20ddfb93 --- /dev/null +++ b/example_data/example1.glb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29e345a5a91223c022982ca90a1df729587865a41d0bdfad631f4bc927466ca3 +size 128128 diff --git a/example_data/thumbs/B07QC84LP1_thumb.png b/example_data/thumbs/B07QC84LP1_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..b885c66a90e76f0ec3dc8b01f097acde430482ef --- /dev/null +++ b/example_data/thumbs/B07QC84LP1_thumb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd392925312f8fd4320249ce368fc290d9c13b1e4ec04de2bbc49626402006b9 +size 2818576 diff --git a/example_data/thumbs/example1_thumb.png b/example_data/thumbs/example1_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..9bc32c4d099162526e8e0d2bbda8bcbbfc7d3711 --- /dev/null +++ b/example_data/thumbs/example1_thumb.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:060453e878ff70e5ddc2ffc1b38d382ef871d62325bada61732d7a7554834c60 +size 20491 diff --git a/lib/opt/appearance.py b/lib/opt/appearance.py new file mode 100644 index 0000000000000000000000000000000000000000..2a63f83010e527569866a9ecf4fe6e83e98e4f52 --- /dev/null +++ b/lib/opt/appearance.py @@ -0,0 +1,137 @@ +import os.path as osp +import numpy as np +import 
torch +import torch.nn.functional as F +import utils3d +from PIL import Image +import logging + +import third_party.TRELLIS.trellis.modules.sparse as sp +from third_party.TRELLIS.trellis.pipelines import TrellisImageTo3DPipeline +from lib.util import partfield, generation + +# Global logger +log = logging.getLogger(__name__) + +def optimize_appearance(cfg, output_dir): + log.info("Starting appearance optimization...") + + generation_pipeline = TrellisImageTo3DPipeline.from_pretrained(cfg.trellis_img_model_name) + generation_pipeline.cuda() + + # load appearance and structure data + path = osp.join(output_dir, 'latents', cfg.latent_name, "appearance.npz") + data = np.load(path) + app_feats = torch.from_numpy(data['feats']).cuda() + app_coords = torch.from_numpy(data['coords']).cuda() + + struct_coords = utils3d.io.read_ply(osp.join(output_dir, 'voxels', 'struct_voxels.ply'))[0] + struct_coords = torch.from_numpy(struct_coords).float().cuda() + struct_coords = ((struct_coords + 0.5) * 64).long() + + app_image = Image.open(osp.join(output_dir, 'app_image.png')).convert('RGB') + + zeros = torch.zeros((struct_coords.size(0), 1), dtype=struct_coords.dtype, device=struct_coords.device) + struct_coords = torch.cat([zeros, struct_coords], dim=1) + + # Load partfield planes + path = osp.join(output_dir, 'partfield', 'part_feat_struct_mesh_zup_batch_part_plane.npy') + struct_part_planes = torch.from_numpy(np.load(path, allow_pickle=True)).cuda() + + path = osp.join(output_dir, 'partfield', 'part_feat_app_mesh_zup_batch_part_plane.npy') + app_part_planes = torch.from_numpy(np.load(path, allow_pickle=True)).cuda() + + app_labels, struct_labels, point_feat1, point_feat2 = partfield.cosegment_part(app_coords, app_part_planes, struct_coords, struct_part_planes, cfg.app_guidance.num_part_clusters) + + # Optimization Starts + app_labels = torch.from_numpy(app_labels.flatten()).cuda() + struct_labels = torch.from_numpy(struct_labels.flatten()).cuda() + + point_feat1 = torch.from_numpy(point_feat1).cuda() + point_feat2 = torch.from_numpy(point_feat2).cuda() + + struct_feats_params = torch.nn.Parameter(torch.randn((struct_coords.shape[0], cfg.flow_model_in_channels)), requires_grad=True) + + param_list = [struct_feats_params] + optimizer = torch.optim.AdamW(param_list, lr=cfg.app_guidance.learning_rate) + scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1) + + best_loss = float('inf') + feats = None + + image = generation_pipeline.preprocess_image(app_image) + cond = generation_pipeline.get_cond([image]) + + flow_model = generation_pipeline.models['slat_flow_model'] + + sampler_params={ + "cfg_strength": cfg.app_guidance.cfg_strength, + "cfg_interval": cfg.app_guidance.cfg_interval, + } + + t_seq = np.linspace(1, 0, cfg.app_guidance.steps + 1) + t_seq = cfg.app_guidance.rescale_t * t_seq / (1 + (cfg.app_guidance.rescale_t - 1) * t_seq) + t_pairs = list((t_seq[i], t_seq[i + 1]) for i in range(cfg.app_guidance.steps)) + + std = torch.tensor(generation_pipeline.slat_normalization['std'])[None].cuda() + mean = torch.tensor(generation_pipeline.slat_normalization['mean'])[None].cuda() + + log.info(f"Beginning guidance + flow sampling loop for {len(t_pairs)} steps...") + for iteration, (t, t_prev) in enumerate(t_pairs): + optimizer.zero_grad() + + # Diffusion + struct_feats_params_clone = struct_feats_params.clone().cuda() + noise = sp.SparseTensor( + feats = struct_feats_params_clone, + coords = struct_coords.int(), + ).cuda() + + with torch.no_grad(): + out = 
generation_pipeline.slat_sampler.sample_once(flow_model, noise, t, t_prev, **cond, **sampler_params) + + sample = out.pred_x_prev + struct_feats_params.data = sample.feats + + # Optimization + if iteration < len(t_pairs) - 1: + app_loss, num_labels = torch.tensor(0.0, requires_grad=True).cuda(), 0.0 + for label in torch.unique(app_labels): + app_mask = (app_labels == label) + struct_mask = (struct_labels == label) + + if app_mask.sum() == 0 or struct_mask.sum() == 0: + continue + + # Appearance Loss + cos_sim = torch.matmul(point_feat2[struct_mask], point_feat1[app_mask].T) + cos_dist = (1 - cos_sim) / 2. + nearest = torch.argmin(cos_dist, dim=1) + + matched = app_feats[app_mask][nearest] + curr_loss = F.mse_loss(struct_feats_params[struct_mask], matched) + + app_loss += curr_loss + num_labels += 1 + + app_loss = cfg.app_guidance.loss_weight * (app_loss / num_labels) + + total_loss = app_loss + + total_loss.backward() + optimizer.step() + scheduler.step() + + if (iteration == 0) or (iteration + 1) % cfg.log_every == 0: + message = f"Step: {iteration}, Appearance Loss: {app_loss.item():.4f}, Total Loss: {total_loss.item():.4f}" + log.info(message) + + if total_loss < best_loss: + best_loss = total_loss.item() + feats = struct_feats_params.detach() * std + mean + + # Decode SLAT + log.info("Decoding output SLAT...") + out_meshpath = osp.join(output_dir, 'out_app.glb') + out_gspath = osp.join(output_dir, 'out_gaussian_app.mp4') + generation.decode_slat(generation_pipeline, feats, struct_coords, out_meshpath, out_gspath) \ No newline at end of file diff --git a/lib/opt/self_similarity.py b/lib/opt/self_similarity.py new file mode 100644 index 0000000000000000000000000000000000000000..47387b6fba10d14d676036b800c05669f2d4b7c9 --- /dev/null +++ b/lib/opt/self_similarity.py @@ -0,0 +1,118 @@ +import os.path as osp +import numpy as np +import torch +import utils3d +import logging + +import third_party.TRELLIS.trellis.modules.sparse as sp +from third_party.TRELLIS.trellis.pipelines import TrellisTextTo3DPipeline +from lib.util import generation, partfield + +# Global logger +log = logging.getLogger(__name__) + +def attn_cosine_sim(x, eps=1e-08): + x = x[0] # TEMP: getting rid of redundant dimension, TBF + norm1 = x.norm(dim=2, keepdim=True) + factor = torch.clamp(norm1 @ norm1.permute(0, 2, 1), min=eps) + sim_matrix = (x @ x.permute(0, 2, 1)) / factor + return sim_matrix + +def optimize_self_similarity(cfg, app_text, output_dir): + log.info("Starting self-similarity optimization...") + + generation_pipeline = TrellisTextTo3DPipeline.from_pretrained(cfg.trellis_text_model_name) + generation_pipeline.cuda() + + # Load Structure Data + struct_coords = utils3d.io.read_ply(osp.join(output_dir, 'voxels', 'struct_voxels.ply'))[0] + struct_coords = torch.from_numpy(struct_coords).float().cuda() + struct_coords = ((struct_coords + 0.5) * 64).long() + + zeros = torch.zeros((struct_coords.size(0), 1), dtype=struct_coords.dtype, device=struct_coords.device) + struct_coords = torch.cat([zeros, struct_coords], dim=1) + + # Load partfield planes + path = osp.join(output_dir, "partfield", "part_feat_struct_mesh_zup_batch_part_plane.npy") + struct_part_planes = torch.from_numpy(np.load(path, allow_pickle=True)).cuda() + + struct_labels = partfield.cluster_geoms(struct_coords, struct_part_planes, num_clusters=cfg.sim_guidance.num_part_clusters) + + # Optimization Starts... 
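+    # The loss below is an InfoNCE-style contrastive objective over the cosine
+    # similarity matrix of the optimized latent features: voxel pairs sharing a
+    # PartField cluster label form the positives, all remaining off-diagonal
+    # pairs the negatives, so features within one part are pulled together
+    # while features of distinct parts stay apart.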
+ struct_labels = torch.from_numpy(struct_labels.flatten()).cuda() + struct_feats_params = torch.nn.Parameter(torch.randn((struct_coords.shape[0], cfg.flow_model_in_channels)), requires_grad=True) + + param_list = [struct_feats_params] + optimizer = torch.optim.AdamW(param_list, lr=cfg.sim_guidance.learning_rate) + scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1) + + best_loss = float('inf') + feats = None + + cond = generation_pipeline.get_cond([app_text]) + + flow_model = generation_pipeline.models['slat_flow_model'] + sampler_params={ + "cfg_strength": cfg.sim_guidance.cfg_strength, + "cfg_interval": cfg.sim_guidance.cfg_interval, + } + + t_seq = np.linspace(1, 0, cfg.sim_guidance.steps + 1) + t_seq = cfg.sim_guidance.rescale_t * t_seq / (1 + (cfg.sim_guidance.rescale_t - 1) * t_seq) + t_pairs = list((t_seq[i], t_seq[i + 1]) for i in range(cfg.sim_guidance.steps)) + + std = torch.tensor(generation_pipeline.slat_normalization['std'])[None].cuda() + mean = torch.tensor(generation_pipeline.slat_normalization['mean'])[None].cuda() + + log.info(f"Beginning self-similarity guidance + flow sampling loop for {len(t_pairs)} steps...") + for iteration, (t, t_prev) in enumerate(t_pairs): + optimizer.zero_grad() + + # Diffusion + struct_feats_params_clone = struct_feats_params.clone().cuda() + noise = sp.SparseTensor( + feats = struct_feats_params_clone, + coords = struct_coords.int(), + ).cuda() + + with torch.no_grad(): + out = generation_pipeline.slat_sampler.sample_once(flow_model, noise, t, t_prev, **cond, **sampler_params) + + sample = out.pred_x_prev + struct_feats_params.data = sample.feats + + # Optimization - Structure Loss + if iteration < len(t_pairs) - 1: + labels = struct_labels.view(-1,1) + sim = attn_cosine_sim(struct_feats_params[None, None, ...])[0] + + mask = (labels == labels.T).float() + + logits_mask = torch.ones_like(mask) - torch.eye(mask.size(0), device=struct_feats_params.device) + mask = mask * logits_mask + + exp_sim = torch.exp(sim) * logits_mask + numerator = (exp_sim * mask).sum(dim=1) + denominator = exp_sim.sum(dim=1) + + struct_loss = -torch.log(numerator / (denominator + 1e-8)) + struct_loss = struct_loss[mask.sum(dim=1) > 0].mean() + + total_loss = cfg.sim_guidance.loss_weight * struct_loss + total_loss.backward() + optimizer.step() + scheduler.step() + + if (iteration == 0) or (iteration + 1) % cfg.log_every == 0: + message = f"Step: {iteration}, Structure Loss: {struct_loss.item():.4f}, Total Loss: {total_loss.item():.4f}" + log.info(message) + + if total_loss < best_loss: + best_loss = total_loss.item() + feats = struct_feats_params.detach() * std + mean + + # Decode SLAT + log.info("Decoding output SLAT...") + out_meshpath = osp.join(output_dir, 'out_sim.glb') + out_gspath = osp.join(output_dir, 'out_gaussian_sim.mp4') + generation.decode_slat(generation_pipeline, feats, struct_coords, out_meshpath, out_gspath) \ No newline at end of file diff --git a/lib/util/common.py b/lib/util/common.py new file mode 100644 index 0000000000000000000000000000000000000000..cd06a548cbe68005f146406b0c9fef21809e910c --- /dev/null +++ b/lib/util/common.py @@ -0,0 +1,101 @@ +import json +import os +import os.path as osp +import pickle +import re +from pathlib import Path +from typing import Any +import torch +import random +import numpy as np + +def make_dir(dir_path: str) -> None: + """Creates a directory if it does not exist.""" + if not Path(dir_path).exists(): + Path(dir_path).mkdir(parents=True, exist_ok=True) + +def ensure_dir(path: str) -> 
None: + """ + Ensures that a directory exists; creates it if it does not. + """ + if not osp.exists(path): + os.makedirs(path) + +def assert_dir(path: str) -> None: + """Asserts that a directory exists.""" + assert osp.exists(path) + +def load_pkl_data(filename: str) -> Any: + """Loads data from a pickle file.""" + with open(filename, 'rb') as handle: + data_dict = pickle.load(handle) + return data_dict + +def write_pkl_data(data_dict: Any, filename: str) -> None: + """Writes data to a pickle file.""" + with open(filename, 'wb') as handle: + pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL) + +def load_json(filename: str) -> Any: + """Loads data from a JSON file.""" + file = open(filename) + data = json.load(file) + file.close() + return data + +def write_json(data_dict: Any, filename: str) -> None: + """Writes data to a JSON file with indentation.""" + json_obj = json.dumps(data_dict, indent=4) + + with open(filename, "w") as outfile: + outfile.write(json_obj) + +def get_print_format(value: Any) -> str: + """Determines the appropriate format string for a given value.""" + if isinstance(value, int): + return 'd' + if isinstance(value, str): + return 's' + if value == 0: + return '.3f' + if value < 1e-6: + return '.3e' + if value < 1e-3: + return '.6f' + return '.6f' + + +def get_format_strings(kv_pairs: list) -> list: + """Generates format strings for a list of key-value pairs.""" + log_strings = [] + for key, value in kv_pairs: + fmt = get_print_format(value) + format_string = '{}: {:' + fmt + '}' + log_strings.append(format_string.format(key, value)) + return log_strings + +def get_first_index_batch(x: Any) -> Any: + """Retrieves the first index from a batch, handling different data types.""" + if isinstance(x, list): + x = x[0] + elif isinstance(x, torch.Tensor): + x = x.squeeze(0) + elif isinstance(x, dict): + x = {key: get_first_index_batch(value) for key, value in x.items()} + return x + +def split_sentence(sentence: str) -> list: + """Splits a sentence into individual sentences based on periods.""" + sentence = re.split(r'[.]', sentence) + sentence = [s.strip() for s in sentence] + sentence = [s for s in sentence if len(s) > 0] + return sentence + +def set_random_seed(seed: int) -> None: + """Sets the random seed for reproducibility.""" + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False \ No newline at end of file diff --git a/lib/util/generation.py b/lib/util/generation.py new file mode 100644 index 0000000000000000000000000000000000000000..5a26a82804bcf910731a30e835f1ab1a2d621674 --- /dev/null +++ b/lib/util/generation.py @@ -0,0 +1,142 @@ +import os +import json +import torch +import numpy as np +from PIL import Image +import utils3d +import imageio +import torch.nn.functional as F + +import third_party.TRELLIS.trellis.modules.sparse as sp +from third_party.TRELLIS.trellis.utils import render_utils, postprocessing_utils + +def get_data(model_dir, view): + image_path = os.path.join(model_dir, view['file_path']) + image = Image.open(image_path) + image = image.resize((518, 518), Image.Resampling.LANCZOS) + image = np.array(image).astype(np.float32) / 255 + image = image[:, :, :3] * image[:, :, 3:] + image = torch.from_numpy(image).permute(2, 0, 1).float() + + c2w = torch.tensor(view['transform_matrix']) + c2w[:3, 1:3] *= -1 + extrinsics = torch.inverse(c2w) + fov = view['camera_angle_x'] + intrinsics = 
utils3d.torch.intrinsics_from_fov_xy(torch.tensor(fov), torch.tensor(fov)) + + return { + 'image': image, + 'extrinsics': extrinsics, + 'intrinsics': intrinsics + } + +@torch.no_grad() +def extract_feature(output_dir, dinov2_model, transform, n_patch=518 // 14, batch_size=8, feature_name='dinov2_vitl14_reg'): + dinov2_model.eval().cuda() + + with open(os.path.join(output_dir, 'app_renders', 'transforms.json'), 'r') as f: + metadata = json.load(f) + + frames = metadata['frames'] + data = [] + + for view in frames: + datum = get_data(os.path.join(output_dir, 'app_renders'), view) + datum['image'] = transform(datum['image']) + data.append(datum) + + positions = utils3d.io.read_ply(os.path.join(output_dir, 'voxels', 'app_voxels.ply'))[0] + positions = torch.from_numpy(positions).float().cuda() + indices = ((positions + 0.5) * 64).long() + assert torch.all(indices >= 0) and torch.all(indices < 64), "Some vertices are out of bounds" + + n_views = len(data) + N = positions.shape[0] + pack = { + 'indices': indices.cpu().numpy().astype(np.uint8), + } + + patchtokens_lst = [] + uv_lst = [] + + with torch.no_grad(): + for i in range(0, n_views, batch_size): + batch_data = data[i:i+batch_size] + bs = len(batch_data) + batch_images = torch.stack([d['image'] for d in batch_data]).cuda() + batch_extrinsics = torch.stack([d['extrinsics'] for d in batch_data]).cuda() + batch_intrinsics = torch.stack([d['intrinsics'] for d in batch_data]).cuda() + features = dinov2_model(batch_images, is_training=True) + uv = utils3d.torch.project_cv(positions, batch_extrinsics, batch_intrinsics)[0] * 2 - 1 + patchtokens = features['x_prenorm'][:, dinov2_model.num_register_tokens + 1:].permute(0, 2, 1).reshape(bs, 1024, n_patch, n_patch) + patchtokens_lst.append(patchtokens) + uv_lst.append(uv) + + patchtokens = torch.cat(patchtokens_lst, dim=0) + uv = torch.cat(uv_lst, dim=0) + + pack['patchtokens'] = F.grid_sample( + patchtokens.type(torch.float16), + uv.unsqueeze(1).type(torch.float16), + mode='bilinear', + align_corners=False, + ).squeeze(2).permute(0, 2, 1).cpu().numpy() + + assert not torch.isnan(patchtokens.type(torch.float16)).any(), "NaNs in patchtokens" + assert not np.isnan(pack['patchtokens']).any(), "NaNs in pack patchtokens" + assert not torch.isnan(uv.unsqueeze(1).type(torch.float16)).any(), "NaNs in uv" + + pack['patchtokens'] = np.mean(pack['patchtokens'], axis=0).astype(np.float16) + + save_path = os.path.join(output_dir, 'features', feature_name, 'appearance.npz') + np.savez_compressed(save_path, **pack) + + del patchtokens + del pack + +@torch.no_grad() +def get_latent(output_dir, feature_name, latent_name, encoder): + feats = np.load(os.path.join(output_dir, 'features', feature_name, 'appearance.npz')) + feats = sp.SparseTensor( + feats = torch.from_numpy(feats['patchtokens']).type(torch.float32), + coords = torch.cat([ + torch.zeros(feats['patchtokens'].shape[0], 1).int(), + torch.from_numpy(feats['indices']).int(), + ], dim=1), + ).cuda() + latent = encoder(feats, sample_posterior=False) + assert torch.isfinite(latent.feats).all(), "Non-finite latent" + pack = { + 'feats': latent.feats.cpu().numpy().astype(np.float32), + 'coords': latent.coords[:, :].cpu().numpy().astype(np.uint8), + } + + save_path = os.path.join(output_dir, 'latents', latent_name, 'appearance.npz') + np.savez_compressed(save_path, **pack) + + del latent + del pack + +def decode_slat(generation_pipeline, feats, coords, out_meshpath, out_gspath): + # Decode Output SLAT + slat = sp.SparseTensor( + feats = feats.float(), + coords = 
coords.int(),
+    ).cuda()
+    formats = ['mesh', 'gaussian']
+    with torch.no_grad():
+        outputs = generation_pipeline.decode_slat(slat, formats)
+
+    _, mesh_textured = postprocessing_utils.to_glb(
+        outputs['gaussian'][0],
+        outputs['mesh'][0],
+        # Optional parameters
+        simplify=0.95,       # Ratio of triangles to remove in the simplification process
+        texture_size=1024,   # Size of the texture used for the GLB
+        verbose=False,       # Print logs
+    )
+    mesh_textured.export(out_meshpath)
+
+    # Render the outputs
+    video = render_utils.render_video(outputs['gaussian'][0], bg_color=[255, 255, 255])['color']
+    imageio.mimsave(out_gspath, video, fps=30)
\ No newline at end of file
diff --git a/lib/util/partfield.py b/lib/util/partfield.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fbda796f969dfbd8aeca1daa51def69ee6d75cb
--- /dev/null
+++ b/lib/util/partfield.py
@@ -0,0 +1,108 @@
+import numpy as np
+from sklearn.cluster import KMeans
+from sklearn.neighbors import KDTree
+
+import torch
+import torch.nn.functional as F
+
+def get_voxel_partfeats(voxel_coords, part_planes):
+    voxel_coords = ((voxel_coords[:, 1:] + 0.5) / 64 - 0.5).cpu().numpy()
+    bbmin = voxel_coords.min(0)
+    bbmax = voxel_coords.max(0)
+    center = (bbmin + bbmax) * 0.5
+    scale = 2.0 * 0.9 / (bbmax - bbmin).max()
+    voxel_coords = (voxel_coords - center) * scale
+
+    tensor_vertices = torch.from_numpy(voxel_coords).unsqueeze(0).reshape(1, -1, 3).cuda().to(torch.float16)
+    part_feats = sample_triplane_feat(part_planes, tensor_vertices)  # (1, N, C)
+    part_feats = part_feats.cpu().numpy().reshape(-1, 448)
+
+    return part_feats
+
+def sample_triplane_feat(feature_triplane, normalized_pos):
+    '''
+    normalized_pos [-1, 1]
+    '''
+    tri_plane = torch.unbind(feature_triplane, dim=1)
+
+    x_feat = F.grid_sample(
+        tri_plane[0],
+        torch.cat(
+            [normalized_pos[:, :, 0:1], normalized_pos[:, :, 1:2]],
+            dim=-1).unsqueeze(dim=1), padding_mode='border',
+        align_corners=True)
+    y_feat = F.grid_sample(
+        tri_plane[1],
+        torch.cat(
+            [normalized_pos[:, :, 1:2], normalized_pos[:, :, 2:3]],
+            dim=-1).unsqueeze(dim=1), padding_mode='border',
+        align_corners=True)
+
+    z_feat = F.grid_sample(
+        tri_plane[2],
+        torch.cat(
+            [normalized_pos[:, :, 0:1], normalized_pos[:, :, 2:3]],
+            dim=-1).unsqueeze(dim=1), padding_mode='border',
+        align_corners=True)
+    final_feat = (x_feat + y_feat + z_feat)
+    final_feat = final_feat.squeeze(dim=2).permute(0, 2, 1)  # (B, N, C)
+    return final_feat
+
+def cosegment_part(app_coords, app_part_planes, struct_coords, struct_part_planes, num_clusters=30):
+    struct_partfield_feats = get_voxel_partfeats(struct_coords, struct_part_planes)
+    app_partfield_feats = get_voxel_partfeats(app_coords, app_part_planes)
+
+    point_feat1 = app_partfield_feats
+    point_feat2 = struct_partfield_feats
+
+    point_feat1 = point_feat1 / np.linalg.norm(point_feat1, axis=-1, keepdims=True)
+    point_feat2 = point_feat2 / np.linalg.norm(point_feat2, axis=-1, keepdims=True)
+
+    clustering1 = KMeans(n_clusters=num_clusters, random_state=0, n_init="auto").fit(point_feat1)
+    # Get feature means per cluster
+    feature_means1 = []
+    for j in range(num_clusters):
+        all_cluster_feat = point_feat1[clustering1.labels_==j]
+        mean_feat = np.mean(all_cluster_feat, axis=0)
+        feature_means1.append(mean_feat)
+
+    labels1 = clustering1.labels_
+
+    feature_means1 = np.array(feature_means1)
+    tree = KDTree(feature_means1)
+
+    init_mode = feature_means1  # warm-start the second clustering from the first clustering's centroids
+
+    clustering2 = KMeans(n_clusters=num_clusters, random_state=0, init=init_mode, n_init=1).fit(point_feat2)
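+    ### Match the two clusterings: clustering2 was warm-started from clustering1's
+    ### centroids, and below each of its centroids is mapped (via the KDTree built on
+    ### feature_means1) to the nearest centroid of clustering1, so part labels on the
+    ### structure shape correspond to part labels on the appearance shape.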
+    ### Get feature means per cluster
+    feature_means2 = []
+    for j in range(num_clusters):
+        all_cluster_feat = point_feat2[clustering2.labels_==j]
+        mean_feat = np.mean(all_cluster_feat, axis=0)
+        feature_means2.append(mean_feat)
+
+    feature_means2 = np.array(feature_means2)
+    _, nn_idx = tree.query(feature_means2, k=1)
+    relabelled_2 = nn_idx[clustering2.labels_]
+
+    return labels1, relabelled_2, point_feat1, point_feat2
+
+def cluster_geoms(struct_coords, struct_part_planes, num_clusters=10):
+    struct_partfield_feats = get_voxel_partfeats(struct_coords, struct_part_planes)
+
+    point_feat = struct_partfield_feats
+    point_feat = point_feat / np.linalg.norm(point_feat, axis=-1, keepdims=True)
+
+    clustering = KMeans(n_clusters=num_clusters, random_state=0, n_init="auto").fit(point_feat)
+    # Get feature means per cluster
+    feature_means = []
+    for j in range(num_clusters):
+        all_cluster_feat = point_feat[clustering.labels_==j]
+        mean_feat = np.mean(all_cluster_feat, axis=0)
+        feature_means.append(mean_feat)
+
+    labels = clustering.labels_
+    return labels
\ No newline at end of file
diff --git a/lib/util/pointcloud.py b/lib/util/pointcloud.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac1cd15b6fba769dcf5100a73c523736082b8dfe
--- /dev/null
+++ b/lib/util/pointcloud.py
@@ -0,0 +1,23 @@
+import numpy as np
+import trimesh
+import utils3d
+import open3d_pycg as o3d
+
+def convert_mesh_yup_to_zup(mesh):
+    mesh.vertices = mesh.vertices @ np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]])
+    return mesh
+
+def voxelize_mesh(mesh_file, save_path):
+    assert mesh_file.endswith('.ply') and save_path.endswith('.ply'), 'Voxelization only supports .ply files'
+
+    mesh = o3d.io.read_triangle_mesh(mesh_file)
+
+    # clamp vertices to the range [-0.5, 0.5]
+    vertices = np.clip(np.asarray(mesh.vertices), -0.5 + 1e-6, 0.5 - 1e-6)
+    mesh.vertices = o3d.utility.Vector3dVector(vertices)
+    voxel_grid = o3d.geometry.VoxelGrid.create_from_triangle_mesh_within_bounds(mesh, voxel_size=1/64, min_bound=(-0.5, -0.5, -0.5), max_bound=(0.5, 0.5, 0.5))
+    vertices = np.array([voxel.grid_index for voxel in voxel_grid.get_voxels()])
+    assert np.all(vertices >= 0) and np.all(vertices < 64), "Some vertices are out of bounds"
+    vertices = (vertices + 0.5) / 64 - 0.5
+
+    utils3d.io.write_ply(save_path, vertices)
\ No newline at end of file
diff --git a/lib/util/render.py b/lib/util/render.py
new file mode 100644
index 0000000000000000000000000000000000000000..42fd2ef447edac96e3d1e68e875dbc1941c15df4
--- /dev/null
+++ b/lib/util/render.py
@@ -0,0 +1,80 @@
+import os
+import json
+from subprocess import call, DEVNULL
+import numpy as np
+
+BLENDER_LINK = 'https://download.blender.org/release/Blender3.0/blender-3.0.1-linux-x64.tar.xz'
+BLENDER_INSTALLATION_PATH = '/tmp'
+BLENDER_PATH = f'{BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64/blender'
+
+def _install_blender():
+    if not os.path.exists(BLENDER_PATH):
+        os.system('sudo apt-get update')
+        os.system('sudo apt-get install -y libxrender1 libxi6 libxkbcommon-x11-0 libsm6')
+        os.system(f'wget {BLENDER_LINK} -P {BLENDER_INSTALLATION_PATH}')
+        os.system(f'tar -xf {BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64.tar.xz -C {BLENDER_INSTALLATION_PATH}')
+        print(f'Blender installed at {BLENDER_PATH}')
+    else:
+        print(f'Blender already installed at {BLENDER_PATH}')
+
+def render_all_views(file_path, output_folder, num_views=150):
+    _install_blender()
+    # Build camera {yaw, pitch, radius, fov}
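+    # Cameras sit on a sphere of radius 2 with a 40-degree FOV; yaw/pitch come from
+    # the low-discrepancy sphere_hammersley_sequence below (with a random offset), so
+    # the num_views viewpoints cover the sphere quasi-uniformly.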
+    yaws = []
+    pitchs = []
+    offset = (np.random.rand(), np.random.rand())
+    for i in range(num_views):
+        y, p = sphere_hammersley_sequence(i, num_views, offset)
+        yaws.append(y)
+        pitchs.append(p)
+    radius = [2] * num_views
+    fov = [40 / 180 * np.pi] * num_views
+    views = [{'yaw': y, 'pitch': p, 'radius': r, 'fov': f} for y, p, r, f in zip(yaws, pitchs, radius, fov)]
+
+    args = [
+        os.environ.get('BLENDER_HOME', BLENDER_PATH),  # fall back to the locally installed binary if BLENDER_HOME is unset
+        '-b', '-P', os.path.join(os.getcwd(), 'third_party/TRELLIS/dataset_toolkits', 'blender_script', 'render.py'),
+        '--',
+        '--views', json.dumps(views),
+        '--object', os.path.expanduser(file_path),
+        '--resolution', '512',
+        '--output_folder', output_folder,
+        '--engine', 'CYCLES',
+        '--save_mesh',
+    ]
+    if file_path.endswith('.blend'):
+        args.insert(1, file_path)
+
+    call(args, stdout=DEVNULL, stderr=DEVNULL)
+
+    return os.path.exists(os.path.join(output_folder, 'transforms.json'))
+
+# ===============LOW DISCREPANCY SEQUENCES================
+
+PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53]
+
+def radical_inverse(base, n):
+    val = 0
+    inv_base = 1.0 / base
+    inv_base_n = inv_base
+    while n > 0:
+        digit = n % base
+        val += digit * inv_base_n
+        n //= base
+        inv_base_n *= inv_base
+    return val
+
+def halton_sequence(dim, n):
+    # use a distinct loop variable so the `dim` parameter is not shadowed
+    return [radical_inverse(PRIMES[d], n) for d in range(dim)]
+
+def hammersley_sequence(dim, n, num_samples):
+    return [n / num_samples] + halton_sequence(dim - 1, n)
+
+def sphere_hammersley_sequence(n, num_samples, offset=(0, 0)):
+    u, v = hammersley_sequence(2, n, num_samples)
+    u += offset[0] / num_samples
+    v += offset[1]
+    u = 2 * u if u < 0.25 else 2 / 3 * u + 1 / 3
+    theta = np.arccos(1 - 2 * u) - np.pi / 2
+    phi = v * 2 * np.pi
+    return [phi, theta]
\ No newline at end of file
diff --git a/packages.txt b/packages.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f4462fc074da8af4c866a263b8f92a158633eaac
--- /dev/null
+++ b/packages.txt
@@ -0,0 +1,7 @@
+xvfb
+libx11-6
+libgl1
+libxrender1
+libxi6
+libxkbcommon-x11-0
+libsm6
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..76e873207b4f966af160aacc6856f5ddd378ba88
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,55 @@
+--extra-index-url https://download.pytorch.org/whl/cu124
+--find-links https://data.pyg.org/whl/torch-2.5.0+cu124.html
+--find-links https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-2.5.0_cu124.html
+
+torch==2.5.0+cu124
+torchvision==0.20.0+cu124
+torchaudio==2.5.0+cu124
+pillow==11.3.0
+imageio==2.37.2
+imageio-ffmpeg==0.6.0
+tqdm==4.67.1
+easydict==1.13
+opencv-python-headless==4.12.0.88
+scipy==1.16.3
+ninja==1.13.0
+rembg==2.0.68
+onnxruntime==1.23.2
+open3d==0.19.0
+xatlas==0.0.11
+pyvirtualdisplay==3.0
+pyvista==0.46.4
+pymeshfix==0.17.1
+igraph==1.0.0
+transformers==4.57.2
+tensorview==0.2.0
+git+https://github.com/EasternJournalist/utils3d.git@9a4eb15e4021b67b12c460c7057d642626897ec8
+https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.5.4/flash_attn-2.8.3+cu124torch2.5-cp312-cp312-linux_x86_64.whl?download=true
+https://huggingface.co/spaces/GradientSpaces/GuideFlow3D/resolve/main/demos/assets/wheels/diff_gaussian_rasterization-0.0.0-cp312-cp312-linux_x86_64.whl?download=true
+https://huggingface.co/spaces/GradientSpaces/GuideFlow3D/resolve/main/demos/assets/wheels/diffoctreerast-0.0.0-cp312-cp312-linux_x86_64.whl?download=true
+https://huggingface.co/spaces/GradientSpaces/GuideFlow3D/resolve/main/demos/assets/wheels/nvdiffrast-0.3.5-py3-none-any.whl?download=true +python-pycg[all] +spconv-cu124==2.3.8 +psutil==5.9.5 +lightning==2.2 +h5py==3.15.1 +yacs==0.1.8 +trimesh==4.10.0 +scikit-image==0.25.2 +loguru==0.7.3 +boto3==1.41.5 +mesh2sdf==1.1.0 +tetgen==0.6.4 +pymeshlab==2025.7 +plyfile==1.1.3 +einops==0.8.1 +libigl==2.6.1 +polyscope==2.5.0 +potpourri3d==1.3 +simple_parsing==0.1.7 +arrgh==1.0.0 +open3d==0.19.0 +torch-scatter==2.1.2+pt25cu124 +kaolin==0.18.0 +vtk==9.5.2 +tetgen==0.6.4 \ No newline at end of file diff --git a/third_party/PartField/config.yaml b/third_party/PartField/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c72eb70f2836038d312e7559175865bdbf22de52 --- /dev/null +++ b/third_party/PartField/config.yaml @@ -0,0 +1,88 @@ +seed: 0 +output_dir: "results" +result_name: "demo" + +triplet_sampling: "random" +load_original_mesh: false + +num_pos: 64 +num_neg_random: 256 +num_neg_hard_pc: 128 +num_neg_hard_emb: 128 + +vertex_feature: false # if true, sample feature on vertices; if false, sample feature on faces +n_point_per_face: 1000 +n_sample_each: 10000 +preprocess_mesh: false + +regress_2d_feat: false + +is_pc: false + +cut_manifold: false +remesh_demo: false +correspondence_demo: false + +save_every_epoch: 10 +training_epochs: 30 +continue_training: false + +continue_ckpt: models/model_objaverse.ckpt +epoch_selected: "epoch=50.ckpt" + +triplane_resolution: 128 +triplane_channels_low: 128 +triplane_channels_high: 512 +lr: 1.0e-3 +train: true +test: false + +inference_save_pred_sdf_to_mesh: true +inference_save_feat_pca: true +name: "test" +test_subset: false +test_corres: false +test_partobjaversetiny: false + +dataset: + type: "Mix" + data_path: "objaverse_data" + train_batch_size: 1 + val_batch_size: 1 + train_num_workers: 8 + val_num_workers: 32 + all_files: [] # only used for correspondence demo + +voxel2triplane: + transformer_dim: 1024 + transformer_layers: 6 + transformer_heads: 8 + triplane_low_res: 32 + triplane_high_res: 256 + triplane_dim: 64 + normalize_vox_feat: false + +loss: + triplet: 1.0 + sdf: 1.0 + feat: 10.0 + l1: 0.0 + +use_pvcnn: false +use_pvcnnonly: true + +pvcnn: + point_encoder_type: 'pvcnn' + use_point_scatter: true + z_triplane_channels: 256 + z_triplane_resolution: 128 + unet_cfg: + depth: 3 + enabled: true + rolled: true + use_3d_aware: true + start_hidden_channels: 32 + use_initial_conv: false + +use_2d_feat: false +inference_metrics_only: false \ No newline at end of file diff --git a/third_party/PartField/partfield/config/__init__.py b/third_party/PartField/partfield/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..39582506b85759ab473acedb5d0f15b7d7f26594 --- /dev/null +++ b/third_party/PartField/partfield/config/__init__.py @@ -0,0 +1,26 @@ +import argparse +import os.path as osp +from datetime import datetime +import pytz + +def default_argument_parser(add_help=True, default_config_file=""): + parser = argparse.ArgumentParser(add_help=add_help) + parser.add_argument("--config-file", '-c', default=default_config_file, metavar="FILE", help="path to config file") + parser.add_argument( + "--opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + return parser + +def setup(args, freeze=True): + from .defaults import _C as cfg + cfg = cfg.clone() + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + dt = 
datetime.now(pytz.timezone('America/Los_Angeles')).strftime('%y%m%d-%H%M%S') + cfg.output_dir = osp.join(cfg.output_dir, cfg.name, dt) + if freeze: + cfg.freeze() + return cfg \ No newline at end of file diff --git a/third_party/PartField/partfield/config/defaults.py b/third_party/PartField/partfield/config/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..be505f9655a3606d9aece9bb42e78f646d41889b --- /dev/null +++ b/third_party/PartField/partfield/config/defaults.py @@ -0,0 +1,92 @@ +from yacs.config import CfgNode as CN + +_C = CN() +_C.seed = 0 +_C.output_dir = "results" +_C.result_name = "test_all" + +_C.triplet_sampling = "random" +_C.load_original_mesh = False + +_C.num_pos = 64 +_C.num_neg_random = 256 +_C.num_neg_hard_pc = 128 +_C.num_neg_hard_emb = 128 + +_C.vertex_feature = False # if true, sample feature on vertices; if false, sample feature on faces +_C.n_point_per_face = 2000 +_C.n_sample_each = 10000 +_C.preprocess_mesh = False + +_C.regress_2d_feat = False + +_C.is_pc = False + +_C.cut_manifold = False +_C.remesh_demo = False +_C.correspondence_demo = False + +_C.save_every_epoch = 10 +_C.training_epochs = 30 +_C.continue_training = False + +_C.continue_ckpt = None +_C.epoch_selected = "epoch=50.ckpt" + +_C.triplane_resolution = 128 +_C.triplane_channels_low = 128 +_C.triplane_channels_high = 512 +_C.lr = 1e-3 +_C.train = True +_C.test = False + +_C.inference_save_pred_sdf_to_mesh=True +_C.inference_save_feat_pca=True +_C.name = "test" +_C.test_subset = False +_C.test_corres = False +_C.test_partobjaversetiny = False + +_C.dataset = CN() +_C.dataset.type = "Demo_Dataset" +_C.dataset.data_path = "objaverse_data/" +_C.dataset.train_num_workers = 64 +_C.dataset.val_num_workers = 32 +_C.dataset.train_batch_size = 2 +_C.dataset.val_batch_size = 2 +_C.dataset.all_files = [] # only used for correspondence demo + +_C.voxel2triplane = CN() +_C.voxel2triplane.transformer_dim = 1024 +_C.voxel2triplane.transformer_layers = 6 +_C.voxel2triplane.transformer_heads = 8 +_C.voxel2triplane.triplane_low_res = 32 +_C.voxel2triplane.triplane_high_res = 256 +_C.voxel2triplane.triplane_dim = 64 +_C.voxel2triplane.normalize_vox_feat = False + + +_C.loss = CN() +_C.loss.triplet = 0.0 +_C.loss.sdf = 1.0 +_C.loss.feat = 10.0 +_C.loss.l1 = 0.0 + +_C.use_pvcnn = False +_C.use_pvcnnonly = True + +_C.pvcnn = CN() +_C.pvcnn.point_encoder_type = 'pvcnn' +_C.pvcnn.use_point_scatter = True +_C.pvcnn.z_triplane_channels = 64 +_C.pvcnn.z_triplane_resolution = 256 +_C.pvcnn.unet_cfg = CN() +_C.pvcnn.unet_cfg.depth = 3 +_C.pvcnn.unet_cfg.enabled = True +_C.pvcnn.unet_cfg.rolled = True +_C.pvcnn.unet_cfg.use_3d_aware = True +_C.pvcnn.unet_cfg.start_hidden_channels = 32 +_C.pvcnn.unet_cfg.use_initial_conv = False + +_C.use_2d_feat = False +_C.inference_metrics_only = False \ No newline at end of file diff --git a/third_party/PartField/partfield/dataloader.py b/third_party/PartField/partfield/dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..3ae1256dbb42ea9656b6e80d39122d21cea1022f --- /dev/null +++ b/third_party/PartField/partfield/dataloader.py @@ -0,0 +1,78 @@ +import os +import torch +import trimesh +import numpy as np +import gc + +def quad_to_triangle_mesh(F): + """ + Converts a quad-dominant mesh into a pure triangle mesh by splitting quads into two triangles. + + Parameters: + quad_mesh (trimesh.Trimesh): Input mesh with quad faces. + + Returns: + trimesh.Trimesh: A new mesh with only triangle faces. 
+ """ + faces = F + + ### If already a triangle mesh -- skip + if len(faces[0]) == 3: + return F + + new_faces = [] + + for face in faces: + if len(face) == 4: # Quad face + # Split into two triangles + new_faces.append([face[0], face[1], face[2]]) # Triangle 1 + new_faces.append([face[0], face[2], face[3]]) # Triangle 2 + else: + print(f"Warning: Skipping non-triangle/non-quad face {face}") + + new_faces = np.array(new_faces) + + return new_faces + +class Demo_Dataset(torch.utils.data.Dataset): + def __init__(self, obj_path): + super().__init__() + + self.obj_path = obj_path + self.pc_num_pts = 100000 + + + def __len__(self): + return 1 + + def get_model(self): + uid = os.path.basename(self.obj_path).split(".")[-2] + mesh = trimesh.load(self.obj_path, force='mesh', process=False) + vertices = mesh.vertices + faces = mesh.faces + + bbmin = vertices.min(0) + bbmax = vertices.max(0) + center = (bbmin + bbmax) * 0.5 + scale = 2.0 * 0.9 / (bbmax - bbmin).max() + vertices = (vertices - center) * scale + mesh.vertices = vertices + + ### Make sure it is a triangle mesh -- just convert the quad + mesh.faces = quad_to_triangle_mesh(faces) + pc, _ = trimesh.sample.sample_surface(mesh, self.pc_num_pts) + + result = { + 'uid': uid + } + + result['pc'] = torch.tensor(pc, dtype=torch.float32) + result['vertices'] = mesh.vertices + result['faces'] = mesh.faces + + return result + + def __getitem__(self, index): + gc.collect() + + return self.get_model() \ No newline at end of file diff --git a/third_party/PartField/partfield/model/PVCNN/conv_pointnet.py b/third_party/PartField/partfield/model/PVCNN/conv_pointnet.py new file mode 100644 index 0000000000000000000000000000000000000000..8c5c806f1e725ed9a75aeb752f3e2ae4a5c606a1 --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/conv_pointnet.py @@ -0,0 +1,251 @@ +""" +Taken from gensdf +https://github.com/princeton-computational-imaging/gensdf +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +# from dnnlib.util import printarr +try: + from torch_scatter import scatter_mean, scatter_max +except: + pass +# from .unet import UNet +import torch +import torch.nn as nn +import torch.nn.functional as F + + +# Resnet Blocks +class ResnetBlockFC(nn.Module): + ''' Fully connected ResNet Block class. + Args: + size_in (int): input dimension + size_out (int): output dimension + size_h (int): hidden dimension + ''' + + def __init__(self, size_in, size_out=None, size_h=None): + super().__init__() + # Attributes + if size_out is None: + size_out = size_in + + if size_h is None: + size_h = min(size_in, size_out) + + self.size_in = size_in + self.size_h = size_h + self.size_out = size_out + # Submodules + self.fc_0 = nn.Linear(size_in, size_h) + self.fc_1 = nn.Linear(size_h, size_out) + self.actvn = nn.ReLU() + + if size_in == size_out: + self.shortcut = None + else: + self.shortcut = nn.Linear(size_in, size_out, bias=False) + # Initialization + nn.init.zeros_(self.fc_1.weight) + + def forward(self, x): + net = self.fc_0(self.actvn(x)) + dx = self.fc_1(self.actvn(net)) + + if self.shortcut is not None: + x_s = self.shortcut(x) + else: + x_s = x + + return x_s + dx + + +class ConvPointnet(nn.Module): + ''' PointNet-based encoder network with ResNet blocks for each point. + Number of input points are fixed. 
+
+    Args:
+        c_dim (int): dimension of latent code c
+        dim (int): input points dimension
+        hidden_dim (int): hidden dimension of the network
+        scatter_type (str): feature aggregation when doing local pooling
+        unet (bool): whether to use U-Net
+        unet_kwargs (str): U-Net parameters
+        plane_resolution (int): defined resolution for plane feature
+        plane_type (str): feature type, 'xz' - 1-plane, ['xz', 'xy', 'yz'] - 3-plane, ['grid'] - 3D grid volume
+        padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
+        n_blocks (int): number of ResnetBlockFC layers
+    '''
+
+    def __init__(self, c_dim=128, dim=3, hidden_dim=128, scatter_type='max',
+                 # unet=False, unet_kwargs=None,
+                 plane_resolution=None, plane_type=['xz', 'xy', 'yz'], padding=0.1, n_blocks=5):
+        super().__init__()
+        self.c_dim = c_dim
+
+        self.fc_pos = nn.Linear(dim, 2*hidden_dim)
+        self.blocks = nn.ModuleList([
+            ResnetBlockFC(2*hidden_dim, hidden_dim) for i in range(n_blocks)
+        ])
+        self.fc_c = nn.Linear(hidden_dim, c_dim)
+
+        self.actvn = nn.ReLU()
+        self.hidden_dim = hidden_dim
+
+        # if unet:
+        #     self.unet = UNet(c_dim, in_channels=c_dim, **unet_kwargs)
+        # else:
+        #     self.unet = None
+
+        self.reso_plane = plane_resolution
+        self.plane_type = plane_type
+        self.padding = padding
+
+        if scatter_type == 'max':
+            self.scatter = scatter_max
+        elif scatter_type == 'mean':
+            self.scatter = scatter_mean
+
+    # takes in "p": point cloud and "query": sdf_xyz
+    # sample plane features for unlabeled_query as well
+    def forward(self, p):  #, query2):
+        batch_size, T, D = p.size()
+
+        # acquire the index for each point
+        coord = {}
+        index = {}
+        if 'xz' in self.plane_type:
+            coord['xz'] = self.normalize_coordinate(p.clone(), plane='xz', padding=self.padding)
+            index['xz'] = self.coordinate2index(coord['xz'], self.reso_plane)
+        if 'xy' in self.plane_type:
+            coord['xy'] = self.normalize_coordinate(p.clone(), plane='xy', padding=self.padding)
+            index['xy'] = self.coordinate2index(coord['xy'], self.reso_plane)
+        if 'yz' in self.plane_type:
+            coord['yz'] = self.normalize_coordinate(p.clone(), plane='yz', padding=self.padding)
+            index['yz'] = self.coordinate2index(coord['yz'], self.reso_plane)
+
+        net = self.fc_pos(p)
+
+        net = self.blocks[0](net)
+        for block in self.blocks[1:]:
+            pooled = self.pool_local(coord, index, net)
+            net = torch.cat([net, pooled], dim=2)
+            net = block(net)
+
+        c = self.fc_c(net)
+
+        fea = {}
+        plane_feat_sum = 0
+        #second_sum = 0
+        if 'xz' in self.plane_type:
+            fea['xz'] = self.generate_plane_features(p, c, plane='xz')  # shape: batch, latent size, resolution, resolution (e.g. 16, 256, 64, 64)
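+            # The xz/xy/yz branches all work the same way: per-point features are
+            # scatter-averaged into a reso_plane x reso_plane grid for each canonical
+            # plane (see generate_plane_features below); a decoder can later sample
+            # these planes at query points with sample_plane_feature.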
+            # plane_feat_sum += self.sample_plane_feature(query, fea['xz'], 'xz')
+            #second_sum += self.sample_plane_feature(query2, fea['xz'], 'xz')
+        if 'xy' in self.plane_type:
+            fea['xy'] = self.generate_plane_features(p, c, plane='xy')
+            # plane_feat_sum += self.sample_plane_feature(query, fea['xy'], 'xy')
+            #second_sum += self.sample_plane_feature(query2, fea['xy'], 'xy')
+        if 'yz' in self.plane_type:
+            fea['yz'] = self.generate_plane_features(p, c, plane='yz')
+            # plane_feat_sum += self.sample_plane_feature(query, fea['yz'], 'yz')
+            #second_sum += self.sample_plane_feature(query2, fea['yz'], 'yz')
+        return fea
+
+        # return plane_feat_sum.transpose(2,1)#, second_sum.transpose(2,1)
+
+    def normalize_coordinate(self, p, padding=0.1, plane='xz'):
+        ''' Normalize coordinate to [0, 1] for unit cube experiments
+
+        Args:
+            p (tensor): point
+            padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
+            plane (str): plane feature type, ['xz', 'xy', 'yz']
+        '''
+        if plane == 'xz':
+            xy = p[:, :, [0, 2]]
+        elif plane == 'xy':
+            xy = p[:, :, [0, 1]]
+        else:
+            xy = p[:, :, [1, 2]]
+
+        xy_new = xy / (1 + padding + 10e-6)  # (-0.5, 0.5)
+        xy_new = xy_new + 0.5  # range (0, 1)
+
+        # if there are outliers out of the range
+        if xy_new.max() >= 1:
+            xy_new[xy_new >= 1] = 1 - 10e-6
+        if xy_new.min() < 0:
+            xy_new[xy_new < 0] = 0.0
+        return xy_new
+
+    def coordinate2index(self, x, reso):
+        ''' Converts normalized plane coordinates in [0, 1] to flat indices into a
+        reso x reso grid.
+
+        Args:
+            x (tensor): coordinate
+            reso (int): defined resolution
+        '''
+        x = (x * reso).long()
+        index = x[:, :, 0] + reso * x[:, :, 1]
+        index = index[:, None, :]
+        return index
+
+    # xy is the normalized coordinates of the point cloud of each plane
+    # I'm pretty sure the keys of xy are the same as those of index, so xy isn't needed here as input
+    def pool_local(self, xy, index, c):
+        bs, fea_dim = c.size(0), c.size(2)
+        keys = xy.keys()
+
+        c_out = 0
+        for key in keys:
+            # scatter plane features from points
+            fea = self.scatter(c.permute(0, 2, 1), index[key], dim_size=self.reso_plane**2)
+            if self.scatter == scatter_max:
+                fea = fea[0]
+            # gather feature back to points
+            fea = fea.gather(dim=2, index=index[key].expand(-1, fea_dim, -1))
+            c_out += fea
+        return c_out.permute(0, 2, 1)
+
+    def generate_plane_features(self, p, c, plane='xz'):
+        # acquire indices of features in plane
+        xy = self.normalize_coordinate(p.clone(), plane=plane, padding=self.padding)  # normalize to the range of (0, 1)
+        index = self.coordinate2index(xy, self.reso_plane)
+
+        # scatter plane features from points
+        fea_plane = c.new_zeros(p.size(0), self.c_dim, self.reso_plane**2)
+        c = c.permute(0, 2, 1)  # B x 512 x T
+        fea_plane = scatter_mean(c, index, out=fea_plane)  # B x 512 x reso^2
+        fea_plane = fea_plane.reshape(p.size(0), self.c_dim, self.reso_plane, self.reso_plane)  # sparse matrix (B x 512 x reso x reso)
+
+        # printarr(fea_plane, c, p, xy, index)
+        # import pdb; pdb.set_trace()
+
+        # process the plane features with UNet
+        # if self.unet is not None:
+        #     fea_plane = self.unet(fea_plane)
+
+        return fea_plane
+
+    # sample_plane_feature function copied from /src/conv_onet/models/decoder.py
+    # uses values from plane_feature and pixel locations from vgrid to interpolate feature
+    def sample_plane_feature(self, query, plane_feature, plane):
+        xy = self.normalize_coordinate(query.clone(), plane=plane, padding=self.padding)
+        xy = xy[:, :, None].float()
+
vgrid = 2.0 * xy - 1.0 # normalize to (-1, 1) + sampled_feat = F.grid_sample(plane_feature, vgrid, padding_mode='border', align_corners=True, mode='bilinear').squeeze(-1) + return sampled_feat + + + \ No newline at end of file diff --git a/third_party/PartField/partfield/model/PVCNN/dnnlib_util.py b/third_party/PartField/partfield/model/PVCNN/dnnlib_util.py new file mode 100644 index 0000000000000000000000000000000000000000..9514fe685275a66fc83bf78fb0cf3c94952678dd --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/dnnlib_util.py @@ -0,0 +1,1074 @@ +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited. + +"""Miscellaneous utility classes and functions.""" +from collections import namedtuple +import time +import ctypes +import fnmatch +import importlib +import inspect +import numpy as np +import json +import os +import shutil +import sys +import types +import io +import pickle +import re +# import requests +import html +import hashlib +import glob +import tempfile +import urllib +import urllib.request +import uuid +import boto3 +import threading +from contextlib import ContextDecorator +from contextlib import contextmanager, nullcontext + +from distutils.util import strtobool +from typing import Any, List, Tuple, Union +import importlib +from loguru import logger +# import wandb +import torch +import psutil +import subprocess + +import random +import string +import pdb + +# Util classes +# ------------------------------------------------------------------------------------------ + + +class EasyDict(dict): + """Convenience class that behaves like a dict but allows access with the attribute syntax.""" + + def __getattr__(self, name: str) -> Any: + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __setattr__(self, name: str, value: Any) -> None: + self[name] = value + + def __delattr__(self, name: str) -> None: + del self[name] + + +class Logger(object): + """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file.""" + + def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True): + self.file = None + + if file_name is not None: + self.file = open(file_name, file_mode) + + self.should_flush = should_flush + self.stdout = sys.stdout + self.stderr = sys.stderr + + sys.stdout = self + sys.stderr = self + + def __enter__(self) -> "Logger": + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + self.close() + + def write(self, text: Union[str, bytes]) -> None: + """Write text to stdout (and a file) and optionally flush.""" + if isinstance(text, bytes): + text = text.decode() + if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash + return + + if self.file is not None: + self.file.write(text) + + self.stdout.write(text) + + if self.should_flush: + self.flush() + + def flush(self) -> None: + """Flush written text to both stdout and a file, if open.""" + if self.file is not None: + self.file.flush() + + self.stdout.flush() + + def close(self) -> None: + 
"""Flush, close possible files, and remove stdout/stderr mirroring.""" + self.flush() + + # if using multiple loggers, prevent closing in wrong order + if sys.stdout is self: + sys.stdout = self.stdout + if sys.stderr is self: + sys.stderr = self.stderr + + if self.file is not None: + self.file.close() + self.file = None + + +# Cache directories +# ------------------------------------------------------------------------------------------ + +_dnnlib_cache_dir = None + + +def set_cache_dir(path: str) -> None: + global _dnnlib_cache_dir + _dnnlib_cache_dir = path + + +def make_cache_dir_path(*paths: str) -> str: + if _dnnlib_cache_dir is not None: + return os.path.join(_dnnlib_cache_dir, *paths) + if 'DNNLIB_CACHE_DIR' in os.environ: + return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths) + if 'HOME' in os.environ: + return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths) + if 'USERPROFILE' in os.environ: + return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths) + return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths) + + +# Small util functions +# ------------------------------------------------------------------------------------------ + + +def format_time(seconds: Union[int, float]) -> str: + """Convert the seconds to human readable string with days, hours, minutes and seconds.""" + s = int(np.rint(seconds)) + + if s < 60: + return "{0}s".format(s) + elif s < 60 * 60: + return "{0}m {1:02}s".format(s // 60, s % 60) + elif s < 24 * 60 * 60: + return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60) + else: + return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60) + + +def format_time_brief(seconds: Union[int, float]) -> str: + """Convert the seconds to human readable string with days, hours, minutes and seconds.""" + s = int(np.rint(seconds)) + + if s < 60: + return "{0}s".format(s) + elif s < 60 * 60: + return "{0}m {1:02}s".format(s // 60, s % 60) + elif s < 24 * 60 * 60: + return "{0}h {1:02}m".format(s // (60 * 60), (s // 60) % 60) + else: + return "{0}d {1:02}h".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24) + + +def ask_yes_no(question: str) -> bool: + """Ask the user the question until the user inputs a valid answer.""" + while True: + try: + print("{0} [y/n]".format(question)) + return strtobool(input().lower()) + except ValueError: + pass + + +def tuple_product(t: Tuple) -> Any: + """Calculate the product of the tuple elements.""" + result = 1 + + for v in t: + result *= v + + return result + + +_str_to_ctype = { + "uint8": ctypes.c_ubyte, + "uint16": ctypes.c_uint16, + "uint32": ctypes.c_uint32, + "uint64": ctypes.c_uint64, + "int8": ctypes.c_byte, + "int16": ctypes.c_int16, + "int32": ctypes.c_int32, + "int64": ctypes.c_int64, + "float32": ctypes.c_float, + "float64": ctypes.c_double +} + + +def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]: + """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes.""" + type_str = None + + if isinstance(type_obj, str): + type_str = type_obj + elif hasattr(type_obj, "__name__"): + type_str = type_obj.__name__ + elif hasattr(type_obj, "name"): + type_str = type_obj.name + else: + raise RuntimeError("Cannot infer type name from input") + + assert type_str in _str_to_ctype.keys() + + my_dtype = np.dtype(type_str) + my_ctype = _str_to_ctype[type_str] + + assert my_dtype.itemsize == ctypes.sizeof(my_ctype) + + return my_dtype, my_ctype 
+ + +def is_pickleable(obj: Any) -> bool: + try: + with io.BytesIO() as stream: + pickle.dump(obj, stream) + return True + except: + return False + + +# Functionality to import modules/objects by name, and call functions by name +# ------------------------------------------------------------------------------------------ + +def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]: + """Searches for the underlying module behind the name to some python object. + Returns the module and the object name (original name with module part removed).""" + + # allow convenience shorthands, substitute them by full names + obj_name = re.sub("^np.", "numpy.", obj_name) + obj_name = re.sub("^tf.", "tensorflow.", obj_name) + + # list alternatives for (module_name, local_obj_name) + parts = obj_name.split(".") + name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)] + + # try each alternative in turn + for module_name, local_obj_name in name_pairs: + try: + module = importlib.import_module(module_name) # may raise ImportError + get_obj_from_module(module, local_obj_name) # may raise AttributeError + return module, local_obj_name + except: + pass + + # maybe some of the modules themselves contain errors? + for module_name, _local_obj_name in name_pairs: + try: + importlib.import_module(module_name) # may raise ImportError + except ImportError: + if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"): + raise + + # maybe the requested attribute is missing? + for module_name, local_obj_name in name_pairs: + try: + module = importlib.import_module(module_name) # may raise ImportError + get_obj_from_module(module, local_obj_name) # may raise AttributeError + except ImportError: + pass + + # we are out of luck, but we have no idea why + raise ImportError(obj_name) + + +def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any: + """Traverses the object name and returns the last (rightmost) python object.""" + if obj_name == '': + return module + obj = module + for part in obj_name.split("."): + obj = getattr(obj, part) + return obj + + +def get_obj_by_name(name: str) -> Any: + """Finds the python object with the given name.""" + module, obj_name = get_module_from_obj_name(name) + return get_obj_from_module(module, obj_name) + + +def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any: + """Finds the python object with the given name and calls it as a function.""" + assert func_name is not None + func_obj = get_obj_by_name(func_name) + assert callable(func_obj) + return func_obj(*args, **kwargs) + + +def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any: + """Finds the python class with the given name and constructs it with the given arguments.""" + return call_func_by_name(*args, func_name=class_name, **kwargs) + + +def get_module_dir_by_obj_name(obj_name: str) -> str: + """Get the directory path of the module containing the given object name.""" + module, _ = get_module_from_obj_name(obj_name) + return os.path.dirname(inspect.getfile(module)) + + +def is_top_level_function(obj: Any) -> bool: + """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'.""" + return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__ + + +def get_top_level_function_name(obj: Any) -> str: + """Return the fully-qualified name of a top-level function.""" + assert is_top_level_function(obj) + module = obj.__module__ + if module == 
'__main__': + module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0] + return module + "." + obj.__name__ + + +# File system helpers +# ------------------------------------------------------------------------------------------ + +def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]: + """List all files recursively in a given directory while ignoring given file and directory names. + Returns list of tuples containing both absolute and relative paths.""" + assert os.path.isdir(dir_path) + base_name = os.path.basename(os.path.normpath(dir_path)) + + if ignores is None: + ignores = [] + + result = [] + + for root, dirs, files in os.walk(dir_path, topdown=True): + for ignore_ in ignores: + dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)] + + # dirs need to be edited in-place + for d in dirs_to_remove: + dirs.remove(d) + + files = [f for f in files if not fnmatch.fnmatch(f, ignore_)] + + absolute_paths = [os.path.join(root, f) for f in files] + relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths] + + if add_base_to_relative: + relative_paths = [os.path.join(base_name, p) for p in relative_paths] + + assert len(absolute_paths) == len(relative_paths) + result += zip(absolute_paths, relative_paths) + + return result + + +def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None: + """Takes in a list of tuples of (src, dst) paths and copies files. + Will create all necessary directories.""" + for file in files: + target_dir_name = os.path.dirname(file[1]) + + # will create all intermediate-level directories + if not os.path.exists(target_dir_name): + os.makedirs(target_dir_name) + + shutil.copyfile(file[0], file[1]) + + +# URL helpers +# ------------------------------------------------------------------------------------------ + +def is_url(obj: Any, allow_file_urls: bool = False) -> bool: + """Determine whether the given object is a valid URL string.""" + if not isinstance(obj, str) or not "://" in obj: + return False + if allow_file_urls and obj.startswith('file://'): + return True + try: + res = requests.compat.urlparse(obj) + if not res.scheme or not res.netloc or not "." in res.netloc: + return False + res = requests.compat.urlparse(requests.compat.urljoin(obj, "/")) + if not res.scheme or not res.netloc or not "." in res.netloc: + return False + except: + return False + return True + + +def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any: + """Download the given URL and return a binary-mode file object to access the data.""" + assert num_attempts >= 1 + assert not (return_filename and (not cache)) + + # Doesn't look like an URL scheme so interpret it as a local filename. + if not re.match('^[a-z]+://', url): + return url if return_filename else open(url, "rb") + + # Handle file URLs. This code handles unusual file:// patterns that + # arise on Windows: + # + # file:///c:/foo.txt + # + # which would translate to a local '/c:/foo.txt' filename that's + # invalid. Drop the forward slash for such pathnames. + # + # If you touch this code path, you should test it on both Linux and + # Windows. + # + # Some internet resources suggest using urllib.request.url2pathname() but + # but that converts forward slashes to backslashes and this causes + # its own set of problems. 
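+    # Note: is_url() and the download path below still reference the `requests`
+    # module, whose import is commented out at the top of this vendored file, so
+    # fetching real http(s) URLs will raise NameError unless `requests` is imported.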
+ if url.startswith('file://'): + filename = urllib.parse.urlparse(url).path + if re.match(r'^/[a-zA-Z]:', filename): + filename = filename[1:] + return filename if return_filename else open(filename, "rb") + + assert is_url(url) + + # Lookup from cache. + if cache_dir is None: + cache_dir = make_cache_dir_path('downloads') + + url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest() + if cache: + cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*")) + if len(cache_files) == 1: + filename = cache_files[0] + return filename if return_filename else open(filename, "rb") + + # Download. + url_name = None + url_data = None + with requests.Session() as session: + if verbose: + print("Downloading %s ..." % url, end="", flush=True) + for attempts_left in reversed(range(num_attempts)): + try: + with session.get(url) as res: + res.raise_for_status() + if len(res.content) == 0: + raise IOError("No data received") + + if len(res.content) < 8192: + content_str = res.content.decode("utf-8") + if "download_warning" in res.headers.get("Set-Cookie", ""): + links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link] + if len(links) == 1: + url = requests.compat.urljoin(url, links[0]) + raise IOError("Google Drive virus checker nag") + if "Google Drive - Quota exceeded" in content_str: + raise IOError("Google Drive download quota exceeded -- please try again later") + + match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", "")) + url_name = match[1] if match else url + url_data = res.content + if verbose: + print(" done") + break + except KeyboardInterrupt: + raise + except: + if not attempts_left: + if verbose: + print(" failed") + raise + if verbose: + print(".", end="", flush=True) + + # Save to cache. + if cache: + safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name) + cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name) + temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name) + os.makedirs(cache_dir, exist_ok=True) + with open(temp_file, "wb") as f: + f.write(url_data) + os.replace(temp_file, cache_file) # atomic + if return_filename: + return cache_file + + # Return data as file object. + assert not return_filename + return io.BytesIO(url_data) + +# ------------------------------------------------------------------------------------------ +# util function modified from https://github.com/nv-tlabs/LION/blob/0467d2199076e95a7e88bafd99dcd7d48a04b4a7/utils/model_helper.py +def import_class(model_str): + from torch_utils.dist_utils import is_rank0 + if is_rank0(): + logger.info('import: {}', model_str) + p, m = model_str.rsplit('.', 1) + mod = importlib.import_module(p) + Model = getattr(mod, m) + return Model + +class ScopedTorchProfiler(ContextDecorator): + """ + Marks ranges for both nvtx profiling (with nsys) and torch autograd profiler + """ + __global_counts = {} + enabled=False + + def __init__(self, unique_name: str): + """ + Names must be unique! 
+ """ + ScopedTorchProfiler.__global_counts[unique_name] = 0 + self._name = unique_name + self._autograd_scope = torch.profiler.record_function(unique_name) + + def __enter__(self): + if ScopedTorchProfiler.enabled: + torch.cuda.nvtx.range_push(self._name) + self._autograd_scope.__enter__() + + def __exit__(self, exc_type, exc_value, traceback): + self._autograd_scope.__exit__(exc_type, exc_value, traceback) + if ScopedTorchProfiler.enabled: + torch.cuda.nvtx.range_pop() + +class TimingsMonitor(): + CUDATimer = namedtuple('CUDATimer', ['start', 'end']) + def __init__(self, device, enabled=True, timing_names:List[str]=[], cuda_timing_names:List[str]=[]): + """ + Usage: + tmonitor = TimingsMonitor(device) + for i in range(n_iter): + # Record arbitrary scopes + with tmonitor.timing_scope('regular_scope_name'): + ... + with tmonitor.cuda_timing_scope('nested_scope_name'): + ... + with tmonitor.cuda_timing_scope('cuda_scope_name'): + ... + tmonitor.record_timing('duration_name', end_time - start_time) + + # Gather timings + tmonitor.record_all_cuda_timings() + tmonitor.update_all_averages() + averages = tmonitor.get_average_timings() + all_timings = tmonitor.get_timings() + + Two types of timers, standard report timing and cuda timings. + Cuda timing supports scoped context manager cuda_event_scope. + Args: + device: device to time on (needed for cuda timers) + # enabled: HACK to only report timings from rank 0, set enabled=(global_rank==0) + timing_names: timings to report optional (will auto add new names) + cuda_timing_names: cuda periods to time optional (will auto add new names) + """ + self.enabled=enabled + self.device = device + + # Normal timing + # self.all_timings_dict = {k:None for k in timing_names + cuda_timing_names} + self.all_timings_dict = {} + self.avg_meter_dict = {} + + # Cuda event timers to measure time spent on pushing data to gpu and on training step + self.cuda_event_timers = {} + + for k in timing_names: + self.add_new_timing(k) + + for k in cuda_timing_names: + self.add_new_cuda_timing(k) + + # Running averages + # self.avg_meter_dict = {k:AverageMeter() for k in self.all_timings_dict} + + def add_new_timing(self, name): + self.avg_meter_dict[name] = AverageMeter() + self.all_timings_dict[name] = None + + def add_new_cuda_timing(self, name): + start_event = torch.cuda.Event(enable_timing=True) + end_event = torch.cuda.Event(enable_timing=True) + self.cuda_event_timers[name] = self.CUDATimer(start=start_event, end=end_event) + self.add_new_timing(name) + + def clear_timings(self): + self.all_timings_dict = {k:None for k in self.all_timings_dict} + + def get_timings(self): + return self.all_timings_dict + + def get_average_timings(self): + return {k:v.avg for k,v in self.avg_meter_dict.items()} + + def update_all_averages(self): + """ + Once per iter, when timings have been finished recording, one should + call update_average_iter to keep running average of timings. 
+ """ + for k,v in self.all_timings_dict.items(): + if v is None: + print("none_timing", k) + continue + self.avg_meter_dict[k].update(v) + + def record_timing(self, name, value): + if name not in self.all_timings_dict: self.add_new_timing(name) + # assert name in self.all_timings_dict + self.all_timings_dict[name] = value + + def _record_cuda_event_start(self, name): + if name in self.cuda_event_timers: + self.cuda_event_timers[name].start.record( + torch.cuda.current_stream(self.device)) + + def _record_cuda_event_end(self, name): + if name in self.cuda_event_timers: + self.cuda_event_timers[name].end.record( + torch.cuda.current_stream(self.device)) + + @contextmanager + def cuda_timing_scope(self, name, profile=True): + if name not in self.all_timings_dict: self.add_new_cuda_timing(name) + with ScopedTorchProfiler(name) if profile else nullcontext(): + self._record_cuda_event_start(name) + try: + yield + finally: + self._record_cuda_event_end(name) + + @contextmanager + def timing_scope(self, name, profile=True): + if name not in self.all_timings_dict: self.add_new_timing(name) + with ScopedTorchProfiler(name) if profile else nullcontext(): + start_time = time.time() + try: + yield + finally: + self.record_timing(name, time.time()-start_time) + + def record_all_cuda_timings(self): + """ After all the cuda events call this to synchronize and record down the cuda timings. """ + for k, events in self.cuda_event_timers.items(): + with torch.no_grad(): + events.end.synchronize() + # Convert to seconds + time_elapsed = events.start.elapsed_time(events.end)/1000. + self.all_timings_dict[k] = time_elapsed + +def init_s3(config_file): + config = json.load(open(config_file, 'r')) + s3_client = boto3.client("s3", **config) + return s3_client + +def download_from_s3(file_path, target_path, cfg): + tic = time.time() + s3_client = init_s3(cfg.checkpoint.write_s3_config) # use to test the s3_client can be init + bucket_name = file_path.split('/')[2] + file_key = file_path.split(bucket_name+'/')[-1] + print(bucket_name, file_key) + s3_client.download_file(bucket_name, file_key, target_path) + logger.info(f'finish download from ! s3://{bucket_name}/{file_key} to {target_path} %.1f sec'%( + time.time() - tic)) + +def upload_to_s3(buffer, bucket_name, key, config_dict): + logger.info(f'start upload_to_s3! bucket_name={bucket_name}, key={key}') + tic = time.time() + s3 = boto3.client('s3', **config_dict) + s3.put_object(Bucket=bucket_name, Key=key, Body=buffer.getvalue()) + logger.info(f'finish upload_to_s3! 
s3://{bucket_name}/{key} %.1f sec'%(time.time() - tic))
+
+def write_ckpt_to_s3(cfg, all_model_dict, ckpt_name):
+    buffer = io.BytesIO()
+    tic = time.time()
+    torch.save(all_model_dict, buffer)  # takes ~0.25 sec
+    # logger.info('write ckpt to buffer: %.2f sec'%(time.time() - tic))
+    group, name = cfg.outdir.rstrip("/").split("/")[-2:]
+    key = f"checkpoints/{group}/{name}/ckpt/{ckpt_name}"
+    bucket_name = cfg.checkpoint.write_s3_bucket
+
+    s3_client = init_s3(cfg.checkpoint.write_s3_config)  # used to test that the s3_client can be initialized
+
+    config_dict = json.load(open(cfg.checkpoint.write_s3_config, 'r'))
+    upload_thread = threading.Thread(target=upload_to_s3, args=(buffer, bucket_name, key, config_dict))
+    upload_thread.start()
+    path = f"s3://{bucket_name}/{key}"
+    return path
+
+def upload_file_to_s3(cfg, file_path, key_name=None):
+    # file_path is the local file path, can be a yaml file
+    # this function is used to upload the checkpoint only
+    tic = time.time()
+    group, name = cfg.outdir.rstrip("/").split("/")[-2:]
+    if key_name is None:
+        key = os.path.basename(file_path)
+    else:
+        key = key_name  # without this branch, `key` is undefined when key_name is given
+    key = f"checkpoints/{group}/{name}/{key}"
+    bucket_name = cfg.checkpoint.write_s3_bucket
+    s3_client = init_s3(cfg.checkpoint.write_s3_config)
+    # Upload the file
+    with open(file_path, 'rb') as f:
+        s3_client.upload_fileobj(f, bucket_name, key)
+    full_s3_path = f"s3://{bucket_name}/{key}"
+    logger.info(f'upload_to_s3: {file_path} {full_s3_path} | use time: {time.time()-tic}')
+
+    return full_s3_path
+
+
+def load_from_s3(file_path, cfg, load_fn):
+    """
+    ckpt_path example:
+        s3://xzeng/checkpoints/2023_0413/vae_kl_5e-1/ckpt/snapshot_epo000163_iter164000.pt
+    """
+    s3_client = init_s3(cfg.checkpoint.write_s3_config)  # used to test that the s3_client can be initialized
+    bucket_name = file_path.split("s3://")[-1].split('/')[0]
+    key = file_path.split(f'{bucket_name}/')[-1]
+    # logger.info(f"-> try to load s3://{bucket_name}/{key} ")
+    tic = time.time()
+    for attempt in range(10):
+        try:
+            # Download the state dict from S3 into memory (as a binary stream)
+            with io.BytesIO() as buffer:
+                s3_client.download_fileobj(bucket_name, key, buffer)
+                buffer.seek(0)
+
+                # Load the state dict into a PyTorch model
+                # out = torch.load(buffer, map_location=torch.device("cpu"))
+                out = load_fn(buffer)
+            break
+        except:
+            logger.info(f"fail to load s3://{bucket_name}/{key} attempt: {attempt}")
+    from torch_utils.dist_utils import is_rank0
+    if is_rank0():
+        logger.info(f'loaded {file_path} | use time: {time.time()-tic:.1f} sec')
+    return out
+
+def load_torch_dict_from_s3(ckpt_path, cfg):
+    """
+    ckpt_path example:
+        s3://xzeng/checkpoints/2023_0413/vae_kl_5e-1/ckpt/snapshot_epo000163_iter164000.pt
+    """
+    s3_client = init_s3(cfg.checkpoint.write_s3_config)  # used to test that the s3_client can be initialized
+    bucket_name = ckpt_path.split("s3://")[-1].split('/')[0]
+    key = ckpt_path.split(f'{bucket_name}/')[-1]
+    for attempt in range(10):
+        try:
+            # Download the state dict from S3 into memory (as a binary stream)
+            with io.BytesIO() as buffer:
+                s3_client.download_fileobj(bucket_name, key, buffer)
+                buffer.seek(0)
+
+                # Load the state dict into a PyTorch model
+                out = torch.load(buffer, map_location=torch.device("cpu"))
+            break
+        except:
+            logger.info(f"fail to load s3://{bucket_name}/{key} attempt: {attempt}")
+    return out
+
+def count_parameters_in_M(model):
+    return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary" not in name) / 1e6
+
+def printarr(*arrs, float_width=6, **kwargs):
+    """
+    Print a pretty table giving name, shape, dtype, type, and content information for input tensors or scalars.
+
+    Call like: printarr(my_arr, some_other_arr, maybe_a_scalar). Accepts a variable number of arguments.
+
+    Inputs can be:
+        - Numpy tensor arrays
+        - Pytorch tensor arrays
+        - Jax tensor arrays
+        - Python ints / floats
+        - None
+
+    It may also work with other array-like types, but they have not been tested.
+
+    Use the `float_width` option to specify the precision to which floating point types are printed.
+
+    Author: Nicholas Sharp (nmwsharp.com)
+    Canonical source: https://gist.github.com/nmwsharp/54d04af87872a4988809f128e1a1d233
+    License: This snippet may be used under an MIT license, and it is also released into the public domain.
+    Please retain this docstring as a reference.
+    """
+
+    frame = inspect.currentframe().f_back
+    default_name = "[temporary]"
+
+    ## helpers to gather data about each array
+    def name_from_outer_scope(a):
+        if a is None:
+            return '[None]'
+        name = default_name
+        for k, v in frame.f_locals.items():
+            if v is a:
+                name = k
+                break
+        return name
+
+    def type_strip(type_str):
+        return type_str.lstrip('').replace('torch.', '').strip("'")
+
+    def dtype_str(a):
+        if a is None:
+            return 'None'
+        if isinstance(a, int):
+            return 'int'
+        if isinstance(a, float):
+            return 'float'
+        if isinstance(a, list) and len(a) > 0:
+            return type_strip(str(type(a[0])))
+        if hasattr(a, 'dtype'):
+            return type_strip(str(a.dtype))
+        else:
+            return ''
+
+    def shape_str(a):
+        if a is None:
+            return 'N/A'
+        if isinstance(a, int):
+            return 'scalar'
+        if isinstance(a, float):
+            return 'scalar'
+        if isinstance(a, list):
+            return f"[{shape_str(a[0]) if len(a) > 0 else '?'}]*{len(a)}"
+        if hasattr(a, 'shape'):
+            return str(tuple(a.shape))
+        else:
+            return ''
+
+    def type_str(a):
+        return type_strip(str(type(a)))  # TODO this is weird... what's the better way?
+
+    def device_str(a):
+        if hasattr(a, 'device'):
+            device_str = str(a.device)
+            if len(device_str) < 10:
+                # heuristic: jax returns some goofy long string we don't want, ignore it
+                return device_str
+        return ""
+
+    def format_float(x):
+        return f"{x:{float_width}g}"
+
+    def minmaxmean_str(a):
+        if a is None:
+            return ('N/A', 'N/A', 'N/A', 'N/A')
+        if isinstance(a, int) or isinstance(a, float):
+            return (format_float(a),)*4
+
+        # compute min/max/mean; if anything goes wrong, just print 'N/A'
+ +def debug_print_all_tensor_sizes(min_tot_size = 0): + import gc + print("---------------------------------------"*3) + for obj in gc.get_objects(): + try: + if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)): + if np.prod(obj.size())>=min_tot_size: + print(type(obj), obj.size()) + except Exception: + pass +def print_cpu_usage(): + + # Get current CPU usage as a percentage + cpu_usage = psutil.cpu_percent() + + # Get current memory usage + memory_usage = psutil.virtual_memory().used + + # Convert memory usage to a human-readable format + memory_usage_str = psutil._common.bytes2human(memory_usage) + + # Return CPU and memory usage as a log message + msg = f"Current CPU usage: {cpu_usage}% | " + msg += f"Current memory usage: {memory_usage_str}" + return msg + +def calmsize(num_bytes): + if math.isnan(num_bytes): + return '' + for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: + if abs(num_bytes) < 1024.0: + return "{:.1f}{}B".format(num_bytes, unit) + num_bytes /= 1024.0 + return "{:.1f}{}B".format(num_bytes, 'Y') + +def readable_size(num_bytes: int) -> str: + return calmsize(num_bytes) + +def get_gpu_memory(): + """ + Get the current GPU memory usage (in MiB) for each device as a dictionary + """ + output = subprocess.check_output(["nvidia-smi", "--query-gpu=memory.used", "--format=csv"]) + output = output.decode("utf-8") + gpu_memory_values = output.split("\n")[1:-1] + gpu_memory_values = [int(x.strip().split()[0]) for x in gpu_memory_values] + gpu_memory = dict(zip(range(len(gpu_memory_values)), gpu_memory_values)) + return gpu_memory
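+# --- Illustrative usage sketch (not part of the original file): get_gpu_memory shells out to nvidia-smi, so this only runs on a machine with NVIDIA drivers installed. +if __name__ == "__main__": + print(print_cpu_usage()) # e.g. "Current CPU usage: 3.1% | Current memory usage: 12.4G" + print(get_gpu_memory()) # e.g. {0: 512, 1: 0} (MiB per device)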
+ +def get_gpu_util(): + """ + Get the current GPU utilization (in %) for each device as a dictionary + """ + output = subprocess.check_output(["nvidia-smi", "--query-gpu=utilization.gpu", "--format=csv"]) + output = output.decode("utf-8") + gpu_util_values = output.split("\n")[1:-1] + gpu_util_values = [int(x.strip().split()[0]) for x in gpu_util_values] + gpu_util = dict(zip(range(len(gpu_util_values)), gpu_util_values)) + return gpu_util + + +def print_gpu_usage(): + usage = get_gpu_memory() + msg = " | GPU usage: " + for k, v in usage.items(): + msg += f"{k}: {v} MB " + # utilization = get_gpu_util() + # msg + ' | util ' + # for k, v in utilization.items(): + # msg += f"{k}: {v} % " + return msg + +class AverageMeter(object): + + def __init__(self): + self.reset() + + def reset(self): + self.avg = 0 + self.sum = 0 + self.cnt = 0 + + def update(self, val, n=1): + self.sum += val * n + self.cnt += n + self.avg = self.sum / self.cnt + + +def generate_random_string(length): + # Generate a string of `length` random ASCII letters (both lowercase and uppercase). + letters = string.ascii_letters + return ''.join(random.choice(letters) for _ in range(length)) + + +class ForkedPdb(pdb.Pdb): + """ + PDB Subclass for debugging multi-processed code + Suggested in: https://stackoverflow.com/questions/4716533/how-to-attach-debugger-to-a-python-subproccess + """ + def interaction(self, *args, **kwargs): + _stdin = sys.stdin + try: + sys.stdin = open('/dev/stdin') + pdb.Pdb.interaction(self, *args, **kwargs) + finally: + sys.stdin = _stdin + +def check_exist_in_s3(file_path, s3_config): + s3 = init_s3(s3_config) + bucket_name, object_name = s3path_to_bucket_key(file_path) + + try: + s3.head_object(Bucket=bucket_name, Key=object_name) + return 1 + except Exception: + logger.info(f'file not found: s3://{bucket_name}/{object_name}') + return 0 + +def s3path_to_bucket_key(file_path): + bucket_name = file_path.split('/')[2] + object_name = file_path.split(bucket_name + '/')[-1] + return bucket_name, object_name + +def copy_file_to_s3(cfg, file_path_local, file_path_s3): + # works like upload_file_to_s3, but takes the destination path verbatim instead of deriving it from cfg + # file_path_s3: s3://{bucket}/{key} + bucket_name, key = s3path_to_bucket_key(file_path_s3) + tic = time.time() + s3_client = init_s3(cfg.checkpoint.write_s3_config) + + # Upload the file + with open(file_path_local, 'rb') as f: + s3_client.upload_fileobj(f, bucket_name, key) + full_s3_path = f"s3://{bucket_name}/{key}" + logger.info(f'copy file: {file_path_local} {full_s3_path} | use time: {time.time()-tic}') + return full_s3_path
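+# --- Illustrative usage sketch (not part of the original file): parsing an s3 path into (bucket, key); the path below is made up. +if __name__ == "__main__": + bucket, key = s3path_to_bucket_key("s3://my-bucket/checkpoints/run/ckpt.pt") + assert bucket == "my-bucket" and key == "checkpoints/run/ckpt.pt"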
\ No newline at end of file diff --git a/third_party/PartField/partfield/model/PVCNN/encoder_pc.py b/third_party/PartField/partfield/model/PVCNN/encoder_pc.py new file mode 100644 index 0000000000000000000000000000000000000000..25a384ec0b7b8ce19d0336f1edd2463b8b3acd4e --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/encoder_pc.py @@ -0,0 +1,243 @@ +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited. + +from typing import Dict +import math + +import numpy as np +import torch +from torch import nn +import torch.nn.functional as F +from torch_scatter import scatter_mean #, scatter_max + +from .unet_3daware import setup_unet #UNetTriplane3dAware +from .conv_pointnet import ConvPointnet + +from .pc_encoder import PVCNNEncoder #PointNet + +import einops + +from .dnnlib_util import ScopedTorchProfiler, printarr + +def generate_plane_features(p, c, resolution, plane='xz'): + """ + Args: + p: (B,n_p,3) + c: (B,C,n_p) + """ + padding = 0. + c_dim = c.size(1) + # acquire indices of features in plane + xy = normalize_coordinate(p.clone(), plane=plane, padding=padding) # normalize to the range of (0, 1) + index = coordinate2index(xy, resolution) + + # scatter plane features from points + fea_plane = c.new_zeros(p.size(0), c_dim, resolution**2) + fea_plane = scatter_mean(c, index, out=fea_plane) # B x 512 x reso^2 + fea_plane = fea_plane.reshape(p.size(0), c_dim, resolution, resolution) # sparse matrix (B x 512 x reso x reso) + return fea_plane + +def normalize_coordinate(p, padding=0.1, plane='xz'): + ''' Normalize coordinate to [0, 1] for unit cube experiments + + Args: + p (tensor): point + padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55] + plane (str): plane feature type, ['xz', 'xy', 'yz'] + ''' + if plane == 'xz': + xy = p[:, :, [0, 2]] + elif plane =='xy': + xy = p[:, :, [0, 1]] + else: + xy = p[:, :, [1, 2]] + + xy_new = xy / (1 + padding + 10e-6) # (-0.5, 0.5) + xy_new = xy_new + 0.5 # range (0, 1) + + # if there are outliers out of the range + if xy_new.max() >= 1: + xy_new[xy_new >= 1] = 1 - 10e-6 + if xy_new.min() < 0: + xy_new[xy_new < 0] = 0.0 + return xy_new
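+# --- Illustrative usage sketch (not part of the original file): project random points in [-0.5, 0.5) onto the xz plane; shapes and values are arbitrary. +if __name__ == "__main__": + pts = torch.rand(2, 1024, 3) - 0.5 # (B, n_p, 3) + xz = normalize_coordinate(pts.clone(), padding=0., plane='xz') + assert xz.shape == (2, 1024, 2) and 0. <= xz.min() and xz.max() < 1.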
+ + +def coordinate2index(x, resolution): + ''' Convert normalized plane coordinates in [0, 1] to flat indices on a resolution x resolution grid. + Corresponds to our 3D model + + Args: + x (tensor): normalized coordinates, (B, n_p, 2) + resolution (int): defined resolution + ''' + x = (x * resolution).long() + index = x[:, :, 0] + resolution * x[:, :, 1] + index = index[:, None, :] + return index + +def softclip(x, min, max, hardness=5): + # Soft clipping for the logsigma + x = min + F.softplus(hardness*(x - min))/hardness + x = max - F.softplus(-hardness*(x - max))/hardness + return x + + +def sample_triplane_feat(feature_triplane, normalized_pos): + ''' + normalized_pos [-1, 1] + ''' + tri_plane = torch.unbind(feature_triplane, dim=1) + + x_feat = F.grid_sample( + tri_plane[0], + torch.cat( + [normalized_pos[:, :, 0:1], normalized_pos[:, :, 1:2]], + dim=-1).unsqueeze(dim=1), padding_mode='border', + align_corners=True) + y_feat = F.grid_sample( + tri_plane[1], + torch.cat( + [normalized_pos[:, :, 1:2], normalized_pos[:, :, 2:3]], + dim=-1).unsqueeze(dim=1), padding_mode='border', + align_corners=True) + + z_feat = F.grid_sample( + tri_plane[2], + torch.cat( + [normalized_pos[:, :, 0:1], normalized_pos[:, :, 2:3]], + dim=-1).unsqueeze(dim=1), padding_mode='border', + align_corners=True) + final_feat = (x_feat + y_feat + z_feat) + final_feat = final_feat.squeeze(dim=2).permute(0, 2, 1) # (B, N, C) + return final_feat + + +# @persistence.persistent_class +class TriPlanePC2Encoder(torch.nn.Module): + # Encoder that encodes a point cloud into a triplane feature map, similar to ConvOccNet + def __init__( + self, + cfg, + device='cuda', + shape_min=-1.0, + shape_length=2.0, + use_2d_feat=False, + # point_encoder='pvcnn', + # use_point_scatter=False + ): + """ + Outputs latent triplane from PC input + Configs: + max_logsigma: (float) Soft clip upper range for logsigma + min_logsigma: (float) Soft clip lower range for logsigma + point_encoder_type: (str) one of ['pvcnn', 'pointnet'] + pvcnn_flatten_voxels: (bool) for pvcnn whether to reduce voxel + features (instead of scattering point features) + unet_cfg: (dict) + z_triplane_channels: (int) output latent triplane + z_triplane_resolution: (int) + Args: + + """ + # assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0 + super().__init__() + self.device = device + + self.cfg = cfg + + self.shape_min = shape_min + self.shape_length = shape_length + + self.z_triplane_resolution = cfg.z_triplane_resolution + z_triplane_channels = cfg.z_triplane_channels + + point_encoder_out_dim = z_triplane_channels #* 2 + + in_channels = 6 + # self.resample_filter=[1, 3, 3, 1] + if cfg.point_encoder_type == 'pvcnn': + self.pc_encoder = PVCNNEncoder(point_encoder_out_dim, + device=self.device, in_channels=in_channels, use_2d_feat=use_2d_feat) # Encodes points into a voxel feature volume. 
+ elif cfg.point_encoder_type == 'pointnet': + # TODO the pointnet was buggy, investigate + self.pc_encoder = ConvPointnet(c_dim=point_encoder_out_dim, + dim=in_channels, hidden_dim=32, + plane_resolution=self.z_triplane_resolution, + padding=0) + else: + raise NotImplementedError(f"Point encoder {cfg.point_encoder_type} not implemented") + + if cfg.unet_cfg.enabled: + self.unet_encoder = setup_unet( + output_channels=point_encoder_out_dim, + input_channels=point_encoder_out_dim, + unet_cfg=cfg.unet_cfg) + else: + self.unet_encoder = None + + # @ScopedTorchProfiler('encode') + def encode(self, point_cloud_xyz, point_cloud_feature, mv_feat=None, pc2pc_idx=None) -> Dict: + # output = AttrDict() + point_cloud_xyz = (point_cloud_xyz - self.shape_min) / self.shape_length # [0, 1] + point_cloud_xyz = point_cloud_xyz - 0.5 # [-0.5, 0.5] + point_cloud = torch.cat([point_cloud_xyz, point_cloud_feature], dim=-1) + + if self.cfg.point_encoder_type == 'pvcnn': + if mv_feat is not None: + pc_feat, points_feat = self.pc_encoder(point_cloud, mv_feat, pc2pc_idx) + else: + pc_feat, points_feat = self.pc_encoder(point_cloud) # 3D feature volume: BxDx32x32x32 + if self.cfg.use_point_scatter: + # Scattering from PVCNN point features + points_feat_ = points_feat[0] + # shape: batch, latent size, resolution, resolution (e.g. 16, 256, 64, 64) + pc_feat_1 = generate_plane_features(point_cloud_xyz, points_feat_, + resolution=self.z_triplane_resolution, plane='xy') + pc_feat_2 = generate_plane_features(point_cloud_xyz, points_feat_, + resolution=self.z_triplane_resolution, plane='yz') + pc_feat_3 = generate_plane_features(point_cloud_xyz, points_feat_, + resolution=self.z_triplane_resolution, plane='xz') + pc_feat = pc_feat[0] + + else: + pc_feat = pc_feat[0] + sf = self.z_triplane_resolution//32 # 32 is PVCNN's voxel dim + + pc_feat_1 = torch.mean(pc_feat, dim=-1) #xy_plane, normalize in z plane + pc_feat_2 = torch.mean(pc_feat, dim=-3) #yz_plane, normalize in x plane + pc_feat_3 = torch.mean(pc_feat, dim=-2) #xz_plane, normalize in y plane + + # nearest upsample + pc_feat_1 = einops.repeat(pc_feat_1, 'b c h w -> b c (h hm ) (w wm)', hm = sf, wm = sf) + pc_feat_2 = einops.repeat(pc_feat_2, 'b c h w -> b c (h hm) (w wm)', hm = sf, wm = sf) + pc_feat_3 = einops.repeat(pc_feat_3, 'b c h w -> b c (h hm) (w wm)', hm = sf, wm = sf) + elif self.cfg.point_encoder_type == 'pointnet': + assert self.cfg.use_point_scatter + # Run ConvPointnet + pc_feat = self.pc_encoder(point_cloud) + pc_feat_1 = pc_feat['xy'] # + pc_feat_2 = pc_feat['yz'] + pc_feat_3 = pc_feat['xz'] + else: + raise NotImplementedError() + + if self.unet_encoder is not None: + # TODO eval adding a skip connection + # Unet expects B, 3, C, H, W + pc_feat_tri_plane_stack_pre = torch.stack([pc_feat_1, pc_feat_2, pc_feat_3], dim=1) + # dpc_feat_tri_plane_stack = self.unet_encoder(pc_feat_tri_plane_stack_pre) + # pc_feat_tri_plane_stack = pc_feat_tri_plane_stack_pre + dpc_feat_tri_plane_stack + pc_feat_tri_plane_stack = self.unet_encoder(pc_feat_tri_plane_stack_pre) + pc_feat_1, pc_feat_2, pc_feat_3 = torch.unbind(pc_feat_tri_plane_stack, dim=1) + + return torch.stack([pc_feat_1, pc_feat_2, pc_feat_3], dim=1) + + def forward(self, point_cloud_xyz, point_cloud_feature=None, mv_feat=None, pc2pc_idx=None): + return self.encode(point_cloud_xyz, point_cloud_feature=point_cloud_feature, mv_feat=mv_feat, pc2pc_idx=pc2pc_idx) \ No newline at end of file diff --git a/third_party/PartField/partfield/model/PVCNN/pc_encoder.py 
b/third_party/PartField/partfield/model/PVCNN/pc_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..adeaeba96147a24515eb289b09da77bc4716869c --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/pc_encoder.py @@ -0,0 +1,90 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import functools + +from .pv_module import SharedMLP, PVConv + +def create_pointnet_components( + blocks, in_channels, with_se=False, normalize=True, eps=0, + width_multiplier=1, voxel_resolution_multiplier=1, scale_pvcnn=False, device='cuda'): + r, vr = width_multiplier, voxel_resolution_multiplier + layers, concat_channels = [], 0 + for out_channels, num_blocks, voxel_resolution in blocks: + out_channels = int(r * out_channels) + if voxel_resolution is None: + block = functools.partial(SharedMLP, device=device) + else: + block = functools.partial( + PVConv, kernel_size=3, resolution=int(vr * voxel_resolution), + with_se=with_se, normalize=normalize, eps=eps, scale_pvcnn=scale_pvcnn, device=device) + for _ in range(num_blocks): + layers.append(block(in_channels, out_channels)) + in_channels = out_channels + concat_channels += out_channels + return layers, in_channels, concat_channels + +class PCMerger(nn.Module): + """Merge the surface-sampled PC with the rendering-backprojected PC (w/ 2D features).""" + def __init__(self, in_channels=204, device="cuda"): + super(PCMerger, self).__init__() + self.mlp_normal = SharedMLP(3, [128, 128], device=device) + self.mlp_rgb = SharedMLP(3, [128, 128], device=device) + self.mlp_sam = SharedMLP(204 - 6, [128, 128], device=device) + + def forward(self, feat, mv_feat, pc2pc_idx): + mv_feat_normal = self.mlp_normal(mv_feat[:, :3, :]) + mv_feat_rgb = self.mlp_rgb(mv_feat[:, 3:6, :]) + mv_feat_sam = self.mlp_sam(mv_feat[:, 6:, :]) + + mv_feat_normal = mv_feat_normal.permute(0, 2, 1) + mv_feat_rgb = mv_feat_rgb.permute(0, 2, 1) + mv_feat_sam = mv_feat_sam.permute(0, 2, 1) + feat = feat.permute(0, 2, 1) + + for i in range(mv_feat.shape[0]): + mask = (pc2pc_idx[i] != -1).reshape(-1) + idx = pc2pc_idx[i][mask].reshape(-1) + feat[i][mask] += mv_feat_normal[i][idx] + mv_feat_rgb[i][idx] + mv_feat_sam[i][idx] + + return feat.permute(0, 2, 1) + + +class PVCNNEncoder(nn.Module): + def __init__(self, pvcnn_feat_dim, device='cuda', in_channels=3, use_2d_feat=False): + super(PVCNNEncoder, self).__init__() + self.device = device + self.blocks = ((pvcnn_feat_dim, 1, 32), (128, 2, 16), (256, 1, 8)) + self.use_2d_feat = use_2d_feat + if in_channels == 6: + self.append_channel = 2 + elif in_channels == 3: + self.append_channel = 1 + else: + raise NotImplementedError + layers, channels_point, concat_channels_point = create_pointnet_components( + blocks=self.blocks, in_channels=in_channels + self.append_channel, with_se=False, normalize=False, + width_multiplier=1, voxel_resolution_multiplier=1, scale_pvcnn=True, + device=device + ) + self.encoder = nn.ModuleList(layers) #.to(self.device) + if self.use_2d_feat: + self.merger = PCMerger() + + def forward(self, input_pc, mv_feat=None, pc2pc_idx=None): + features = input_pc.permute(0, 2, 1) * 2 # make point cloud [-1, 1] + coords = features[:, :3, :] + out_features_list = [] + voxel_feature_list = [] + zero_padding = torch.zeros(features.shape[0], self.append_channel, features.shape[-1], device=features.device, dtype=torch.float) + features = torch.cat([features, zero_padding], dim=1) + + for i in range(len(self.encoder)): + features, _, voxel_feature = self.encoder[i]((features, coords)) + if 
i == 0 and mv_feat is not None: + features = self.merger(features, mv_feat.permute(0, 2, 1), pc2pc_idx) + out_features_list.append(features) + voxel_feature_list.append(voxel_feature) + return voxel_feature_list, out_features_list \ No newline at end of file diff --git a/third_party/PartField/partfield/model/PVCNN/pv_module/__init__.py b/third_party/PartField/partfield/model/PVCNN/pv_module/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7fd32e598f709503f4e35171e09fbbedec05f9c3 --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/pv_module/__init__.py @@ -0,0 +1,2 @@ +from .pvconv import PVConv +from .shared_mlp import SharedMLP \ No newline at end of file diff --git a/third_party/PartField/partfield/model/PVCNN/pv_module/ball_query.py b/third_party/PartField/partfield/model/PVCNN/pv_module/ball_query.py new file mode 100644 index 0000000000000000000000000000000000000000..ea2a8203baa3fc1c94959f1ba852839365bf38ce --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/pv_module/ball_query.py @@ -0,0 +1,34 @@ +import torch +import torch.nn as nn + +from . import functional as F + +__all__ = ['BallQuery'] + + +class BallQuery(nn.Module): + def __init__(self, radius, num_neighbors, include_coordinates=True): + super().__init__() + self.radius = radius + self.num_neighbors = num_neighbors + self.include_coordinates = include_coordinates + + def forward(self, points_coords, centers_coords, points_features=None): + points_coords = points_coords.contiguous() + centers_coords = centers_coords.contiguous() + neighbor_indices = F.ball_query(centers_coords, points_coords, self.radius, self.num_neighbors) + neighbor_coordinates = F.grouping(points_coords, neighbor_indices) + neighbor_coordinates = neighbor_coordinates - centers_coords.unsqueeze(-1) + + if points_features is None: + assert self.include_coordinates, 'No Features For Grouping' + neighbor_features = neighbor_coordinates + else: + neighbor_features = F.grouping(points_features, neighbor_indices) + if self.include_coordinates: + neighbor_features = torch.cat([neighbor_coordinates, neighbor_features], dim=1) + return neighbor_features + + def extra_repr(self): + return 'radius={}, num_neighbors={}{}'.format( + self.radius, self.num_neighbors, ', include coordinates' if self.include_coordinates else '') diff --git a/third_party/PartField/partfield/model/PVCNN/pv_module/frustum.py b/third_party/PartField/partfield/model/PVCNN/pv_module/frustum.py new file mode 100644 index 0000000000000000000000000000000000000000..fb302963a6472f949f4ab69ed42575d79b68b4ea --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/pv_module/frustum.py @@ -0,0 +1,141 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from . 
import functional as PF + +__all__ = ['FrustumPointNetLoss', 'get_box_corners_3d'] + + +class FrustumPointNetLoss(nn.Module): + def __init__( + self, num_heading_angle_bins, num_size_templates, size_templates, box_loss_weight=1.0, + corners_loss_weight=10.0, heading_residual_loss_weight=20.0, size_residual_loss_weight=20.0): + super().__init__() + self.box_loss_weight = box_loss_weight + self.corners_loss_weight = corners_loss_weight + self.heading_residual_loss_weight = heading_residual_loss_weight + self.size_residual_loss_weight = size_residual_loss_weight + + self.num_heading_angle_bins = num_heading_angle_bins + self.num_size_templates = num_size_templates + self.register_buffer('size_templates', size_templates.view(self.num_size_templates, 3)) + self.register_buffer( + 'heading_angle_bin_centers', torch.arange(0, 2 * np.pi, 2 * np.pi / self.num_heading_angle_bins) + ) + + def forward(self, inputs, targets): + mask_logits = inputs['mask_logits'] # (B, 2, N) + center_reg = inputs['center_reg'] # (B, 3) + center = inputs['center'] # (B, 3) + heading_scores = inputs['heading_scores'] # (B, NH) + heading_residuals_normalized = inputs['heading_residuals_normalized'] # (B, NH) + heading_residuals = inputs['heading_residuals'] # (B, NH) + size_scores = inputs['size_scores'] # (B, NS) + size_residuals_normalized = inputs['size_residuals_normalized'] # (B, NS, 3) + size_residuals = inputs['size_residuals'] # (B, NS, 3) + + mask_logits_target = targets['mask_logits'] # (B, N) + center_target = targets['center'] # (B, 3) + heading_bin_id_target = targets['heading_bin_id'] # (B, ) + heading_residual_target = targets['heading_residual'] # (B, ) + size_template_id_target = targets['size_template_id'] # (B, ) + size_residual_target = targets['size_residual'] # (B, 3) + + batch_size = center.size(0) + batch_id = torch.arange(batch_size, device=center.device) + + # Basic Classification and Regression losses + mask_loss = F.cross_entropy(mask_logits, mask_logits_target) + heading_loss = F.cross_entropy(heading_scores, heading_bin_id_target) + size_loss = F.cross_entropy(size_scores, size_template_id_target) + center_loss = PF.huber_loss(torch.norm(center_target - center, dim=-1), delta=2.0) + center_reg_loss = PF.huber_loss(torch.norm(center_target - center_reg, dim=-1), delta=1.0) + + # Refinement losses for size/heading + heading_residuals_normalized = heading_residuals_normalized[batch_id, heading_bin_id_target] # (B, ) + heading_residual_normalized_target = heading_residual_target / (np.pi / self.num_heading_angle_bins) + heading_residual_normalized_loss = PF.huber_loss( + heading_residuals_normalized - heading_residual_normalized_target, delta=1.0 + ) + size_residuals_normalized = size_residuals_normalized[batch_id, size_template_id_target] # (B, 3) + size_residual_normalized_target = size_residual_target / self.size_templates[size_template_id_target] + size_residual_normalized_loss = PF.huber_loss( + torch.norm(size_residual_normalized_target - size_residuals_normalized, dim=-1), delta=1.0 + ) + + # Bounding box losses + heading = (heading_residuals[batch_id, heading_bin_id_target] + + self.heading_angle_bin_centers[heading_bin_id_target]) # (B, ) + # Warning: in origin code, size_residuals are added twice (issue #43 and #49 in charlesq34/frustum-pointnets) + size = (size_residuals[batch_id, size_template_id_target] + + self.size_templates[size_template_id_target]) # (B, 3) + corners = get_box_corners_3d(centers=center, headings=heading, sizes=size, with_flip=False) # (B, 3, 8) + heading_target = 
self.heading_angle_bin_centers[heading_bin_id_target] + heading_residual_target # (B, ) + size_target = self.size_templates[size_template_id_target] + size_residual_target # (B, 3) + corners_target, corners_target_flip = get_box_corners_3d( + centers=center_target, headings=heading_target, + sizes=size_target, with_flip=True) # (B, 3, 8) + corners_loss = PF.huber_loss( + torch.min( + torch.norm(corners - corners_target, dim=1), torch.norm(corners - corners_target_flip, dim=1) + ), delta=1.0) + # Summing up + loss = mask_loss + self.box_loss_weight * ( + center_loss + center_reg_loss + heading_loss + size_loss + + self.heading_residual_loss_weight * heading_residual_normalized_loss + + self.size_residual_loss_weight * size_residual_normalized_loss + + self.corners_loss_weight * corners_loss + ) + + return loss + + +def get_box_corners_3d(centers, headings, sizes, with_flip=False): + """ + :param centers: coords of box centers, FloatTensor[N, 3] + :param headings: heading angles, FloatTensor[N, ] + :param sizes: box sizes, FloatTensor[N, 3] + :param with_flip: bool, whether to return flipped box (headings + np.pi) + :return: + coords of box corners, FloatTensor[N, 3, 8] + NOTE: corner points are in counter clockwise order, e.g., + 2--1 + 3--0 5 + 7--4 + """ + l = sizes[:, 0] # (N,) + w = sizes[:, 1] # (N,) + h = sizes[:, 2] # (N,) + x_corners = torch.stack([l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2], dim=1) # (N, 8) + y_corners = torch.stack([h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2], dim=1) # (N, 8) + z_corners = torch.stack([w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], dim=1) # (N, 8) + + c = torch.cos(headings) # (N,) + s = torch.sin(headings) # (N,) + o = torch.ones_like(headings) # (N,) + z = torch.zeros_like(headings) # (N,) + + centers = centers.unsqueeze(-1) # (B, 3, 1) + corners = torch.stack([x_corners, y_corners, z_corners], dim=1) # (N, 3, 8) + R = torch.stack([c, z, s, z, o, z, -s, z, c], dim=1).view(-1, 3, 3) # roty matrix: (N, 3, 3) + if with_flip: + R_flip = torch.stack([-c, z, -s, z, o, z, s, z, -c], dim=1).view(-1, 3, 3) + return torch.matmul(R, corners) + centers, torch.matmul(R_flip, corners) + centers + else: + return torch.matmul(R, corners) + centers + + # centers = centers.unsqueeze(1) # (B, 1, 3) + # corners = torch.stack([x_corners, y_corners, z_corners], dim=-1) # (N, 8, 3) + # RT = torch.stack([c, z, -s, z, o, z, s, z, c], dim=1).view(-1, 3, 3) # (N, 3, 3) + # if with_flip: + # RT_flip = torch.stack([-c, z, s, z, o, z, -s, z, -c], dim=1).view(-1, 3, 3) # (N, 3, 3) + # return torch.matmul(corners, RT) + centers, torch.matmul(corners, RT_flip) + centers # (N, 8, 3) + # else: + # return torch.matmul(corners, RT) + centers # (N, 8, 3) + + # corners = torch.stack([x_corners, y_corners, z_corners], dim=1) # (N, 3, 8) + # R = torch.stack([c, z, s, z, o, z, -s, z, c], dim=1).view(-1, 3, 3) # (N, 3, 3) + # corners = torch.matmul(R, corners) + centers.unsqueeze(2) # (N, 3, 8) + # corners = corners.transpose(1, 2) # (N, 8, 3) diff --git a/third_party/PartField/partfield/model/PVCNN/pv_module/functional/__init__.py b/third_party/PartField/partfield/model/PVCNN/pv_module/functional/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..993d1d12511dce369d781b60be75e79a71762e47 --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/pv_module/functional/__init__.py @@ -0,0 +1 @@ +from .devoxelization import trilinear_devoxelize diff --git 
a/third_party/PartField/partfield/model/PVCNN/pv_module/functional/devoxelization.py b/third_party/PartField/partfield/model/PVCNN/pv_module/functional/devoxelization.py new file mode 100644 index 0000000000000000000000000000000000000000..c60dab12d804ec3b41b53f7da3eecb20917077fc --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/pv_module/functional/devoxelization.py @@ -0,0 +1,12 @@ +from torch.autograd import Function +import torch +import torch.nn.functional as F + +__all__ = ['trilinear_devoxelize'] + +def trilinear_devoxelize(c, coords, r, training=None): + coords = (coords * 2 + 1.0) / r - 1.0 + coords = coords.permute(0, 2, 1).reshape(c.shape[0], 1, 1, -1, 3) + f = F.grid_sample(input=c, grid=coords, padding_mode='border', align_corners=False) + f = f.squeeze(dim=2).squeeze(dim=2) + return f diff --git a/third_party/PartField/partfield/model/PVCNN/pv_module/loss.py b/third_party/PartField/partfield/model/PVCNN/pv_module/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..a35cdd8a0fe83c8ca6b1d7040b66d142e76471df --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/pv_module/loss.py @@ -0,0 +1,10 @@ +import torch.nn as nn + +from . import functional as F + +__all__ = ['KLLoss'] + + +class KLLoss(nn.Module): + def forward(self, x, y): + return F.kl_loss(x, y) diff --git a/third_party/PartField/partfield/model/PVCNN/pv_module/pointnet.py b/third_party/PartField/partfield/model/PVCNN/pv_module/pointnet.py new file mode 100644 index 0000000000000000000000000000000000000000..e58e01cc2f84925d4817a8a04aefdbc3d36d484e --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/pv_module/pointnet.py @@ -0,0 +1,113 @@ +import torch +import torch.nn as nn + +from . import functional as F +from .ball_query import BallQuery +from .shared_mlp import SharedMLP + +__all__ = ['PointNetAModule', 'PointNetSAModule', 'PointNetFPModule'] + + +class PointNetAModule(nn.Module): + def __init__(self, in_channels, out_channels, include_coordinates=True): + super().__init__() + if not isinstance(out_channels, (list, tuple)): + out_channels = [[out_channels]] + elif not isinstance(out_channels[0], (list, tuple)): + out_channels = [out_channels] + + mlps = [] + total_out_channels = 0 + for _out_channels in out_channels: + mlps.append( + SharedMLP( + in_channels=in_channels + (3 if include_coordinates else 0), + out_channels=_out_channels, dim=1) + ) + total_out_channels += _out_channels[-1] + + self.include_coordinates = include_coordinates + self.out_channels = total_out_channels + self.mlps = nn.ModuleList(mlps) + + def forward(self, inputs): + features, coords = inputs + if self.include_coordinates: + features = torch.cat([features, coords], dim=1) + coords = torch.zeros((coords.size(0), 3, 1), device=coords.device) + if len(self.mlps) > 1: + features_list = [] + for mlp in self.mlps: + features_list.append(mlp(features).max(dim=-1, keepdim=True).values) + return torch.cat(features_list, dim=1), coords + else: + return self.mlps[0](features).max(dim=-1, keepdim=True).values, coords + + def extra_repr(self): + return f'out_channels={self.out_channels}, include_coordinates={self.include_coordinates}' + + +class PointNetSAModule(nn.Module): + def __init__(self, num_centers, radius, num_neighbors, in_channels, out_channels, include_coordinates=True): + super().__init__() + if not isinstance(radius, (list, tuple)): + radius = [radius] + if not isinstance(num_neighbors, (list, tuple)): + num_neighbors = [num_neighbors] * len(radius) + assert len(radius) == 
len(num_neighbors) + if not isinstance(out_channels, (list, tuple)): + out_channels = [[out_channels]] * len(radius) + elif not isinstance(out_channels[0], (list, tuple)): + out_channels = [out_channels] * len(radius) + assert len(radius) == len(out_channels) + + groupers, mlps = [], [] + total_out_channels = 0 + for _radius, _out_channels, _num_neighbors in zip(radius, out_channels, num_neighbors): + groupers.append( + BallQuery(radius=_radius, num_neighbors=_num_neighbors, include_coordinates=include_coordinates) + ) + mlps.append( + SharedMLP( + in_channels=in_channels + (3 if include_coordinates else 0), + out_channels=_out_channels, dim=2) + ) + total_out_channels += _out_channels[-1] + + self.num_centers = num_centers + self.out_channels = total_out_channels + self.groupers = nn.ModuleList(groupers) + self.mlps = nn.ModuleList(mlps) + + def forward(self, inputs): + features, coords = inputs + centers_coords = F.furthest_point_sample(coords, self.num_centers) + features_list = [] + for grouper, mlp in zip(self.groupers, self.mlps): + features_list.append(mlp(grouper(coords, centers_coords, features)).max(dim=-1).values) + if len(features_list) > 1: + return torch.cat(features_list, dim=1), centers_coords + else: + return features_list[0], centers_coords + + def extra_repr(self): + return f'num_centers={self.num_centers}, out_channels={self.out_channels}' + + +class PointNetFPModule(nn.Module): + def __init__(self, in_channels, out_channels): + super().__init__() + self.mlp = SharedMLP(in_channels=in_channels, out_channels=out_channels, dim=1) + + def forward(self, inputs): + if len(inputs) == 3: + points_coords, centers_coords, centers_features = inputs + points_features = None + else: + points_coords, centers_coords, centers_features, points_features = inputs + interpolated_features = F.nearest_neighbor_interpolate(points_coords, centers_coords, centers_features) + if points_features is not None: + interpolated_features = torch.cat( + [interpolated_features, points_features], dim=1 + ) + return self.mlp(interpolated_features), points_coords diff --git a/third_party/PartField/partfield/model/PVCNN/pv_module/pvconv.py b/third_party/PartField/partfield/model/PVCNN/pv_module/pvconv.py new file mode 100644 index 0000000000000000000000000000000000000000..a64705da194cf2d32ff641025fad7b92d71dc67b --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/pv_module/pvconv.py @@ -0,0 +1,38 @@ +import torch.nn as nn + +from . 
import functional as F +from .voxelization import Voxelization +from .shared_mlp import SharedMLP +import torch + +__all__ = ['PVConv'] + + +class PVConv(nn.Module): + def __init__( + self, in_channels, out_channels, kernel_size, resolution, with_se=False, normalize=True, eps=0, scale_pvcnn=False, + device='cuda'): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.resolution = resolution + self.voxelization = Voxelization(resolution, normalize=normalize, eps=eps, scale_pvcnn=scale_pvcnn) + voxel_layers = [ + nn.Conv3d(in_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2, device=device), + nn.InstanceNorm3d(out_channels, eps=1e-4, device=device), + nn.LeakyReLU(0.1, True), + nn.Conv3d(out_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2, device=device), + nn.InstanceNorm3d(out_channels, eps=1e-4, device=device), + nn.LeakyReLU(0.1, True), + ] + self.voxel_layers = nn.Sequential(*voxel_layers) + self.point_features = SharedMLP(in_channels, out_channels, device=device) + + def forward(self, inputs): + features, coords = inputs + voxel_features, voxel_coords = self.voxelization(features, coords) + voxel_features = self.voxel_layers(voxel_features) + devoxel_features = F.trilinear_devoxelize(voxel_features, voxel_coords, self.resolution, self.training) + fused_features = devoxel_features + self.point_features(features) + return fused_features, coords, voxel_features diff --git a/third_party/PartField/partfield/model/PVCNN/pv_module/shared_mlp.py b/third_party/PartField/partfield/model/PVCNN/pv_module/shared_mlp.py new file mode 100644 index 0000000000000000000000000000000000000000..e1d4ff864c05b894194ef11ac4b629ec72c4952b --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/pv_module/shared_mlp.py @@ -0,0 +1,35 @@ +import torch.nn as nn + +__all__ = ['SharedMLP'] + + +class SharedMLP(nn.Module): + def __init__(self, in_channels, out_channels, dim=1, device='cuda'): + super().__init__() + # print('==> SharedMLP device: ', device) + if dim == 1: + conv = nn.Conv1d + bn = nn.InstanceNorm1d + elif dim == 2: + conv = nn.Conv2d + bn = nn.InstanceNorm2d # 2d norm to match the 2d conv + else: + raise ValueError + if not isinstance(out_channels, (list, tuple)): + out_channels = [out_channels] + layers = [] + for oc in out_channels: + layers.extend( + [ + conv(in_channels, oc, 1, device=device), + bn(oc, device=device), + nn.ReLU(True), + ]) + in_channels = oc + self.layers = nn.Sequential(*layers) + + def forward(self, inputs): + if isinstance(inputs, (list, tuple)): + return (self.layers(inputs[0]), *inputs[1:]) + else: + return self.layers(inputs)
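+# --- Illustrative usage sketch (not part of the original file): SharedMLP is a stack of 1x1 convs, i.e. a per-point MLP mapping (B, C_in, N) -> (B, C_out, N). +if __name__ == "__main__": + import torch + mlp = SharedMLP(3, [64, 128], dim=1, device='cpu') + assert mlp(torch.randn(2, 3, 1024)).shape == (2, 128, 1024)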
\ No newline at end of file diff --git a/third_party/PartField/partfield/model/PVCNN/pv_module/voxelization.py b/third_party/PartField/partfield/model/PVCNN/pv_module/voxelization.py new file mode 100644 index 0000000000000000000000000000000000000000..15535abc63f7adebcd12bc436e84710c7c9862d2 --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/pv_module/voxelization.py @@ -0,0 +1,50 @@ +import torch +import torch.nn as nn + +from . import functional as F + +__all__ = ['Voxelization'] + + +def my_voxelization(features, coords, resolution): + b, c, _ = features.shape + result = torch.zeros(b, c + 1, resolution * resolution * resolution, device=features.device, dtype=torch.float) + r = resolution + r2 = resolution * resolution + indices = coords[:, 0] * r2 + coords[:, 1] * r + coords[:, 2] + indices = indices.unsqueeze(dim=1).expand(-1, result.shape[1], -1) + features = torch.cat([features, torch.ones(features.shape[0], 1, features.shape[2], device=features.device, dtype=features.dtype)], dim=1) + out_feature = result.scatter_(index=indices.long(), src=features, dim=2, reduce='add') + cnt = out_feature[:, -1:, :] + zero_mask = (cnt == 0).float() + cnt = cnt * (1 - zero_mask) + zero_mask * 1e-5 + vox_feature = out_feature[:, :-1, :] / cnt + return vox_feature.view(b, c, resolution, resolution, resolution) + +class Voxelization(nn.Module): + def __init__(self, resolution, normalize=True, eps=0, scale_pvcnn=False): + super().__init__() + self.r = int(resolution) + self.normalize = normalize + self.eps = eps + self.scale_pvcnn = scale_pvcnn + assert not normalize + + def forward(self, features, coords): + with torch.no_grad(): + coords = coords.detach() + + if self.normalize: + # center the coords first, as in upstream PVCNN; norm_coords is undefined otherwise + norm_coords = coords - coords.mean(dim=2, keepdim=True) + norm_coords = norm_coords / (norm_coords.norm(dim=1, keepdim=True).max(dim=2, keepdim=True).values * 2.0 + self.eps) + 0.5 + else: + if self.scale_pvcnn: + norm_coords = (coords + 1) / 2.0 # [0, 1] + else: + # upstream PVCNN centers the coords before rescaling + norm_coords = coords - coords.mean(dim=2, keepdim=True) + norm_coords = (norm_coords + 1) / 2.0 + norm_coords = torch.clamp(norm_coords * self.r, 0, self.r - 1) + vox_coords = torch.round(norm_coords) + new_vox_feat = my_voxelization(features, vox_coords, self.r) + return new_vox_feat, norm_coords + + def extra_repr(self): + return 'resolution={}{}'.format(self.r, ', normalized eps = {}'.format(self.eps) if self.normalize else '') diff --git a/third_party/PartField/partfield/model/PVCNN/unet_3daware.py b/third_party/PartField/partfield/model/PVCNN/unet_3daware.py new file mode 100644 index 0000000000000000000000000000000000000000..b0084f0c1d6989ae4ad103f364401c2f2bd5e361 --- /dev/null +++ b/third_party/PartField/partfield/model/PVCNN/unet_3daware.py @@ -0,0 +1,427 @@ +import numpy as np + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import init + +import einops + +def conv3x3(in_channels, out_channels, stride=1, + padding=1, bias=True, groups=1): + return nn.Conv2d( + in_channels, + out_channels, + kernel_size=3, + stride=stride, + padding=padding, + bias=bias, + groups=groups) + +def upconv2x2(in_channels, out_channels, mode='transpose'): + if mode == 'transpose': + return nn.ConvTranspose2d( + in_channels, + out_channels, + kernel_size=2, + stride=2) + else: + # out_channels is always going to be the same + # as in_channels + return nn.Sequential( + nn.Upsample(mode='bilinear', scale_factor=2), + conv1x1(in_channels, out_channels)) + +def conv1x1(in_channels, out_channels, groups=1): + return nn.Conv2d( + in_channels, + out_channels, + kernel_size=1, + groups=groups, + stride=1) + +class ConvTriplane3dAware(nn.Module): + """ 3D aware triplane conv (as described in RODIN) """ + def __init__(self, internal_conv_f, in_channels, out_channels, order='xz'): + """ + Args: + internal_conv_f: function that should return a 2D convolution Module + given in and out channels + order: if triplane input is in 'xz' order + """ + super(ConvTriplane3dAware, self).__init__() + # Need 3 separate convolutions + self.in_channels = in_channels + self.out_channels = out_channels +
assert order in ['xz', 'zx'] + self.order = order + # Going to stack from other planes + self.plane_convs = nn.ModuleList([ + internal_conv_f(3*self.in_channels, self.out_channels) for _ in range(3)]) + + def forward(self, triplanes_list): + """ + Args: + triplanes_list: [(B,Ci,H,W)]*3 in xy,yz,(zx or xz) depending on order + Returns: + out_triplanes_list: [(B,Co,H,W)]*3 in xy,yz,(zx or xz) depending on order + """ + inps = list(triplanes_list) + xp = 1 #(yz) + yp = 2 #(zx) + zp = 0 #(xy) + + if self.order == 'xz': + # get into zx order + inps[yp] = einops.rearrange(inps[yp], 'b c x z -> b c z x') + + + oplanes = [None]*3 + # order shouldn't matter + for iplane in [zp, xp, yp]: + # i_plane -> (j,k) + + # need to average out i and convert to (j,k) + # j_plane -> (k,i) + # k_plane -> (i,j) + jplane = (iplane+1)%3 + kplane = (iplane+2)%3 + + ifeat = inps[iplane] + # need to average out nonshared dim + # Average pool across + + # j_plane -> (k,i) -> (k,1) -> (1,k) -> (j,k) + # b c k i -> b c k 1 + jpool = torch.mean(inps[jplane], dim=3 ,keepdim=True) + jpool = einops.rearrange(jpool, 'b c k 1 -> b c 1 k') + jpool = einops.repeat(jpool, 'b c 1 k -> b c j k', j=ifeat.size(2)) + + # k_plane -> (i,j) -> (1,j) -> (j,1) -> (j,k) + # b c i j -> b c 1 j + kpool = torch.mean(inps[kplane], dim=2 ,keepdim=True) + kpool = einops.rearrange(kpool, 'b c 1 j -> b c j 1') + kpool = einops.repeat(kpool, 'b c j 1 -> b c j k', k=ifeat.size(3)) + + # b c h w + # jpool = jpool.expand_as(ifeat) + # kpool = kpool.expand_as(ifeat) + + # concat and conv on feature dim + catfeat = torch.cat([ifeat, jpool, kpool], dim=1) + oplane = self.plane_convs[iplane](catfeat) + oplanes[iplane] = oplane + + if self.order == 'xz': + # get back into xz order + oplanes[yp] = einops.rearrange(oplanes[yp], 'b c z x -> b c x z') + + return oplanes + +def roll_triplanes(triplanes_list): + # B, C, tri, h, w + tristack = torch.stack((triplanes_list),dim=2) + return einops.rearrange(tristack, 'b c tri h w -> b c (tri h) w', tri=3) + +def unroll_triplanes(rolled_triplane): + # B, C, tri*h, w + tristack = einops.rearrange(rolled_triplane, 'b c (tri h) w -> b c tri h w', tri=3) + return torch.unbind(tristack, dim=2) + +def conv1x1triplane3daware(in_channels, out_channels, order='xz', **kwargs): + return ConvTriplane3dAware(lambda inp, out: conv1x1(inp,out,**kwargs), + in_channels, out_channels,order=order) + +def Normalize(in_channels, num_groups=32): + num_groups = min(in_channels, num_groups) # avoid error if in_channels < 32 + return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) + +def nonlinearity(x): + # return F.relu(x) + # Swish + return x*torch.sigmoid(x) + +class Upsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") + if self.with_conv: + x = self.conv(x) + return x + +class Downsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=2, + padding=0) + + def forward(self, x): + if self.with_conv: + pad = (0,1,0,1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) + x = 
self.conv(x) + else: + x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) + return x + +class ResnetBlock3dAware(nn.Module): + def __init__(self, in_channels, out_channels=None): + #, conv_shortcut=False): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + # self.use_conv_shortcut = conv_shortcut + + self.norm1 = Normalize(in_channels) + self.conv1 = conv3x3(self.in_channels, self.out_channels) + + self.norm_mid = Normalize(out_channels) + self.conv_3daware = conv1x1triplane3daware(self.out_channels, self.out_channels) + + self.norm2 = Normalize(out_channels) + self.conv2 = conv3x3(self.out_channels, self.out_channels) + + if self.in_channels != self.out_channels: + self.nin_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x): + # 3x3 plane comm + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + # 1x1 3d aware, crossplane comm + h = self.norm_mid(h) + h = nonlinearity(h) + h = unroll_triplanes(h) + h = self.conv_3daware(h) + h = roll_triplanes(h) + + # 3x3 plane comm + h = self.norm2(h) + h = nonlinearity(h) + h = self.conv2(h) + + if self.in_channels != self.out_channels: + x = self.nin_shortcut(x) + + return x+h + +class DownConv3dAware(nn.Module): + """ + A helper Module that performs 2 convolutions and 1 MaxPool. + A ReLU activation follows each convolution. + """ + def __init__(self, in_channels, out_channels, downsample=True, with_conv=False): + super(DownConv3dAware, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.block = ResnetBlock3dAware(in_channels=in_channels, + out_channels=out_channels) + + self.do_downsample = downsample + self.downsample = Downsample(out_channels, with_conv=with_conv) + + def forward(self, x): + """ + rolled input, rolled output + Args: + x: rolled (b c (tri*h) w) + """ + x = self.block(x) + before_pool = x + # if self.pooling: + # x = self.pool(x) + if self.do_downsample: + # unroll and cat channel-wise (to prevent pooling across triplane boundaries) + x = einops.rearrange(x, 'b c (tri h) w -> b (c tri) h w', tri=3) + x = self.downsample(x) + # undo + x = einops.rearrange(x, 'b (c tri) h w -> b c (tri h) w', tri=3) + return x, before_pool + +class UpConv3dAware(nn.Module): + """ + A helper Module that performs 2 convolutions and 1 UpConvolution. + A ReLU activation follows each convolution. 
+ """ + def __init__(self, in_channels, out_channels, + merge_mode='concat', with_conv=False): #up_mode='transpose', ): + super(UpConv3dAware, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.merge_mode = merge_mode + + self.upsample = Upsample(in_channels, with_conv) + + if self.merge_mode == 'concat': + self.norm1 = Normalize(in_channels+out_channels) + self.block = ResnetBlock3dAware(in_channels=in_channels+out_channels, + out_channels=out_channels) + else: + self.norm1 = Normalize(in_channels) + self.block = ResnetBlock3dAware(in_channels=in_channels, + out_channels=out_channels) + + + def forward(self, from_down, from_up): + """ Forward pass + rolled inputs, rolled output + rolled (b c (tri*h) w) + Arguments: + from_down: tensor from the encoder pathway + from_up: upconv'd tensor from the decoder pathway + """ + # from_up = self.upconv(from_up) + from_up = self.upsample(from_up) + if self.merge_mode == 'concat': + x = torch.cat((from_up, from_down), 1) + else: + x = from_up + from_down + + x = self.norm1(x) + x = self.block(x) + return x + +class UNetTriplane3dAware(nn.Module): + def __init__(self, out_channels, in_channels=3, depth=5, + start_filts=64,# up_mode='transpose', + use_initial_conv=False, + merge_mode='concat', **kwargs): + """ + Arguments: + in_channels: int, number of channels in the input tensor. + Default is 3 for RGB images. + depth: int, number of MaxPools in the U-Net. + start_filts: int, number of convolutional filters for the + first conv. + """ + super(UNetTriplane3dAware, self).__init__() + + + self.out_channels = out_channels + self.in_channels = in_channels + self.start_filts = start_filts + self.depth = depth + + self.use_initial_conv = use_initial_conv + if use_initial_conv: + self.conv_initial = conv1x1(self.in_channels, self.start_filts) + + self.down_convs = [] + self.up_convs = [] + + # create the encoder pathway and add to a list + for i in range(depth): + if i == 0: + ins = self.start_filts if use_initial_conv else self.in_channels + else: + ins = outs + outs = self.start_filts*(2**i) + downsamp_it = True if i < depth-1 else False + + down_conv = DownConv3dAware(ins, outs, downsample = downsamp_it) + self.down_convs.append(down_conv) + + for i in range(depth-1): + ins = outs + outs = ins // 2 + up_conv = UpConv3dAware(ins, outs, + merge_mode=merge_mode) + self.up_convs.append(up_conv) + + # add the list of modules to current module + self.down_convs = nn.ModuleList(self.down_convs) + self.up_convs = nn.ModuleList(self.up_convs) + + self.norm_out = Normalize(outs) + self.conv_final = conv1x1(outs, self.out_channels) + + self.reset_params() + + @staticmethod + def weight_init(m): + if isinstance(m, nn.Conv2d): + # init.xavier_normal_(m.weight, gain=0.1) + init.xavier_normal_(m.weight) + init.constant_(m.bias, 0) + + + def reset_params(self): + for i, m in enumerate(self.modules()): + self.weight_init(m) + + + def forward(self, x): + """ + Args: + x: Stacked triplane expected to be in (B,3,C,H,W) + """ + # Roll + x = einops.rearrange(x, 'b tri c h w -> b c (tri h) w', tri=3) + + if self.use_initial_conv: + x = self.conv_initial(x) + + encoder_outs = [] + # encoder pathway, save outputs for merging + for i, module in enumerate(self.down_convs): + x, before_pool = module(x) + encoder_outs.append(before_pool) + + # Spend a block in the middle + # x = self.block_mid(x) + + for i, module in enumerate(self.up_convs): + before_pool = encoder_outs[-(i+2)] + x = module(before_pool, x) + + x = self.norm_out(x) + + # No 
softmax is used here, so use + # nn.CrossEntropyLoss in your training script; + # that loss applies log-softmax internally. + x = self.conv_final(nonlinearity(x)) + + # Unroll + x = einops.rearrange(x, 'b c (tri h) w -> b tri c h w', tri=3) + return x + + +def setup_unet(output_channels, input_channels, unet_cfg): + if unet_cfg['use_3d_aware']: + assert(unet_cfg['rolled']) + unet = UNetTriplane3dAware( + out_channels=output_channels, + in_channels=input_channels, + depth=unet_cfg['depth'], + use_initial_conv=unet_cfg['use_initial_conv'], + start_filts=unet_cfg['start_hidden_channels'],) + else: + raise NotImplementedError + return unet
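+# --- Illustrative usage sketch (not part of the original file): the cfg keys below mirror what setup_unet reads; the sizes are arbitrary, with channel counts kept multiples of 32 so the GroupNorm in Normalize divides evenly. +if __name__ == "__main__": + unet = setup_unet(output_channels=32, input_channels=32, unet_cfg={ + 'use_3d_aware': True, 'rolled': True, 'depth': 3, + 'use_initial_conv': False, 'start_hidden_channels': 32}) + triplanes = torch.randn(1, 3, 32, 64, 64) # (B, 3 planes, C, H, W) + assert unet(triplanes).shape == (1, 3, 32, 64, 64)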
+ diff --git a/third_party/PartField/partfield/model/UNet/buildingblocks.py b/third_party/PartField/partfield/model/UNet/buildingblocks.py new file mode 100644 index 0000000000000000000000000000000000000000..e97f501d1813b03555dbec5658d024e06d761443 --- /dev/null +++ b/third_party/PartField/partfield/model/UNet/buildingblocks.py @@ -0,0 +1,546 @@ +#https://github.com/wolny/pytorch-3dunet/blob/master/pytorch3dunet/unet3d/buildingblocks.py +# MIT License + +# Copyright (c) 2018 Adrian Wolny + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from functools import partial + +import torch +from torch import nn as nn +from torch.nn import functional as F + +# from pytorch3dunet.unet3d.se import ChannelSELayer3D, ChannelSpatialSELayer3D, SpatialSELayer3D + + +def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding, + dropout_prob, is3d): + """ + Create a list of modules which together constitute a single conv layer with non-linearity + and optional batchnorm/groupnorm. + + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + kernel_size (int or tuple): size of the convolving kernel + order (string): order of things, e.g. + 'cr' -> conv + ReLU + 'gcr' -> groupnorm + conv + ReLU + 'cl' -> conv + LeakyReLU + 'ce' -> conv + ELU + 'bcr' -> batchnorm + conv + ReLU + 'cbrd' -> conv + batchnorm + ReLU + dropout + 'cbrD' -> conv + batchnorm + ReLU + dropout2d + num_groups (int): number of groups for the GroupNorm + padding (int or tuple): add zero-padding to all three sides of the input + dropout_prob (float): dropout probability + is3d (bool): if True use Conv3d, otherwise use Conv2d + Return: + list of tuple (name, module) + """ + assert 'c' in order, "Conv layer MUST be present" + assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer' + + modules = [] + for i, char in enumerate(order): + if char == 'r': + modules.append(('ReLU', nn.ReLU(inplace=True))) + elif char == 'l': + modules.append(('LeakyReLU', nn.LeakyReLU(inplace=True))) + elif char == 'e': + modules.append(('ELU', nn.ELU(inplace=True))) + elif char == 'c': + # add learnable bias only in the absence of batchnorm/groupnorm + bias = not ('g' in order or 'b' in order) + if is3d: + conv = nn.Conv3d(in_channels, out_channels, kernel_size, padding=padding, bias=bias) + else: + conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=bias) + + modules.append(('conv', conv)) + elif char == 'g': + is_before_conv = i < order.index('c') + if is_before_conv: + num_channels = in_channels + else: + num_channels = out_channels + + # use only one group if the given number of groups is greater than the number of channels + if num_channels < num_groups: + num_groups = 1 + + assert num_channels % num_groups == 0, f'Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}' + modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=num_channels))) + elif char == 'b': + is_before_conv = i < order.index('c') + if is3d: + bn = nn.BatchNorm3d + else: + bn = nn.BatchNorm2d + + if is_before_conv: + modules.append(('batchnorm', bn(in_channels))) + else: + modules.append(('batchnorm', bn(out_channels))) + elif char == 'd': + modules.append(('dropout', nn.Dropout(p=dropout_prob))) + elif char == 'D': + modules.append(('dropout2d', nn.Dropout2d(p=dropout_prob))) + else: + raise ValueError(f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c', 'd', 'D']") + + return modules + + +class SingleConv(nn.Sequential): + """ + Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order + of operations can be specified via the `order` parameter + + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + kernel_size (int or tuple): size of the convolving kernel + order (string): determines the order of layers, e.g. 
+ 'cr' -> conv + ReLU + 'crg' -> conv + ReLU + groupnorm + 'cl' -> conv + LeakyReLU + 'ce' -> conv + ELU + num_groups (int): number of groups for the GroupNorm + padding (int or tuple): add zero-padding + dropout_prob (float): dropout probability, default 0.1 + is3d (bool): if True use Conv3d, otherwise use Conv2d + """ + + def __init__(self, in_channels, out_channels, kernel_size=3, order='gcr', num_groups=8, + padding=1, dropout_prob=0.1, is3d=True): + super(SingleConv, self).__init__() + + for name, module in create_conv(in_channels, out_channels, kernel_size, order, + num_groups, padding, dropout_prob, is3d): + self.add_module(name, module) + + +class DoubleConv(nn.Sequential): + """ + A module consisting of two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d). + We use (Conv3d+ReLU+GroupNorm3d) by default. + This can be changed however by providing the 'order' argument, e.g. in order + to change to Conv3d+BatchNorm3d+ELU use order='cbe'. + Use padded convolutions to make sure that the output (H_out, W_out) is the same + as (H_in, W_in), so that you don't have to crop in the decoder path. + + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + encoder (bool): if True we're in the encoder path, otherwise we're in the decoder + kernel_size (int or tuple): size of the convolving kernel + order (string): determines the order of layers, e.g. + 'cr' -> conv + ReLU + 'crg' -> conv + ReLU + groupnorm + 'cl' -> conv + LeakyReLU + 'ce' -> conv + ELU + num_groups (int): number of groups for the GroupNorm + padding (int or tuple): add zero-padding added to all three sides of the input + upscale (int): number of the convolution to upscale in encoder if DoubleConv, default: 2 + dropout_prob (float or tuple): dropout probability for each convolution, default 0.1 + is3d (bool): if True use Conv3d instead of Conv2d layers + """ + + def __init__(self, in_channels, out_channels, encoder, kernel_size=3, order='gcr', + num_groups=8, padding=1, upscale=2, dropout_prob=0.1, is3d=True): + super(DoubleConv, self).__init__() + if encoder: + # we're in the encoder path + conv1_in_channels = in_channels + if upscale == 1: + conv1_out_channels = out_channels + else: + conv1_out_channels = out_channels // 2 + if conv1_out_channels < in_channels: + conv1_out_channels = in_channels + conv2_in_channels, conv2_out_channels = conv1_out_channels, out_channels + else: + # we're in the decoder path, decrease the number of channels in the 1st convolution + conv1_in_channels, conv1_out_channels = in_channels, out_channels + conv2_in_channels, conv2_out_channels = out_channels, out_channels + + # check if dropout_prob is a tuple and if so + # split it for different dropout probabilities for each convolution. + if isinstance(dropout_prob, list) or isinstance(dropout_prob, tuple): + dropout_prob1 = dropout_prob[0] + dropout_prob2 = dropout_prob[1] + else: + dropout_prob1 = dropout_prob2 = dropout_prob + + # conv1 + self.add_module('SingleConv1', + SingleConv(conv1_in_channels, conv1_out_channels, kernel_size, order, num_groups, + padding=padding, dropout_prob=dropout_prob1, is3d=is3d)) + # conv2 + self.add_module('SingleConv2', + SingleConv(conv2_in_channels, conv2_out_channels, kernel_size, order, num_groups, + padding=padding, dropout_prob=dropout_prob2, is3d=is3d)) + + +class ResNetBlock(nn.Module): + """ + Residual block that can be used instead of standard DoubleConv in the Encoder module. 
+
+
+class ResNetBlock(nn.Module):
+    """
+    Residual block that can be used instead of standard DoubleConv in the Encoder module.
+    Motivated by: https://arxiv.org/pdf/1706.00120.pdf
+
+    Notice we use ELU instead of ReLU (order='cge') and put the non-linearity after the groupnorm.
+    """
+
+    def __init__(self, in_channels, out_channels, kernel_size=3, order='cge', num_groups=8, is3d=True, **kwargs):
+        super(ResNetBlock, self).__init__()
+
+        if in_channels != out_channels:
+            # conv1x1 for increasing the number of channels
+            if is3d:
+                self.conv1 = nn.Conv3d(in_channels, out_channels, 1)
+            else:
+                self.conv1 = nn.Conv2d(in_channels, out_channels, 1)
+        else:
+            self.conv1 = nn.Identity()
+
+        self.conv2 = SingleConv(in_channels, out_channels, kernel_size=kernel_size, order=order, num_groups=num_groups,
+                                is3d=is3d)
+        # remove non-linearity from the 3rd convolution since it's going to be applied after adding the residual
+        n_order = order
+        for c in 'rel':
+            n_order = n_order.replace(c, '')
+        self.conv3 = SingleConv(out_channels, out_channels, kernel_size=kernel_size, order=n_order,
+                                num_groups=num_groups, is3d=is3d)
+
+        # create non-linearity separately
+        if 'l' in order:
+            self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True)
+        elif 'e' in order:
+            self.non_linearity = nn.ELU(inplace=True)
+        else:
+            self.non_linearity = nn.ReLU(inplace=True)
+
+    def forward(self, x):
+        # apply first convolution to bring the number of channels to out_channels
+        residual = self.conv1(x)
+
+        out = self.conv2(x)
+        out = self.conv3(out)
+
+        out += residual
+        out = self.non_linearity(out)
+
+        return out
+
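The residual path in a sketch (hedged, same in-scope assumptions as above): `conv1` is only a 1x1 channel-matching projection for the skip, and the activation is applied after the summation.

import torch

block = ResNetBlock(in_channels=32, out_channels=64)  # default order='cge'
x = torch.randn(1, 32, 8, 8, 8)
out = block(x)  # conv2 -> conv3 (order 'cg', no activation) + 1x1 skip, then ELU
print(out.shape)  # torch.Size([1, 64, 8, 8, 8])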
+class Encoder(nn.Module):
+    """
+    A single module from the encoder path consisting of an optional max
+    pooling layer (one may specify a MaxPool kernel_size different from the
+    standard (2,2,2), e.g. if the volumetric data is anisotropic; make sure
+    to use a complementary scale_factor in the decoder path) followed by
+    a basic module (DoubleConv or ResNetBlock).
+
+    Args:
+        in_channels (int): number of input channels
+        out_channels (int): number of output channels
+        conv_kernel_size (int or tuple): size of the convolving kernel
+        apply_pooling (bool): if True use MaxPool3d before DoubleConv
+        pool_kernel_size (int or tuple): the size of the window
+        pool_type (str): pooling layer: 'max' or 'avg'
+        basic_module(nn.Module): either ResNetBlock or DoubleConv
+        conv_layer_order (string): determines the order of layers
+            in `DoubleConv` module. See `DoubleConv` for more info.
+        num_groups (int): number of groups for the GroupNorm
+        padding (int or tuple): zero-padding added to all sides of the input
+        upscale (int): channel upscale behaviour of the first convolution if
+            basic_module is DoubleConv (see `DoubleConv`), default: 2
+        dropout_prob (float or tuple): dropout probability, default 0.1
+        is3d (bool): use 3d or 2d convolutions/pooling operation
+    """
+
+    def __init__(self, in_channels, out_channels, conv_kernel_size=3, apply_pooling=True,
+                 pool_kernel_size=2, pool_type='max', basic_module=DoubleConv, conv_layer_order='gcr',
+                 num_groups=8, padding=1, upscale=2, dropout_prob=0.1, is3d=True):
+        super(Encoder, self).__init__()
+        assert pool_type in ['max', 'avg']
+        if apply_pooling:
+            if pool_type == 'max':
+                if is3d:
+                    self.pooling = nn.MaxPool3d(kernel_size=pool_kernel_size)
+                else:
+                    self.pooling = nn.MaxPool2d(kernel_size=pool_kernel_size)
+            else:
+                if is3d:
+                    self.pooling = nn.AvgPool3d(kernel_size=pool_kernel_size)
+                else:
+                    self.pooling = nn.AvgPool2d(kernel_size=pool_kernel_size)
+        else:
+            self.pooling = None
+
+        self.basic_module = basic_module(in_channels, out_channels,
+                                         encoder=True,
+                                         kernel_size=conv_kernel_size,
+                                         order=conv_layer_order,
+                                         num_groups=num_groups,
+                                         padding=padding,
+                                         upscale=upscale,
+                                         dropout_prob=dropout_prob,
+                                         is3d=is3d)
+
+    def forward(self, x):
+        if self.pooling is not None:
+            x = self.pooling(x)
+        x = self.basic_module(x)
+        return x
+
+
+class Decoder(nn.Module):
+    """
+    A single module of the decoder path consisting of the upsampling layer
+    (either learned ConvTranspose3d or nearest neighbor interpolation)
+    followed by a basic module (DoubleConv or ResNetBlock).
+
+    Args:
+        in_channels (int): number of input channels
+        out_channels (int): number of output channels
+        conv_kernel_size (int or tuple): size of the convolving kernel
+        scale_factor (int or tuple): used as the multiplier for the image H/W/D in
+            case of nn.Upsample or as stride in case of ConvTranspose3d, must reverse the MaxPool3d operation
+            from the corresponding encoder
+        basic_module(nn.Module): either ResNetBlock or DoubleConv
+        conv_layer_order (string): determines the order of layers
+            in `DoubleConv` module. See `DoubleConv` for more info.
+        num_groups (int): number of groups for the GroupNorm
+        padding (int or tuple): zero-padding added to all sides of the input
+        upsample (str): algorithm used for upsampling:
+            InterpolateUpsampling: 'nearest' | 'linear' | 'bilinear' | 'trilinear' | 'area'
+            TransposeConvUpsampling: 'deconv'
+            No upsampling: None
+            Default: 'default' (chooses automatically)
+        dropout_prob (float or tuple): dropout probability, default 0.1
+    """
+
+    def __init__(self, in_channels, out_channels, conv_kernel_size=3, scale_factor=2, basic_module=DoubleConv,
+                 conv_layer_order='gcr', num_groups=8, padding=1, upsample='default',
+                 dropout_prob=0.1, is3d=True):
+        super(Decoder, self).__init__()
+
+        # perform concat joining per default
+        concat = True
+
+        # don't adapt channels after join operation
+        adapt_channels = False
+
+        if upsample is not None and upsample != 'none':
+            if upsample == 'default':
+                if basic_module == DoubleConv:
+                    upsample = 'nearest'  # use nearest neighbor interpolation for upsampling
+                    concat = True  # use concat joining
+                    adapt_channels = False  # don't adapt channels
+                elif basic_module == ResNetBlock:
+                    upsample = 'deconv'  # use deconvolution upsampling
+                    concat = False  # use summation joining
+                    adapt_channels = True  # adapt channels after joining
+
+            # perform deconvolution upsampling if mode is deconv
+            if upsample == 'deconv':
+                self.upsampling = TransposeConvUpsampling(in_channels=in_channels, out_channels=out_channels,
+                                                          kernel_size=conv_kernel_size, scale_factor=scale_factor,
+                                                          is3d=is3d)
+            else:
+                self.upsampling = InterpolateUpsampling(mode=upsample)
+        else:
+            # no upsampling
+            self.upsampling = NoUpsampling()
+
+        # perform joining operation (concat is still True in the no-upsampling branch)
+        self.joining = partial(self._joining, concat=concat)
+
+        # adapt the number of in_channels for the ResNetBlock
+        if adapt_channels:
+            in_channels = out_channels
+
+        self.basic_module = basic_module(in_channels, out_channels,
+                                         encoder=False,
+                                         kernel_size=conv_kernel_size,
+                                         order=conv_layer_order,
+                                         num_groups=num_groups,
+                                         padding=padding,
+                                         dropout_prob=dropout_prob,
+                                         is3d=is3d)
+
+    def forward(self, encoder_features, x):
+        x = self.upsampling(encoder_features=encoder_features, x=x)
+        x = self.joining(encoder_features, x)
+        x = self.basic_module(x)
+        return x
+
+    @staticmethod
+    def _joining(encoder_features, x, concat):
+        if concat:
+            return torch.cat((encoder_features, x), dim=1)
+        else:
+            return encoder_features + x
+
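`_joining` is the crux of the two decoder variants; a quick shape sketch (hedged, `torch` imported): concat grows the channel dimension while summation preserves it, which is why the ResNetBlock path adapts `in_channels` first.

import torch

skip = torch.randn(1, 64, 16, 16, 16)  # encoder features
up = torch.randn(1, 64, 16, 16, 16)    # upsampled decoder features
print(Decoder._joining(skip, up, concat=True).shape)   # [1, 128, 16, 16, 16]
print(Decoder._joining(skip, up, concat=False).shape)  # [1, 64, 16, 16, 16]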
+
+def create_encoders(in_channels, f_maps, basic_module, conv_kernel_size, conv_padding,
+                    conv_upscale, dropout_prob,
+                    layer_order, num_groups, pool_kernel_size, is3d):
+    # create encoder path consisting of Encoder modules. Depth of the encoder is equal to `len(f_maps)`
+    encoders = []
+    for i, out_feature_num in enumerate(f_maps):
+        if i == 0:
+            # apply conv_coord only in the first encoder if any
+            encoder = Encoder(in_channels, out_feature_num,
+                              apply_pooling=False,  # skip pooling in the first encoder
+                              basic_module=basic_module,
+                              conv_layer_order=layer_order,
+                              conv_kernel_size=conv_kernel_size,
+                              num_groups=num_groups,
+                              padding=conv_padding,
+                              upscale=conv_upscale,
+                              dropout_prob=dropout_prob,
+                              is3d=is3d)
+        else:
+            encoder = Encoder(f_maps[i - 1], out_feature_num,
+                              basic_module=basic_module,
+                              conv_layer_order=layer_order,
+                              conv_kernel_size=conv_kernel_size,
+                              num_groups=num_groups,
+                              pool_kernel_size=pool_kernel_size,
+                              padding=conv_padding,
+                              upscale=conv_upscale,
+                              dropout_prob=dropout_prob,
+                              is3d=is3d)
+
+        encoders.append(encoder)
+
+    return nn.ModuleList(encoders)
+
+
+def create_decoders(f_maps, basic_module, conv_kernel_size, conv_padding, layer_order,
+                    num_groups, upsample, dropout_prob, is3d):
+    # create decoder path consisting of the Decoder modules. The length of the decoder list is
+    # equal to `len(f_maps) - 2` (the decoder path here stops at `f_maps[1]`)
+    decoders = []
+    reversed_f_maps = list(reversed(f_maps[1:]))
+    for i in range(len(reversed_f_maps) - 1):
+        if basic_module == DoubleConv and upsample != 'deconv':
+            in_feature_num = reversed_f_maps[i] + reversed_f_maps[i + 1]
+        else:
+            in_feature_num = reversed_f_maps[i]
+
+        out_feature_num = reversed_f_maps[i + 1]
+
+        decoder = Decoder(in_feature_num, out_feature_num,
+                          basic_module=basic_module,
+                          conv_layer_order=layer_order,
+                          conv_kernel_size=conv_kernel_size,
+                          num_groups=num_groups,
+                          padding=conv_padding,
+                          upsample=upsample,
+                          dropout_prob=dropout_prob,
+                          is3d=is3d)
+        decoders.append(decoder)
+    return nn.ModuleList(decoders)
+
+
+class AbstractUpsampling(nn.Module):
+    """
+    Abstract class for upsampling. A given implementation should upsample a given 5D input tensor using either
+    interpolation or learned transposed convolution.
+    """
+
+    def __init__(self, upsample):
+        super(AbstractUpsampling, self).__init__()
+        self.upsample = upsample
+
+    def forward(self, encoder_features, x):
+        # get the spatial dimensions of the output given the encoder_features
+        output_size = encoder_features.size()[2:]
+        # upsample the input and return
+        return self.upsample(x, output_size)
+
+
+class InterpolateUpsampling(AbstractUpsampling):
+    """
+    Args:
+        mode (str): algorithm used for upsampling:
+            'nearest' | 'linear' | 'bilinear' | 'trilinear' | 'area'. Default: 'nearest'
+    """
+
+    def __init__(self, mode='nearest'):
+        upsample = partial(self._interpolate, mode=mode)
+        super().__init__(upsample)
+
+    @staticmethod
+    def _interpolate(x, size, mode):
+        return F.interpolate(x, size=size, mode=mode)
+
+
+class TransposeConvUpsampling(AbstractUpsampling):
+    """
+    Args:
+        in_channels (int): number of input channels for the transposed conv
+        out_channels (int): number of output channels for the transposed conv
+        kernel_size (int or tuple): size of the convolving kernel
+        scale_factor (int or tuple): stride of the convolution
+        is3d (bool): if True use ConvTranspose3d, otherwise use ConvTranspose2d
+    """
+
+    class Upsample(nn.Module):
+        """
+        Workaround for the 'ValueError: requested an output size...' raised in the `_output_padding` method of
+        transposed convolution.
+        It performs the transposed conv followed by interpolation to the correct size, if necessary.
+        """
+
+        def __init__(self, conv_transposed, is3d):
+            super().__init__()
+            self.conv_transposed = conv_transposed
+            self.is3d = is3d
+
+        def forward(self, x, size):
+            x = self.conv_transposed(x)
+            return F.interpolate(x, size=size)
+
+    def __init__(self, in_channels, out_channels, kernel_size=3, scale_factor=2, is3d=True):
+        # make sure that the output size reverses the MaxPool3d from the corresponding encoder
+        if is3d:
+            conv_transposed = nn.ConvTranspose3d(in_channels, out_channels, kernel_size=kernel_size,
+                                                 stride=scale_factor, padding=1, bias=False)
+        else:
+            conv_transposed = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size,
+                                                 stride=scale_factor, padding=1, bias=False)
+        upsample = self.Upsample(conv_transposed, is3d)
+        super().__init__(upsample)
+
+
+class NoUpsampling(AbstractUpsampling):
+    def __init__(self):
+        super().__init__(self._no_upsampling)
+
+    @staticmethod
+    def _no_upsampling(x, size):
+        return x
\ No newline at end of file
diff --git a/third_party/PartField/partfield/model/UNet/model.py b/third_party/PartField/partfield/model/UNet/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..db20b2f5de3d37a52f7465450f915e003ef412d6
--- /dev/null
+++ b/third_party/PartField/partfield/model/UNet/model.py
@@ -0,0 +1,170 @@
+# https://github.com/wolny/pytorch-3dunet/blob/master/pytorch3dunet/unet3d/buildingblocks.py
+# MIT License
+
+# Copyright (c) 2018 Adrian Wolny
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import torch.nn as nn
+
+from partfield.model.UNet.buildingblocks import DoubleConv, ResNetBlock, \
+    create_decoders, create_encoders
+
+def number_of_features_per_level(init_channel_number, num_levels):
+    return [init_channel_number * 2 ** k for k in range(num_levels)]
+
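For reference, the feature-map progression this helper produces (doubling per level), trivially checkable:

print(number_of_features_per_level(64, num_levels=4))  # [64, 128, 256, 512]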
+class AbstractUNet(nn.Module):
+    """
+    Base class for standard and residual UNet.
+
+    Args:
+        in_channels (int): number of input channels
+        out_channels (int): number of output segmentation masks;
+            Note that out_channels might correspond either to different semantic classes
+            or to different binary segmentation masks.
+            It's up to the user of the class to interpret the out_channels and
+            use the proper loss criterion during training (i.e. CrossEntropyLoss (multi-class)
+            or BCEWithLogitsLoss (two-class) respectively)
+        f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer the number
+            of feature maps is given by the geometric progression: f_maps * 2^k, for k=0,1,...,num_levels-1
+        final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the final 1x1 convolution,
+            otherwise apply nn.Softmax. In effect only if `self.training == False`, i.e. during validation/testing
+        basic_module: basic model for the encoder/decoder (DoubleConv, ResNetBlock, ....)
+        layer_order (string): determines the order of layers in `SingleConv` module.
+            E.g. 'gcr' stands for GroupNorm+Conv3d+ReLU. See `SingleConv` for more info
+        num_groups (int): number of groups for the GroupNorm
+        num_levels (int): number of levels in the encoder/decoder path (applied only if f_maps is an int)
+            default: 4
+        is_segmentation (bool): if True and the model is in eval mode, Sigmoid/Softmax normalization is applied
+            after the final convolution; if False (regression problem) the normalization layer is skipped
+        conv_kernel_size (int or tuple): size of the convolving kernel in the basic_module
+        pool_kernel_size (int or tuple): the size of the window
+        conv_padding (int or tuple): zero-padding added to all sides of the input
+        conv_upscale (int): channel upscale behaviour of the first convolution in the encoder
+            if basic_module is DoubleConv (see `DoubleConv`), default: 2
+        upsample (str): algorithm used for decoder upsampling:
+            InterpolateUpsampling: 'nearest' | 'linear' | 'bilinear' | 'trilinear' | 'area'
+            TransposeConvUpsampling: 'deconv'
+            No upsampling: None
+            Default: 'default' (chooses automatically)
+        dropout_prob (float or tuple): dropout probability, default: 0.1
+        is3d (bool): if True the model is 3D, otherwise 2D, default: True
+    """
+
+    def __init__(self, in_channels, out_channels, final_sigmoid, basic_module, f_maps=64, layer_order='gcr',
+                 num_groups=8, num_levels=4, is_segmentation=False, conv_kernel_size=3, pool_kernel_size=2,
+                 conv_padding=1, conv_upscale=2, upsample='default', dropout_prob=0.1, is3d=True, encoder_only=False):
+        super(AbstractUNet, self).__init__()
+
+        if isinstance(f_maps, int):
+            f_maps = number_of_features_per_level(f_maps, num_levels=num_levels)
+
+        assert isinstance(f_maps, (list, tuple))
+        assert len(f_maps) > 1, "At least 2 levels are required in the U-Net"
+        if 'g' in layer_order:
+            assert num_groups is not None, "num_groups must be specified if GroupNorm is used"
+
+        # create encoder path
+        self.encoders = create_encoders(in_channels, f_maps, basic_module, conv_kernel_size,
+                                        conv_padding, conv_upscale, dropout_prob,
+                                        layer_order, num_groups, pool_kernel_size, is3d)
+
+        self.encoder_only = encoder_only
+
+        if not encoder_only:
+            # create decoder path
+            self.decoders = create_decoders(f_maps, basic_module, conv_kernel_size, conv_padding,
+                                            layer_order, num_groups, upsample, dropout_prob,
+                                            is3d)
+
+            # in the last layer a 1x1 convolution reduces the number of output channels to the number of labels
+            if is3d:
+                self.final_conv = nn.Conv3d(f_maps[1], out_channels, 1)
+            else:
+                self.final_conv = nn.Conv2d(f_maps[1], out_channels, 1)
+
+            if is_segmentation:
+                # semantic segmentation problem
+                if final_sigmoid:
+                    self.final_activation = nn.Sigmoid()
+                else:
+                    self.final_activation = nn.Softmax(dim=1)
+            else:
+                # regression problem
+                self.final_activation = None
+
+    def forward(self, x, return_bottleneck_feat=False):
+        # encoder part
+        encoders_features = []
+        for encoder in self.encoders:
+            x = encoder(x)
+            # reverse
the encoder outputs to be aligned with the decoder + encoders_features.insert(0, x) + + # remove the last encoder's output from the list + # !!remember: it's the 1st in the list + bottleneck_feat = encoders_features[0] + if self.encoder_only: + return bottleneck_feat + else: + encoders_features = encoders_features[1:] + + # decoder part + for decoder, encoder_features in zip(self.decoders, encoders_features): + # pass the output from the corresponding encoder and the output + # of the previous decoder + x = decoder(encoder_features, x) + + x = self.final_conv(x) + # During training the network outputs logits + if self.final_activation is not None: + x = self.final_activation(x) + + if return_bottleneck_feat: + return x, bottleneck_feat + else: + return x + +class ResidualUNet3D(AbstractUNet): + """ + Residual 3DUnet model implementation based on https://arxiv.org/pdf/1706.00120.pdf. + Uses ResNetBlock as a basic building block, summation joining instead + of concatenation joining and transposed convolutions for upsampling (watch out for block artifacts). + Since the model effectively becomes a residual net, in theory it allows for deeper UNet. + """ + + def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=(8, 16, 64, 256, 1024), layer_order='gcr', + num_groups=8, num_levels=5, is_segmentation=True, conv_padding=1, + conv_upscale=2, upsample='default', dropout_prob=0.1, encoder_only=False, **kwargs): + super(ResidualUNet3D, self).__init__(in_channels=in_channels, + out_channels=out_channels, + final_sigmoid=final_sigmoid, + basic_module=ResNetBlock, + f_maps=f_maps, + layer_order=layer_order, + num_groups=num_groups, + num_levels=num_levels, + is_segmentation=is_segmentation, + conv_padding=conv_padding, + conv_upscale=conv_upscale, + upsample=upsample, + dropout_prob=dropout_prob, + encoder_only=encoder_only, + is3d=True) + + diff --git a/third_party/PartField/partfield/model/model_utils.py b/third_party/PartField/partfield/model/model_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c1cc16bd96d21b69f21998e577f7f5f970d25cf3 --- /dev/null +++ b/third_party/PartField/partfield/model/model_utils.py @@ -0,0 +1,54 @@ +import torch +import torch.nn as nn + +class VanillaMLP(nn.Module): + def __init__(self, input_dim, output_dim, out_activation, n_hidden_layers=4, n_neurons=64, activation="ReLU"): + super().__init__() + self.n_neurons = n_neurons + self.n_hidden_layers = n_hidden_layers + self.activation = activation + self.out_activation = out_activation + layers = [ + self.make_linear(input_dim, self.n_neurons, is_first=True, is_last=False), + self.make_activation(), + ] + for i in range(self.n_hidden_layers - 1): + layers += [ + self.make_linear( + self.n_neurons, self.n_neurons, is_first=False, is_last=False + ), + self.make_activation(), + ] + layers += [ + self.make_linear(self.n_neurons, output_dim, is_first=False, is_last=True) + ] + if self.out_activation == "sigmoid": + layers += [nn.Sigmoid()] + elif self.out_activation == "tanh": + layers += [nn.Tanh()] + elif self.out_activation == "hardtanh": + layers += [nn.Hardtanh()] + elif self.out_activation == "GELU": + layers += [nn.GELU()] + elif self.out_activation == "RELU": + layers += [nn.ReLU()] + else: + raise NotImplementedError + self.layers = nn.Sequential(*layers) + + def forward(self, x, split_size=100000): + with torch.cuda.amp.autocast(enabled=False): + out = self.layers(x) + return out + + def make_linear(self, dim_in, dim_out, is_first, is_last): + layer = nn.Linear(dim_in, 
dim_out, bias=False)
+        return layer
+
+    def make_activation(self):
+        if self.activation == "ReLU":
+            return nn.ReLU(inplace=True)
+        elif self.activation == "GELU":
+            return nn.GELU()
+        else:
+            raise NotImplementedError
\ No newline at end of file
diff --git a/third_party/PartField/partfield/model/triplane.py b/third_party/PartField/partfield/model/triplane.py
new file mode 100644
index 0000000000000000000000000000000000000000..6274a8398d248d3ba4a5a4734c7b1bc90d596b10
--- /dev/null
+++ b/third_party/PartField/partfield/model/triplane.py
@@ -0,0 +1,331 @@
+#https://github.com/3DTopia/OpenLRM/blob/main/openlrm/models/modeling_lrm.py
+# Copyright (c) 2023-2024, Zexin He
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+from functools import partial
+
+def project_onto_planes(planes, coordinates):
+    """
+    Does a projection of a 3D point onto a batch of 2D planes,
+    returning 2D plane coordinates.
+
+    Takes plane axes of shape (n_planes, 3, 3) and coordinates of shape (N, M, 3).
+    Returns projections of shape (N*n_planes, M, 2).
+    """
+    N, M, C = coordinates.shape
+    n_planes, _, _ = planes.shape
+    coordinates = coordinates.unsqueeze(1).expand(-1, n_planes, -1, -1).reshape(N*n_planes, M, 3)
+    inv_planes = torch.linalg.inv(planes).unsqueeze(0).expand(N, -1, -1, -1).reshape(N*n_planes, 3, 3)
+    projections = torch.bmm(coordinates, inv_planes)
+    return projections[..., :2]
+
+def sample_from_planes(plane_features, coordinates, mode='bilinear', padding_mode='zeros', box_warp=None):
+    plane_axes = torch.tensor([[[1, 0, 0],
+                                [0, 1, 0],
+                                [0, 0, 1]],
+                               [[1, 0, 0],
+                                [0, 0, 1],
+                                [0, 1, 0]],
+                               [[0, 0, 1],
+                                [0, 1, 0],
+                                [1, 0, 0]]], dtype=torch.float32).cuda()
+
+    assert padding_mode == 'zeros'
+    N, n_planes, C, H, W = plane_features.shape
+    _, M, _ = coordinates.shape
+    plane_features = plane_features.view(N*n_planes, C, H, W)
+
+    projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1)
+    output_features = torch.nn.functional.grid_sample(plane_features, projected_coordinates.float(), mode=mode, padding_mode=padding_mode, align_corners=False).permute(0, 3, 2, 1).reshape(N, n_planes, M, C)
+    return output_features
+
+def get_grid_coord(grid_size=256, align_corners=False):
+    if not align_corners:
+        coords = torch.linspace(-1 + 1/(grid_size), 1 - 1/(grid_size), steps=grid_size)
+    else:
+        coords = torch.linspace(-1, 1, steps=grid_size)
+    i, j, k = torch.meshgrid(coords, coords, coords, indexing='ij')
+    coordinates = torch.stack((i, j, k), dim=-1).reshape(-1, 3)
+    return coordinates
+
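A hedged usage sketch tying the three helpers above together (it assumes a CUDA device, because `sample_from_planes` hard-codes `.cuda()` for its plane axes):

import torch

planes = torch.randn(2, 3, 32, 128, 128).cuda()  # [N, n_planes, C, H, W]
points = torch.rand(2, 1000, 3).cuda() * 2 - 1   # [N, M, 3] in [-1, 1]
feats = sample_from_planes(planes, points)       # shape [2, 3, 1000, 32]
grid = get_grid_coord(64)                        # [64**3, 3] voxel-center coordinates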
+ """ + # Block contains a self-attention layer and an MLP + def __init__(self, inner_dim: int, num_heads: int, eps: float, + attn_drop: float = 0., attn_bias: bool = False, + mlp_ratio: float = 4., mlp_drop: float = 0.): + super().__init__() + self.norm1 = nn.LayerNorm(inner_dim, eps=eps) + self.self_attn = nn.MultiheadAttention( + embed_dim=inner_dim, num_heads=num_heads, + dropout=attn_drop, bias=attn_bias, batch_first=True) + self.norm2 = nn.LayerNorm(inner_dim, eps=eps) + self.mlp = nn.Sequential( + nn.Linear(inner_dim, int(inner_dim * mlp_ratio)), + nn.GELU(), + nn.Dropout(mlp_drop), + nn.Linear(int(inner_dim * mlp_ratio), inner_dim), + nn.Dropout(mlp_drop), + ) + + def forward(self, x): + # x: [N, L, D] + before_sa = self.norm1(x) + x = x + self.self_attn(before_sa, before_sa, before_sa, need_weights=False)[0] + x = x + self.mlp(self.norm2(x)) + return x + +class ConditionBlock(nn.Module): + """ + Transformer block that takes in a cross-attention condition. + Designed for SparseLRM architecture. + """ + # Block contains a cross-attention layer, a self-attention layer, and an MLP + def __init__(self, inner_dim: int, cond_dim: int, num_heads: int, eps: float, + attn_drop: float = 0., attn_bias: bool = False, + mlp_ratio: float = 4., mlp_drop: float = 0.): + super().__init__() + self.norm1 = nn.LayerNorm(inner_dim, eps=eps) + self.cross_attn = nn.MultiheadAttention( + embed_dim=inner_dim, num_heads=num_heads, kdim=cond_dim, vdim=cond_dim, + dropout=attn_drop, bias=attn_bias, batch_first=True) + self.norm2 = nn.LayerNorm(inner_dim, eps=eps) + self.self_attn = nn.MultiheadAttention( + embed_dim=inner_dim, num_heads=num_heads, + dropout=attn_drop, bias=attn_bias, batch_first=True) + self.norm3 = nn.LayerNorm(inner_dim, eps=eps) + self.mlp = nn.Sequential( + nn.Linear(inner_dim, int(inner_dim * mlp_ratio)), + nn.GELU(), + nn.Dropout(mlp_drop), + nn.Linear(int(inner_dim * mlp_ratio), inner_dim), + nn.Dropout(mlp_drop), + ) + + def forward(self, x, cond): + # x: [N, L, D] + # cond: [N, L_cond, D_cond] + x = x + self.cross_attn(self.norm1(x), cond, cond, need_weights=False)[0] + before_sa = self.norm2(x) + x = x + self.self_attn(before_sa, before_sa, before_sa, need_weights=False)[0] + x = x + self.mlp(self.norm3(x)) + return x + +class TransformerDecoder(nn.Module): + def __init__(self, block_type: str, + num_layers: int, num_heads: int, + inner_dim: int, cond_dim: int = None, + eps: float = 1e-6): + super().__init__() + self.block_type = block_type + self.layers = nn.ModuleList([ + self._block_fn(inner_dim, cond_dim)( + num_heads=num_heads, + eps=eps, + ) + for _ in range(num_layers) + ]) + self.norm = nn.LayerNorm(inner_dim, eps=eps) + + @property + def block_type(self): + return self._block_type + + @block_type.setter + def block_type(self, block_type): + assert block_type in ['cond', 'basic'], \ + f"Unsupported block type: {block_type}" + self._block_type = block_type + + def _block_fn(self, inner_dim, cond_dim): + assert inner_dim is not None, f"inner_dim must always be specified" + if self.block_type == 'basic': + return partial(BasicBlock, inner_dim=inner_dim) + elif self.block_type == 'cond': + assert cond_dim is not None, f"Condition dimension must be specified for ConditionBlock" + return partial(ConditionBlock, inner_dim=inner_dim, cond_dim=cond_dim) + else: + raise ValueError(f"Unsupported block type during runtime: {self.block_type}") + + + def forward_layer(self, layer: nn.Module, x: torch.Tensor, cond: torch.Tensor,): + if self.block_type == 'basic': + return layer(x) + elif 
self.block_type == 'cond': + return layer(x, cond) + else: + raise NotImplementedError + + def forward(self, x: torch.Tensor, cond: torch.Tensor = None): + # x: [N, L, D] + # cond: [N, L_cond, D_cond] or None + for layer in self.layers: + x = self.forward_layer(layer, x, cond) + x = self.norm(x) + return x + +class Voxel2Triplane(nn.Module): + """ + Full model of the basic single-view large reconstruction model. + """ + def __init__(self, transformer_dim: int, transformer_layers: int, transformer_heads: int, + triplane_low_res: int, triplane_high_res: int, triplane_dim: int, voxel_feat_dim: int, normalize_vox_feat=False, voxel_dim=16): + super().__init__() + + # attributes + self.triplane_low_res = triplane_low_res + self.triplane_high_res = triplane_high_res + self.triplane_dim = triplane_dim + self.voxel_feat_dim = voxel_feat_dim + + # initialize pos_embed with 1/sqrt(dim) * N(0, 1) + self.pos_embed = nn.Parameter(torch.randn(1, 3*triplane_low_res**2, transformer_dim) * (1. / transformer_dim) ** 0.5) + self.transformer = TransformerDecoder( + block_type='cond', + num_layers=transformer_layers, num_heads=transformer_heads, + inner_dim=transformer_dim, cond_dim=voxel_feat_dim + ) + self.upsampler = nn.ConvTranspose2d(transformer_dim, triplane_dim, kernel_size=8, stride=8, padding=0) + + self.normalize_vox_feat = normalize_vox_feat + if normalize_vox_feat: + self.vox_norm = nn.LayerNorm(voxel_feat_dim, eps=1e-6) + self.vox_pos_embed = nn.Parameter(torch.randn(1, voxel_dim * voxel_dim * voxel_dim, voxel_feat_dim) * (1. / voxel_feat_dim) ** 0.5) + + def forward_transformer(self, voxel_feats): + N = voxel_feats.shape[0] + x = self.pos_embed.repeat(N, 1, 1) # [N, L, D] + if self.normalize_vox_feat: + vox_pos_embed = self.vox_pos_embed.repeat(N, 1, 1) # [N, L, D] + voxel_feats = self.vox_norm(voxel_feats + vox_pos_embed) + x = self.transformer( + x, + cond=voxel_feats + ) + return x + + def reshape_upsample(self, tokens): + N = tokens.shape[0] + H = W = self.triplane_low_res + x = tokens.view(N, 3, H, W, -1) + x = torch.einsum('nihwd->indhw', x) # [3, N, D, H, W] + x = x.contiguous().view(3*N, -1, H, W) # [3*N, D, H, W] + x = self.upsampler(x) # [3*N, D', H', W'] + x = x.view(3, N, *x.shape[-3:]) # [3, N, D', H', W'] + x = torch.einsum('indhw->nidhw', x) # [N, 3, D', H', W'] + x = x.contiguous() + return x + + def forward(self, voxel_feats): + N = voxel_feats.shape[0] + + # encode image + assert voxel_feats.shape[-1] == self.voxel_feat_dim, \ + f"Feature dimension mismatch: {voxel_feats.shape[-1]} vs {self.voxel_feat_dim}" + + # transformer generating planes + tokens = self.forward_transformer(voxel_feats) + planes = self.reshape_upsample(tokens) + assert planes.shape[0] == N, "Batch size mismatch for planes" + assert planes.shape[1] == 3, "Planes should have 3 channels" + + return planes + + +class TriplaneTransformer(nn.Module): + """ + Full model of the basic single-view large reconstruction model. + """ + def __init__(self, input_dim: int, transformer_dim: int, transformer_layers: int, transformer_heads: int, + triplane_low_res: int, triplane_high_res: int, triplane_dim: int): + super().__init__() + + # attributes + self.triplane_low_res = triplane_low_res + self.triplane_high_res = triplane_high_res + self.triplane_dim = triplane_dim + + # initialize pos_embed with 1/sqrt(dim) * N(0, 1) + self.pos_embed = nn.Parameter(torch.randn(1, 3*triplane_low_res**2, transformer_dim) * (1. 
/ transformer_dim) ** 0.5) + self.transformer = TransformerDecoder( + block_type='basic', + num_layers=transformer_layers, num_heads=transformer_heads, + inner_dim=transformer_dim, + ) + + self.downsampler = nn.Sequential( + nn.Conv2d(input_dim, transformer_dim, kernel_size=3, stride=1, padding=1), + nn.ReLU(), + nn.MaxPool2d(kernel_size=2, stride=2), # Reduces size from 128x128 to 64x64 + + nn.Conv2d(transformer_dim, transformer_dim, kernel_size=3, stride=1, padding=1), + nn.ReLU(), + nn.MaxPool2d(kernel_size=2, stride=2), # Reduces size from 64x64 to 32x32 + ) + + self.upsampler = nn.ConvTranspose2d(transformer_dim, triplane_dim, kernel_size=4, stride=4, padding=0) + + self.mlp = nn.Sequential( + nn.Linear(input_dim, triplane_dim), + nn.ReLU(), + nn.Linear(triplane_dim, triplane_dim) + ) + + def forward_transformer(self, triplanes): + N = triplanes.shape[0] + tokens = torch.einsum('nidhw->nihwd', triplanes).reshape(N, self.pos_embed.shape[1], -1) # [N, L, D] + x = self.pos_embed.repeat(N, 1, 1) + tokens # [N, L, D] + x = self.transformer(x) + return x + + def reshape_downsample(self, triplanes): + N = triplanes.shape[0] + H = W = self.triplane_high_res + x = triplanes.view(N, 3, -1, H, W) + x = torch.einsum('nidhw->indhw', x) # [3, N, D, H, W] + x = x.contiguous().view(3*N, -1, H, W) # [3*N, D, H, W] + x = self.downsampler(x) # [3*N, D', H', W'] + x = x.view(3, N, *x.shape[-3:]) # [3, N, D', H', W'] + x = torch.einsum('indhw->nidhw', x) # [N, 3, D', H', W'] + x = x.contiguous() + return x + + def reshape_upsample(self, tokens): + N = tokens.shape[0] + H = W = self.triplane_low_res + x = tokens.view(N, 3, H, W, -1) + x = torch.einsum('nihwd->indhw', x) # [3, N, D, H, W] + x = x.contiguous().view(3*N, -1, H, W) # [3*N, D, H, W] + x = self.upsampler(x) # [3*N, D', H', W'] + x = x.view(3, N, *x.shape[-3:]) # [3, N, D', H', W'] + x = torch.einsum('indhw->nidhw', x) # [N, 3, D', H', W'] + x = x.contiguous() + return x + + def forward(self, triplanes): + downsampled_triplanes = self.reshape_downsample(triplanes) + tokens = self.forward_transformer(downsampled_triplanes) + residual = self.reshape_upsample(tokens) + + triplanes = triplanes.permute(0, 1, 3, 4, 2).contiguous() + triplanes = self.mlp(triplanes) + triplanes = triplanes.permute(0, 1, 4, 2, 3).contiguous() + planes = triplanes + residual + return planes diff --git a/third_party/PartField/partfield/model_trainer_pvcnn_only_demo.py b/third_party/PartField/partfield/model_trainer_pvcnn_only_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..f78a0c3b965117bbdf853d49c2c0a3aa4b16398c --- /dev/null +++ b/third_party/PartField/partfield/model_trainer_pvcnn_only_demo.py @@ -0,0 +1,117 @@ +import torch +import lightning.pytorch as pl +import torch.nn as nn +import os +import trimesh +import numpy as np + +from torch.utils.data import DataLoader + +from third_party.PartField.partfield.model.PVCNN.encoder_pc import TriPlanePC2Encoder, sample_triplane_feat +from third_party.PartField.partfield.model.triplane import TriplaneTransformer, get_grid_coord #, sample_from_planes, Voxel2Triplane +from third_party.PartField.partfield.model.model_utils import VanillaMLP +from third_party.PartField.partfield.dataloader import Demo_Dataset + +class Model(pl.LightningModule): + def __init__(self, cfg, obj_path): + super().__init__() + self.obj_path = obj_path + + self.save_hyperparameters() + self.cfg = cfg + self.automatic_optimization = False + self.triplane_resolution = cfg.triplane_resolution + self.triplane_channels_low = 
cfg.triplane_channels_low + self.triplane_transformer = TriplaneTransformer( + input_dim=cfg.triplane_channels_low * 2, + transformer_dim=1024, + transformer_layers=6, + transformer_heads=8, + triplane_low_res=32, + triplane_high_res=128, + triplane_dim=cfg.triplane_channels_high, + ) + self.sdf_decoder = VanillaMLP(input_dim=64, + output_dim=1, + out_activation="tanh", + n_neurons=64, #64 + n_hidden_layers=6) #6 + self.use_pvcnn = cfg.use_pvcnnonly + self.use_2d_feat = cfg.use_2d_feat + if self.use_pvcnn: + self.pvcnn = TriPlanePC2Encoder( + cfg.pvcnn, + device="cuda", + shape_min=-1, + shape_length=2, + use_2d_feat=self.use_2d_feat) #.cuda() + self.logit_scale = nn.Parameter(torch.tensor([1.0], requires_grad=True)) + self.grid_coord = get_grid_coord(256) + self.mse_loss = torch.nn.MSELoss() + self.l1_loss = torch.nn.L1Loss(reduction='none') + + if cfg.regress_2d_feat: + self.feat_decoder = VanillaMLP(input_dim=64, + output_dim=192, + out_activation="GELU", + n_neurons=64, #64 + n_hidden_layers=6) #6 + + def predict_dataloader(self): + dataset = Demo_Dataset(self.obj_path) + dataloader = DataLoader(dataset, + num_workers=self.cfg.dataset.val_num_workers, + batch_size=self.cfg.dataset.val_batch_size, + shuffle=False, + pin_memory=True, + drop_last=False) + + return dataloader + + @torch.no_grad() + def predict_step(self, batch, batch_idx): + N = batch['pc'].shape[0] + + assert N == 1 + + pc_feat = self.pvcnn(batch['pc'], batch['pc']) + + planes = pc_feat + planes = self.triplane_transformer(planes) + sdf_planes, part_planes = torch.split(planes, [64, planes.shape[2] - 64], dim=2) + + def sample_points(vertices, faces, n_point_per_face): + # Generate random barycentric coordinates + # borrowed from Kaolin https://github.com/NVIDIAGameWorks/kaolin/blob/master/kaolin/ops/mesh/trianglemesh.py#L43 + n_f = faces.shape[0] + u = torch.sqrt(torch.rand((n_f, n_point_per_face, 1), + device=vertices.device, + dtype=vertices.dtype)) + v = torch.rand((n_f, n_point_per_face, 1), + device=vertices.device, + dtype=vertices.dtype) + w0 = 1 - u + w1 = u * (1 - v) + w2 = u * v + + face_v_0 = torch.index_select(vertices, 0, faces[:, 0].reshape(-1)) + face_v_1 = torch.index_select(vertices, 0, faces[:, 1].reshape(-1)) + face_v_2 = torch.index_select(vertices, 0, faces[:, 2].reshape(-1)) + points = w0 * face_v_0.unsqueeze(dim=1) + w1 * face_v_1.unsqueeze(dim=1) + w2 * face_v_2.unsqueeze(dim=1) + return points + + def sample_and_mean_memory_save_version(part_planes, tensor_vertices, n_point_per_face): + n_sample_each = self.cfg.n_sample_each # we iterate over this to avoid OOM + n_v = tensor_vertices.shape[1] + n_sample = n_v // n_sample_each + 1 + all_sample = [] + for i_sample in range(n_sample): + sampled_feature = sample_triplane_feat(part_planes, tensor_vertices[:, i_sample * n_sample_each: i_sample * n_sample_each + n_sample_each,]) + assert sampled_feature.shape[1] % n_point_per_face == 0 + sampled_feature = sampled_feature.reshape(1, -1, n_point_per_face, sampled_feature.shape[-1]) + sampled_feature = torch.mean(sampled_feature, axis=-2) + all_sample.append(sampled_feature) + return torch.cat(all_sample, dim=1) + + part_planes = part_planes.cpu().numpy() + return part_planes, batch['uid'][0] \ No newline at end of file diff --git a/third_party/PartField/partfield/utils.py b/third_party/PartField/partfield/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8dc176434f91bd753f2daee6cc7d3c4fd7ce163b --- /dev/null +++ b/third_party/PartField/partfield/utils.py @@ -0,0 +1,5 @@ +import 
trimesh + +def load_mesh_util(input_fname): + mesh = trimesh.load(input_fname, force='mesh', process=False) + return mesh \ No newline at end of file diff --git a/third_party/TRELLIS/dataset_toolkits/blender_script/io_scene_usdz.zip b/third_party/TRELLIS/dataset_toolkits/blender_script/io_scene_usdz.zip new file mode 100644 index 0000000000000000000000000000000000000000..153a9d97245cfbf98c2c53869987d82f1396922e --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/blender_script/io_scene_usdz.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec07ab6125fe0a021ed08c64169eceda126330401aba3d494d5203d26ac4b093 +size 34685 diff --git a/third_party/TRELLIS/dataset_toolkits/blender_script/render.py b/third_party/TRELLIS/dataset_toolkits/blender_script/render.py new file mode 100644 index 0000000000000000000000000000000000000000..ba0010b0840b6fc7027e9a0d37e6b865a90267e3 --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/blender_script/render.py @@ -0,0 +1,564 @@ +import argparse, sys, os, math, re, glob +from typing import * +import bpy +from mathutils import Vector, Matrix +import numpy as np +import json +import glob + + +"""=============== BLENDER ===============""" + +IMPORT_FUNCTIONS: Dict[str, Callable] = { + "obj": bpy.ops.import_scene.obj, + "glb": bpy.ops.import_scene.gltf, + "gltf": bpy.ops.import_scene.gltf, + "usd": bpy.ops.import_scene.usd, + "fbx": bpy.ops.import_scene.fbx, + "stl": bpy.ops.import_mesh.stl, + "usda": bpy.ops.import_scene.usda, + "dae": bpy.ops.wm.collada_import, + "ply": bpy.ops.import_mesh.ply, + "abc": bpy.ops.wm.alembic_import, + "blend": bpy.ops.wm.append, +} + +EXT = { + 'PNG': 'png', + 'JPEG': 'jpg', + 'OPEN_EXR': 'exr', + 'TIFF': 'tiff', + 'BMP': 'bmp', + 'HDR': 'hdr', + 'TARGA': 'tga' +} + +def init_render(engine='CYCLES', resolution=512, geo_mode=False, threads=None): + bpy.context.scene.render.engine = engine + bpy.context.scene.render.resolution_x = resolution + bpy.context.scene.render.resolution_y = resolution + bpy.context.scene.render.resolution_percentage = 100 + bpy.context.scene.render.image_settings.file_format = 'PNG' + bpy.context.scene.render.image_settings.color_mode = 'RGBA' + bpy.context.scene.render.film_transparent = True + bpy.context.scene.render.use_persistent_data = True + + if threads is not None and threads > 0: + bpy.context.scene.render.threads_mode = 'FIXED' + bpy.context.scene.render.threads = threads + + bpy.context.scene.cycles.device = 'GPU' + bpy.context.scene.cycles.samples = 128 if not geo_mode else 1 + bpy.context.scene.cycles.filter_type = 'BOX' + bpy.context.scene.cycles.filter_width = 1 + bpy.context.scene.cycles.diffuse_bounces = 1 + bpy.context.scene.cycles.glossy_bounces = 1 + bpy.context.scene.cycles.transparent_max_bounces = 3 if not geo_mode else 0 + bpy.context.scene.cycles.transmission_bounces = 3 if not geo_mode else 1 + bpy.context.scene.cycles.use_denoising = True + # bpy.context.scene.cycles.denoiser = 'OPTIX' + bpy.context.scene.cycles.denoiser = 'OPENIMAGEDENOISE' + bpy.context.scene.cycles.noise_threshold = 0.05 + + # Force single tile if possible to maximize GPU throughput for small images + if hasattr(bpy.context.scene.cycles, 'use_auto_tile'): + bpy.context.scene.cycles.use_auto_tile = False + if hasattr(bpy.context.scene.cycles, 'tile_size'): + bpy.context.scene.cycles.tile_size = resolution + + # Device setup + cycles_prefs = bpy.context.preferences.addons['cycles'].preferences + cycles_prefs.get_devices() + + cuda_devices = [d for d in cycles_prefs.devices if d.type 
== 'CUDA'] + optix_devices = [d for d in cycles_prefs.devices if d.type == 'OPTIX'] + + # Check environment variable to optionally disable GPU + use_gpu = (len(cuda_devices) > 0 or len(optix_devices) > 0) and os.environ.get("CUDA_VISIBLE_DEVICES") != "-1" + + if use_gpu: + bpy.context.scene.cycles.device = 'GPU' + if len(optix_devices) > 0: + cycles_prefs.compute_device_type = 'OPTIX' + else: + cycles_prefs.compute_device_type = 'CUDA' + + for device in cycles_prefs.devices: + if device.type in {'CUDA', 'OPTIX'}: + device.use = True + print(f"[init_render] Using GPU: {cycles_prefs.compute_device_type}") + else: + bpy.context.scene.cycles.device = 'CPU' + print("[init_render] GPU not found or disabled, using CPU.") + +def init_nodes(save_depth=False, save_normal=False, save_albedo=False, save_mist=False): + if not any([save_depth, save_normal, save_albedo, save_mist]): + return {}, {} + outputs = {} + spec_nodes = {} + + bpy.context.scene.use_nodes = True + bpy.context.scene.view_layers['View Layer'].use_pass_z = save_depth + bpy.context.scene.view_layers['View Layer'].use_pass_normal = save_normal + bpy.context.scene.view_layers['View Layer'].use_pass_diffuse_color = save_albedo + bpy.context.scene.view_layers['View Layer'].use_pass_mist = save_mist + + nodes = bpy.context.scene.node_tree.nodes + links = bpy.context.scene.node_tree.links + for n in nodes: + nodes.remove(n) + + render_layers = nodes.new('CompositorNodeRLayers') + + if save_depth: + depth_file_output = nodes.new('CompositorNodeOutputFile') + depth_file_output.base_path = '' + depth_file_output.file_slots[0].use_node_format = True + depth_file_output.format.file_format = 'PNG' + depth_file_output.format.color_depth = '16' + depth_file_output.format.color_mode = 'BW' + # Remap to 0-1 + map = nodes.new(type="CompositorNodeMapRange") + map.inputs[1].default_value = 0 # (min value you will be getting) + map.inputs[2].default_value = 10 # (max value you will be getting) + map.inputs[3].default_value = 0 # (min value you will map to) + map.inputs[4].default_value = 1 # (max value you will map to) + + links.new(render_layers.outputs['Depth'], map.inputs[0]) + links.new(map.outputs[0], depth_file_output.inputs[0]) + + outputs['depth'] = depth_file_output + spec_nodes['depth_map'] = map + + if save_normal: + normal_file_output = nodes.new('CompositorNodeOutputFile') + normal_file_output.base_path = '' + normal_file_output.file_slots[0].use_node_format = True + normal_file_output.format.file_format = 'OPEN_EXR' + normal_file_output.format.color_mode = 'RGB' + normal_file_output.format.color_depth = '16' + + links.new(render_layers.outputs['Normal'], normal_file_output.inputs[0]) + + outputs['normal'] = normal_file_output + + if save_albedo: + albedo_file_output = nodes.new('CompositorNodeOutputFile') + albedo_file_output.base_path = '' + albedo_file_output.file_slots[0].use_node_format = True + albedo_file_output.format.file_format = 'PNG' + albedo_file_output.format.color_mode = 'RGBA' + albedo_file_output.format.color_depth = '8' + + alpha_albedo = nodes.new('CompositorNodeSetAlpha') + + links.new(render_layers.outputs['DiffCol'], alpha_albedo.inputs['Image']) + links.new(render_layers.outputs['Alpha'], alpha_albedo.inputs['Alpha']) + links.new(alpha_albedo.outputs['Image'], albedo_file_output.inputs[0]) + + outputs['albedo'] = albedo_file_output + + if save_mist: + bpy.data.worlds['World'].mist_settings.start = 0 + bpy.data.worlds['World'].mist_settings.depth = 10 + + mist_file_output = nodes.new('CompositorNodeOutputFile') + 
mist_file_output.base_path = '' + mist_file_output.file_slots[0].use_node_format = True + mist_file_output.format.file_format = 'PNG' + mist_file_output.format.color_mode = 'BW' + mist_file_output.format.color_depth = '16' + + links.new(render_layers.outputs['Mist'], mist_file_output.inputs[0]) + + outputs['mist'] = mist_file_output + + return outputs, spec_nodes + +def init_scene() -> None: + """Resets the scene to a clean state. + + Returns: + None + """ + # delete everything + for obj in bpy.data.objects: + bpy.data.objects.remove(obj, do_unlink=True) + + # delete all the materials + for material in bpy.data.materials: + bpy.data.materials.remove(material, do_unlink=True) + + # delete all the textures + for texture in bpy.data.textures: + bpy.data.textures.remove(texture, do_unlink=True) + + # delete all the images + for image in bpy.data.images: + bpy.data.images.remove(image, do_unlink=True) + +def init_camera(): + cam = bpy.data.objects.new('Camera', bpy.data.cameras.new('Camera')) + bpy.context.collection.objects.link(cam) + bpy.context.scene.camera = cam + cam.data.sensor_height = cam.data.sensor_width = 32 + cam_constraint = cam.constraints.new(type='TRACK_TO') + cam_constraint.track_axis = 'TRACK_NEGATIVE_Z' + cam_constraint.up_axis = 'UP_Y' + cam_empty = bpy.data.objects.new("Empty", None) + cam_empty.location = (0, 0, 0) + bpy.context.scene.collection.objects.link(cam_empty) + cam_constraint.target = cam_empty + return cam + +def init_lighting(): + # Clear existing lights + bpy.ops.object.select_all(action="DESELECT") + bpy.ops.object.select_by_type(type="LIGHT") + bpy.ops.object.delete() + + # Create key light + default_light = bpy.data.objects.new("Default_Light", bpy.data.lights.new("Default_Light", type="POINT")) + bpy.context.collection.objects.link(default_light) + default_light.data.energy = 1000 + default_light.location = (4, 1, 6) + default_light.rotation_euler = (0, 0, 0) + + # create top light + top_light = bpy.data.objects.new("Top_Light", bpy.data.lights.new("Top_Light", type="AREA")) + bpy.context.collection.objects.link(top_light) + top_light.data.energy = 10000 + top_light.location = (0, 0, 10) + top_light.scale = (100, 100, 100) + + # create bottom light + bottom_light = bpy.data.objects.new("Bottom_Light", bpy.data.lights.new("Bottom_Light", type="AREA")) + bpy.context.collection.objects.link(bottom_light) + bottom_light.data.energy = 1000 + bottom_light.location = (0, 0, -10) + bottom_light.rotation_euler = (0, 0, 0) + + return { + "default_light": default_light, + "top_light": top_light, + "bottom_light": bottom_light + } + + +def load_object(object_path: str) -> None: + """Loads a model with a supported file extension into the scene. + + Args: + object_path (str): Path to the model file. + + Raises: + ValueError: If the file extension is not supported. 
+
+    Returns:
+        None
+    """
+    file_extension = object_path.split(".")[-1].lower()
+    # `split` always returns a string, so check against the supported importers instead of None
+    if file_extension != "usdz" and file_extension not in IMPORT_FUNCTIONS:
+        raise ValueError(f"Unsupported file type: {object_path}")
+
+    if file_extension == "usdz":
+        # install usdz io package
+        dirname = os.path.dirname(os.path.realpath(__file__))
+        usdz_package = os.path.join(dirname, "io_scene_usdz.zip")
+        bpy.ops.preferences.addon_install(filepath=usdz_package)
+        # enable it
+        addon_name = "io_scene_usdz"
+        bpy.ops.preferences.addon_enable(module=addon_name)
+        # import the usdz
+        from io_scene_usdz.import_usdz import import_usdz
+
+        import_usdz(bpy.context, filepath=object_path, materials=True, animations=True)
+        return None
+
+    # load from existing import functions
+    import_function = IMPORT_FUNCTIONS[file_extension]
+
+    print(f"Loading object from {object_path}")
+    if file_extension == "blend":
+        import_function(directory=object_path, link=False)
+    elif file_extension in {"glb", "gltf"}:
+        import_function(filepath=object_path, merge_vertices=True, import_shading='NORMALS')
+    else:
+        import_function(filepath=object_path)
+
+def delete_invisible_objects() -> None:
+    """Deletes all invisible objects in the scene.
+
+    Returns:
+        None
+    """
+    # bpy.ops.object.mode_set(mode="OBJECT")
+    bpy.ops.object.select_all(action="DESELECT")
+    for obj in bpy.context.scene.objects:
+        if obj.hide_viewport or obj.hide_render:
+            obj.hide_viewport = False
+            obj.hide_render = False
+            obj.hide_select = False
+            obj.select_set(True)
+    bpy.ops.object.delete()
+
+    # Delete invisible collections
+    invisible_collections = [col for col in bpy.data.collections if col.hide_viewport]
+    for col in invisible_collections:
+        bpy.data.collections.remove(col)
+
+def split_mesh_normal():
+    bpy.ops.object.select_all(action="DESELECT")
+    objs = [obj for obj in bpy.context.scene.objects if obj.type == "MESH"]
+    bpy.context.view_layer.objects.active = objs[0]
+    for obj in objs:
+        obj.select_set(True)
+    bpy.ops.object.mode_set(mode="EDIT")
+    bpy.ops.mesh.select_all(action='SELECT')
+    bpy.ops.mesh.split_normals()
+    bpy.ops.object.mode_set(mode='OBJECT')
+    bpy.ops.object.select_all(action="DESELECT")
+
+def delete_custom_normals():
+    for this_obj in bpy.data.objects:
+        if this_obj.type == "MESH":
+            bpy.context.view_layer.objects.active = this_obj
+            bpy.ops.mesh.customdata_custom_splitnormals_clear()
+
+def override_material():
+    new_mat = bpy.data.materials.new(name="Override0123456789")
+    new_mat.use_nodes = True
+    new_mat.node_tree.nodes.clear()
+    bsdf = new_mat.node_tree.nodes.new('ShaderNodeBsdfDiffuse')
+    bsdf.inputs[0].default_value = (0.5, 0.5, 0.5, 1)
+    bsdf.inputs[1].default_value = 1
+    output = new_mat.node_tree.nodes.new('ShaderNodeOutputMaterial')
+    new_mat.node_tree.links.new(bsdf.outputs['BSDF'], output.inputs['Surface'])
+    bpy.context.scene.view_layers['View Layer'].material_override = new_mat
+
+def unhide_all_objects() -> None:
+    """Unhides all objects in the scene.
+
+    Returns:
+        None
+    """
+    for obj in bpy.context.scene.objects:
+        obj.hide_set(False)
+
+def convert_to_meshes() -> None:
+    """Converts all objects in the scene to meshes.
+
+    Returns:
+        None
+    """
+    bpy.ops.object.select_all(action="DESELECT")
+    bpy.context.view_layer.objects.active = [obj for obj in bpy.context.scene.objects if obj.type == "MESH"][0]
+    for obj in bpy.context.scene.objects:
+        obj.select_set(True)
+    bpy.ops.object.convert(target="MESH")
+
+def triangulate_meshes() -> None:
+    """Triangulates all meshes in the scene.
+ + Returns: + None + """ + bpy.ops.object.select_all(action="DESELECT") + objs = [obj for obj in bpy.context.scene.objects if obj.type == "MESH"] + bpy.context.view_layer.objects.active = objs[0] + for obj in objs: + obj.select_set(True) + bpy.ops.object.mode_set(mode="EDIT") + bpy.ops.mesh.reveal() + bpy.ops.mesh.select_all(action="SELECT") + bpy.ops.mesh.quads_convert_to_tris(quad_method="BEAUTY", ngon_method="BEAUTY") + bpy.ops.object.mode_set(mode="OBJECT") + bpy.ops.object.select_all(action="DESELECT") + +def scene_bbox() -> Tuple[Vector, Vector]: + """Returns the bounding box of the scene. + + Taken from Shap-E rendering script + (https://github.com/openai/shap-e/blob/main/shap_e/rendering/blender/blender_script.py#L68-L82) + + Returns: + Tuple[Vector, Vector]: The minimum and maximum coordinates of the bounding box. + """ + bbox_min = (math.inf,) * 3 + bbox_max = (-math.inf,) * 3 + found = False + scene_meshes = [obj for obj in bpy.context.scene.objects.values() if isinstance(obj.data, bpy.types.Mesh)] + for obj in scene_meshes: + found = True + for coord in obj.bound_box: + coord = Vector(coord) + coord = obj.matrix_world @ coord + bbox_min = tuple(min(x, y) for x, y in zip(bbox_min, coord)) + bbox_max = tuple(max(x, y) for x, y in zip(bbox_max, coord)) + if not found: + raise RuntimeError("no objects in scene to compute bounding box for") + return Vector(bbox_min), Vector(bbox_max) + +def normalize_scene() -> Tuple[float, Vector]: + """Normalizes the scene by scaling and translating it to fit in a unit cube centered + at the origin. + + Mostly taken from the Point-E / Shap-E rendering script + (https://github.com/openai/point-e/blob/main/point_e/evals/scripts/blender_script.py#L97-L112), + but fix for multiple root objects: (see bug report here: + https://github.com/openai/shap-e/pull/60). + + Returns: + Tuple[float, Vector]: The scale factor and the offset applied to the scene. + """ + scene_root_objects = [obj for obj in bpy.context.scene.objects.values() if not obj.parent] + if len(scene_root_objects) > 1: + # create an empty object to be used as a parent for all root objects + scene = bpy.data.objects.new("ParentEmpty", None) + bpy.context.scene.collection.objects.link(scene) + + # parent all root objects to the empty object + for obj in scene_root_objects: + obj.parent = scene + else: + scene = scene_root_objects[0] + + bbox_min, bbox_max = scene_bbox() + scale = 1 / max(bbox_max - bbox_min) + scene.scale = scene.scale * scale + + # Apply scale to matrix_world. 
+    bpy.context.view_layer.update()
+    bbox_min, bbox_max = scene_bbox()
+    offset = -(bbox_min + bbox_max) / 2
+    scene.matrix_world.translation += offset
+    bpy.ops.object.select_all(action="DESELECT")
+
+    return scale, offset
+
+def get_transform_matrix(obj: bpy.types.Object) -> list:
+    pos, rt, _ = obj.matrix_world.decompose()
+    rt = rt.to_matrix()
+    matrix = []
+    for ii in range(3):
+        a = []
+        for jj in range(3):
+            a.append(rt[ii][jj])
+        a.append(pos[ii])
+        matrix.append(a)
+    matrix.append([0, 0, 0, 1])
+    return matrix
+
+def main(arg):
+    os.makedirs(arg.output_folder, exist_ok=True)
+
+    # Initialize context
+    init_render(engine=arg.engine, resolution=arg.resolution, geo_mode=arg.geo_mode, threads=arg.threads)
+    outputs, spec_nodes = init_nodes(
+        save_depth=arg.save_depth,
+        save_normal=arg.save_normal,
+        save_albedo=arg.save_albedo,
+        save_mist=arg.save_mist
+    )
+    if arg.object.endswith(".blend"):
+        delete_invisible_objects()
+    else:
+        init_scene()
+        load_object(arg.object)
+        if arg.split_normal:
+            split_mesh_normal()
+        # delete_custom_normals()
+    print('[INFO] Scene initialized.')
+
+    # normalize scene
+    scale, offset = normalize_scene()
+    print('[INFO] Scene normalized.')
+
+    # Initialize camera and lighting
+    cam = init_camera()
+    init_lighting()
+    print('[INFO] Camera and lighting initialized.')
+
+    # Override material
+    if arg.geo_mode:
+        override_material()
+
+    # Create a list of views
+    to_export = {
+        "aabb": [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]],
+        "scale": scale,
+        "offset": [offset.x, offset.y, offset.z],
+        "frames": []
+    }
+    views = json.loads(arg.views)
+    for i, view in enumerate(views):
+        cam.location = (
+            view['radius'] * np.cos(view['yaw']) * np.cos(view['pitch']),
+            view['radius'] * np.sin(view['yaw']) * np.cos(view['pitch']),
+            view['radius'] * np.sin(view['pitch'])
+        )
+        cam.data.lens = 16 / np.tan(view['fov'] / 2)
+
+        if arg.save_depth:
+            spec_nodes['depth_map'].inputs[1].default_value = view['radius'] - 0.5 * np.sqrt(3)
+            spec_nodes['depth_map'].inputs[2].default_value = view['radius'] + 0.5 * np.sqrt(3)
+
+        bpy.context.scene.render.filepath = os.path.join(arg.output_folder, f'{i:03d}.png')
+        for name, output in outputs.items():
+            output.file_slots[0].path = os.path.join(arg.output_folder, f'{i:03d}_{name}')
+
+        # Render the scene
+        bpy.ops.render.render(write_still=True)
+        bpy.context.view_layer.update()
+        for name, output in outputs.items():
+            ext = EXT[output.format.file_format]
+            path = glob.glob(f'{output.file_slots[0].path}*.{ext}')[0]
+            os.rename(path, f'{output.file_slots[0].path}.{ext}')
+
+        # Save camera parameters
+        metadata = {
+            "file_path": f'{i:03d}.png',
+            "camera_angle_x": view['fov'],
+            "transform_matrix": get_transform_matrix(cam)
+        }
+        if arg.save_depth:
+            metadata['depth'] = {
+                'min': view['radius'] - 0.5 * np.sqrt(3),
+                'max': view['radius'] + 0.5 * np.sqrt(3)
+            }
+        to_export["frames"].append(metadata)
+
+    # Save the camera parameters
+    with open(os.path.join(arg.output_folder, 'transforms.json'), 'w') as f:
+        json.dump(to_export, f, indent=4)
+
+    if arg.save_mesh:
+        # triangulate meshes
+        unhide_all_objects()
+        convert_to_meshes()
+        triangulate_meshes()
+        print('[INFO] Meshes triangulated.')
+
+        # export ply mesh
+        bpy.ops.export_mesh.ply(filepath=os.path.join(arg.output_folder, 'mesh.ply'))
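For orientation, a hedged sketch of how this script is typically driven (it assumes a `blender` binary on PATH; script arguments follow the `--` separator consumed by the `sys.argv` slicing below):

import json
import subprocess

views = [{"yaw": 0.0, "pitch": 0.3, "radius": 2.0, "fov": 0.7}]
subprocess.run([
    "blender", "-b", "-P", "render.py", "--",
    "--object", "model.glb",
    "--output_folder", "/tmp/renders",
    "--views", json.dumps(views),
    "--save_depth",
], check=True)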
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Renders a given object file by rotating a camera around it.')
+    parser.add_argument('--views', type=str, help='JSON string of views. Contains a list of {yaw, pitch, radius, fov} objects.')
+    parser.add_argument('--object', type=str, help='Path to the 3D model file to be rendered.')
+    parser.add_argument('--output_folder', type=str, default='/tmp', help='The path the output will be dumped to.')
+    parser.add_argument('--resolution', type=int, default=512, help='Resolution of the images.')
+    parser.add_argument('--engine', type=str, default='CYCLES', help='Blender internal engine for rendering. E.g. CYCLES, BLENDER_EEVEE, ...')
+    parser.add_argument('--geo_mode', action='store_true', help='Geometry mode for rendering.')
+    parser.add_argument('--save_depth', action='store_true', help='Save the depth maps.')
+    parser.add_argument('--save_normal', action='store_true', help='Save the normal maps.')
+    parser.add_argument('--save_albedo', action='store_true', help='Save the albedo maps.')
+    parser.add_argument('--save_mist', action='store_true', help='Save the mist distance maps.')
+    parser.add_argument('--split_normal', action='store_true', help='Split the normals of the mesh.')
+    parser.add_argument('--save_mesh', action='store_true', help='Save the mesh as a .ply file.')
+    parser.add_argument('--threads', type=int, default=None, help='Number of CPU threads to use.')
+    argv = sys.argv[sys.argv.index("--") + 1:]
+    args = parser.parse_args(argv)
+
+    main(args)
\ No newline at end of file
diff --git a/third_party/TRELLIS/dataset_toolkits/build_metadata.py b/third_party/TRELLIS/dataset_toolkits/build_metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c1f6396a9d11ca99be8eb8ffae22e64472f4ec8
--- /dev/null
+++ b/third_party/TRELLIS/dataset_toolkits/build_metadata.py
@@ -0,0 +1,270 @@
+import os
+import shutil
+import sys
+import time
+import importlib
+import argparse
+import numpy as np
+import pandas as pd
+from tqdm import tqdm
+from easydict import EasyDict as edict
+from concurrent.futures import ThreadPoolExecutor
+import utils3d
+
+def get_first_directory(path):
+    with os.scandir(path) as it:
+        for entry in it:
+            if entry.is_dir():
+                return entry.name
+    return None
+
+def need_process(key):
+    return key in opt.field or opt.field == ['all']
+
+if __name__ == '__main__':
+    dataset_utils = importlib.import_module(f'datasets.{sys.argv[1]}')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--output_dir', type=str, required=True,
+                        help='Directory to save the metadata')
+    parser.add_argument('--field', type=str, default='all',
+                        help='Fields to process, separated by commas')
+    parser.add_argument('--from_file', action='store_true',
+                        help='Build metadata from files instead of from processing records. '
+                             'Useful when a processing step failed to write records but the files already exist.')
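+    # Illustrative usage (dataset name and paths below are examples only):
+    #   python dataset_toolkits/build_metadata.py ObjaverseXL --output_dir datasets/ObjaverseXL --field rendered,voxelized
+    # The positional dataset name selects datasets/<name>.py via importlib above.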
+    dataset_utils.add_args(parser)
+    opt = parser.parse_args(sys.argv[2:])
+    opt = edict(vars(opt))
+
+    os.makedirs(opt.output_dir, exist_ok=True)
+    os.makedirs(os.path.join(opt.output_dir, 'merged_records'), exist_ok=True)
+
+    opt.field = opt.field.split(',')
+
+    timestamp = str(int(time.time()))
+
+    # get file list
+    if os.path.exists(os.path.join(opt.output_dir, 'metadata.csv')):
+        print('Loading previous metadata...')
+        metadata = pd.read_csv(os.path.join(opt.output_dir, 'metadata.csv'))
+    else:
+        metadata = dataset_utils.get_metadata(**opt)
+    metadata.set_index('sha256', inplace=True)
+
+    # merge downloaded
+    df_files = [f for f in os.listdir(opt.output_dir) if f.startswith('downloaded_') and f.endswith('.csv')]
+    df_parts = []
+    for f in df_files:
+        try:
+            df_parts.append(pd.read_csv(os.path.join(opt.output_dir, f)))
+        except:
+            pass
+    if len(df_parts) > 0:
+        df = pd.concat(df_parts)
+        df.set_index('sha256', inplace=True)
+        if 'local_path' in metadata.columns:
+            metadata.update(df, overwrite=True)
+        else:
+            metadata = metadata.join(df, on='sha256', how='left')
+        for f in df_files:
+            shutil.move(os.path.join(opt.output_dir, f), os.path.join(opt.output_dir, 'merged_records', f'{timestamp}_{f}'))
+
+    # detect models
+    image_models = []
+    if os.path.exists(os.path.join(opt.output_dir, 'features')):
+        image_models = os.listdir(os.path.join(opt.output_dir, 'features'))
+    latent_models = []
+    if os.path.exists(os.path.join(opt.output_dir, 'latents')):
+        latent_models = os.listdir(os.path.join(opt.output_dir, 'latents'))
+    ss_latent_models = []
+    if os.path.exists(os.path.join(opt.output_dir, 'ss_latents')):
+        ss_latent_models = os.listdir(os.path.join(opt.output_dir, 'ss_latents'))
+    print(f'Image models: {image_models}')
+    print(f'Latent models: {latent_models}')
+    print(f'Sparse Structure latent models: {ss_latent_models}')
+
+    if 'rendered' not in metadata.columns:
+        metadata['rendered'] = [False] * len(metadata)
+    if 'voxelized' not in metadata.columns:
+        metadata['voxelized'] = [False] * len(metadata)
+    if 'num_voxels' not in metadata.columns:
+        metadata['num_voxels'] = [0] * len(metadata)
+    if 'cond_rendered' not in metadata.columns:
+        metadata['cond_rendered'] = [False] * len(metadata)
+    for model in image_models:
+        if f'feature_{model}' not in metadata.columns:
+            metadata[f'feature_{model}'] = [False] * len(metadata)
+    for model in latent_models:
+        if f'latent_{model}' not in metadata.columns:
+            metadata[f'latent_{model}'] = [False] * len(metadata)
+    for model in ss_latent_models:
+        if f'ss_latent_{model}' not in metadata.columns:
+            metadata[f'ss_latent_{model}'] = [False] * len(metadata)
+
+    # merge rendered
+    df_files = [f for f in os.listdir(opt.output_dir) if f.startswith('rendered_') and f.endswith('.csv')]
+    df_parts = []
+    for f in df_files:
+        try:
+            df_parts.append(pd.read_csv(os.path.join(opt.output_dir, f)))
+        except:
+            pass
+    if len(df_parts) > 0:
+        df = pd.concat(df_parts)
+        df.set_index('sha256', inplace=True)
+        metadata.update(df, overwrite=True)
+        for f in df_files:
+            shutil.move(os.path.join(opt.output_dir, f), os.path.join(opt.output_dir, 'merged_records', f'{timestamp}_{f}'))
+
+    # merge voxelized
+    df_files = [f for f in os.listdir(opt.output_dir) if f.startswith('voxelized_') and f.endswith('.csv')]
+    df_parts = []
+    for f in df_files:
+        try:
+            df_parts.append(pd.read_csv(os.path.join(opt.output_dir, f)))
+        except:
+            pass
+    if len(df_parts) > 0:
+        df = pd.concat(df_parts)
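+        # As in the merge blocks above: the shard is indexed by sha256 and folded
+        # into `metadata`; DataFrame.update only overwrites cells of columns that
+        # already exist in the table (they were initialized above).
+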
df.set_index('sha256', inplace=True) + metadata.update(df, overwrite=True) + for f in df_files: + shutil.move(os.path.join(opt.output_dir, f), os.path.join(opt.output_dir, 'merged_records', f'{timestamp}_{f}')) + + # merge cond_rendered + df_files = [f for f in os.listdir(opt.output_dir) if f.startswith('cond_rendered_') and f.endswith('.csv')] + df_parts = [] + for f in df_files: + try: + df_parts.append(pd.read_csv(os.path.join(opt.output_dir, f))) + except: + pass + if len(df_parts) > 0: + df = pd.concat(df_parts) + df.set_index('sha256', inplace=True) + metadata.update(df, overwrite=True) + for f in df_files: + shutil.move(os.path.join(opt.output_dir, f), os.path.join(opt.output_dir, 'merged_records', f'{timestamp}_{f}')) + + # merge features + for model in image_models: + df_files = [f for f in os.listdir(opt.output_dir) if f.startswith(f'feature_{model}_') and f.endswith('.csv')] + df_parts = [] + for f in df_files: + try: + df_parts.append(pd.read_csv(os.path.join(opt.output_dir, f))) + except: + pass + if len(df_parts) > 0: + df = pd.concat(df_parts) + df.set_index('sha256', inplace=True) + metadata.update(df, overwrite=True) + for f in df_files: + shutil.move(os.path.join(opt.output_dir, f), os.path.join(opt.output_dir, 'merged_records', f'{timestamp}_{f}')) + + # merge latents + for model in latent_models: + df_files = [f for f in os.listdir(opt.output_dir) if f.startswith(f'latent_{model}_') and f.endswith('.csv')] + df_parts = [] + for f in df_files: + try: + df_parts.append(pd.read_csv(os.path.join(opt.output_dir, f))) + except: + pass + if len(df_parts) > 0: + df = pd.concat(df_parts) + df.set_index('sha256', inplace=True) + metadata.update(df, overwrite=True) + for f in df_files: + shutil.move(os.path.join(opt.output_dir, f), os.path.join(opt.output_dir, 'merged_records', f'{timestamp}_{f}')) + + # merge sparse structure latents + for model in ss_latent_models: + df_files = [f for f in os.listdir(opt.output_dir) if f.startswith(f'ss_latent_{model}_') and f.endswith('.csv')] + df_parts = [] + for f in df_files: + try: + df_parts.append(pd.read_csv(os.path.join(opt.output_dir, f))) + except: + pass + if len(df_parts) > 0: + df = pd.concat(df_parts) + df.set_index('sha256', inplace=True) + metadata.update(df, overwrite=True) + for f in df_files: + shutil.move(os.path.join(opt.output_dir, f), os.path.join(opt.output_dir, 'merged_records', f'{timestamp}_{f}')) + + # build metadata from files + if opt.from_file: + with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor, \ + tqdm(total=len(metadata), desc="Building metadata") as pbar: + def worker(sha256): + try: + if need_process('rendered') and metadata.loc[sha256, 'rendered'] == False and \ + os.path.exists(os.path.join(opt.output_dir, 'renders', sha256, 'transforms.json')): + metadata.loc[sha256, 'rendered'] = True + if need_process('voxelized') and metadata.loc[sha256, 'rendered'] == True and metadata.loc[sha256, 'voxelized'] == False and \ + os.path.exists(os.path.join(opt.output_dir, 'voxels', f'{sha256}.ply')): + try: + pts = utils3d.io.read_ply(os.path.join(opt.output_dir, 'voxels', f'{sha256}.ply'))[0] + metadata.loc[sha256, 'voxelized'] = True + metadata.loc[sha256, 'num_voxels'] = len(pts) + except Exception as e: + pass + if need_process('cond_rendered') and metadata.loc[sha256, 'cond_rendered'] == False and \ + os.path.exists(os.path.join(opt.output_dir, 'renders_cond', sha256, 'transforms.json')): + metadata.loc[sha256, 'cond_rendered'] = True + for model in image_models: + if need_process(f'feature_{model}') 
and \
+                        metadata.loc[sha256, f'feature_{model}'] == False and \
+                        metadata.loc[sha256, 'rendered'] == True and \
+                        metadata.loc[sha256, 'voxelized'] == True and \
+                        os.path.exists(os.path.join(opt.output_dir, 'features', model, f'{sha256}.npz')):
+                        metadata.loc[sha256, f'feature_{model}'] = True
+                    for model in latent_models:
+                        if need_process(f'latent_{model}') and \
+                            metadata.loc[sha256, f'latent_{model}'] == False and \
+                            metadata.loc[sha256, 'rendered'] == True and \
+                            metadata.loc[sha256, 'voxelized'] == True and \
+                            os.path.exists(os.path.join(opt.output_dir, 'latents', model, f'{sha256}.npz')):
+                            metadata.loc[sha256, f'latent_{model}'] = True
+                    for model in ss_latent_models:
+                        if need_process(f'ss_latent_{model}') and \
+                            metadata.loc[sha256, f'ss_latent_{model}'] == False and \
+                            metadata.loc[sha256, 'voxelized'] == True and \
+                            os.path.exists(os.path.join(opt.output_dir, 'ss_latents', model, f'{sha256}.npz')):
+                            metadata.loc[sha256, f'ss_latent_{model}'] = True
+                    pbar.update()
+                except Exception as e:
+                    print(f'Error processing {sha256}: {e}')
+                    pbar.update()
+
+            executor.map(worker, metadata.index)
+            executor.shutdown(wait=True)
+
+    # statistics
+    metadata.to_csv(os.path.join(opt.output_dir, 'metadata.csv'))
+    num_downloaded = metadata['local_path'].count() if 'local_path' in metadata.columns else 0
+    with open(os.path.join(opt.output_dir, 'statistics.txt'), 'w') as f:
+        f.write('Statistics:\n')
+        f.write(f'  - Number of assets: {len(metadata)}\n')
+        f.write(f'  - Number of assets downloaded: {num_downloaded}\n')
+        f.write(f'  - Number of assets rendered: {metadata["rendered"].sum()}\n')
+        f.write(f'  - Number of assets voxelized: {metadata["voxelized"].sum()}\n')
+        if len(image_models) != 0:
+            f.write(f'  - Number of assets with image features extracted:\n')
+            for model in image_models:
+                f.write(f'    - {model}: {metadata[f"feature_{model}"].sum()}\n')
+        if len(latent_models) != 0:
+            f.write(f'  - Number of assets with latents extracted:\n')
+            for model in latent_models:
+                f.write(f'    - {model}: {metadata[f"latent_{model}"].sum()}\n')
+        if len(ss_latent_models) != 0:
+            f.write(f'  - Number of assets with sparse structure latents extracted:\n')
+            for model in ss_latent_models:
+                f.write(f'    - {model}: {metadata[f"ss_latent_{model}"].sum()}\n')
+        f.write(f'  - Number of assets with captions: {metadata["captions"].count()}\n')
+        f.write(f'  - Number of assets with image conditions: {metadata["cond_rendered"].sum()}\n')
+
+    with open(os.path.join(opt.output_dir, 'statistics.txt'), 'r') as f:
+        print(f.read())
\ No newline at end of file
diff --git a/third_party/TRELLIS/dataset_toolkits/datasets/3D-FUTURE.py b/third_party/TRELLIS/dataset_toolkits/datasets/3D-FUTURE.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5ccc632de8e5a1d4f4a3b2b3024b8a3361e12c4
--- /dev/null
+++ b/third_party/TRELLIS/dataset_toolkits/datasets/3D-FUTURE.py
@@ -0,0 +1,97 @@
+import os
+import re
+import argparse
+import zipfile
+from concurrent.futures import ThreadPoolExecutor
+from tqdm import tqdm
+import pandas as pd
+from utils import get_file_hash
+
+
+def add_args(parser: argparse.ArgumentParser):
+    pass
+
+
+def get_metadata(**kwargs):
+    metadata = pd.read_csv("hf://datasets/JeffreyXiang/TRELLIS-500K/3D-FUTURE.csv")
+    return metadata
+
+
+def download(metadata, output_dir, **kwargs):
+    os.makedirs(output_dir, exist_ok=True)
+
+    if not os.path.exists(os.path.join(output_dir, 'raw', '3D-FUTURE-model.zip')):
+        print("\033[93m")
+        print("3D-FUTURE has to be downloaded manually")
+        print(f"Please download the 3D-FUTURE-model.zip file and place it in the {output_dir}/raw directory")
+        print("Visit https://tianchi.aliyun.com/specials/promotion/alibaba-3d-future for more information")
+        print("\033[0m")
+        raise FileNotFoundError("3D-FUTURE-model.zip not found")
+
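+    # Below, each instance directory is extracted from the archive, hashed via its
+    # image.jpg with get_file_hash (streaming SHA-256, defined in
+    # dataset_toolkits/utils.py), and kept only if the digest matches the metadata.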
+    downloaded = {}
+    metadata = metadata.set_index("file_identifier")
+    with zipfile.ZipFile(os.path.join(output_dir, 'raw', '3D-FUTURE-model.zip')) as zip_ref:
+        all_names = zip_ref.namelist()
+        instances = [instance[:-1] for instance in all_names if re.match(r"^3D-FUTURE-model/[^/]+/$", instance)]
+        instances = list(filter(lambda x: x in metadata.index, instances))
+
+        with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor, \
+            tqdm(total=len(instances), desc="Extracting") as pbar:
+            def worker(instance: str) -> str:
+                try:
+                    instance_files = list(filter(lambda x: x.startswith(f"{instance}/") and not x.endswith("/"), all_names))
+                    zip_ref.extractall(os.path.join(output_dir, 'raw'), members=instance_files)
+                    sha256 = get_file_hash(os.path.join(output_dir, 'raw', f"{instance}/image.jpg"))
+                    pbar.update()
+                    return sha256
+                except Exception as e:
+                    pbar.update()
+                    print(f"Error extracting for {instance}: {e}")
+                    return None
+
+            sha256s = executor.map(worker, instances)
+            executor.shutdown(wait=True)
+
+        for k, sha256 in zip(instances, sha256s):
+            if sha256 is not None:
+                if sha256 == metadata.loc[k, "sha256"]:
+                    downloaded[sha256] = os.path.join("raw", f"{k}/raw_model.obj")
+                else:
+                    print(f"Error downloading {k}: sha256s do not match")
+
+    return pd.DataFrame(downloaded.items(), columns=['sha256', 'local_path'])
+
+
+def foreach_instance(metadata, output_dir, func, max_workers=None, desc='Processing objects') -> pd.DataFrame:
+    import os
+    from concurrent.futures import ThreadPoolExecutor
+    from tqdm import tqdm
+
+    # load metadata
+    metadata = metadata.to_dict('records')
+
+    # processing objects
+    records = []
+    max_workers = max_workers or os.cpu_count()
+    try:
+        with ThreadPoolExecutor(max_workers=max_workers) as executor, \
+            tqdm(total=len(metadata), desc=desc) as pbar:
+            def worker(metadatum):
+                try:
+                    local_path = metadatum['local_path']
+                    sha256 = metadatum['sha256']
+                    file = os.path.join(output_dir, local_path)
+                    record = func(file, sha256)
+                    if record is not None:
+                        records.append(record)
+                    pbar.update()
+                except Exception as e:
+                    print(f"Error processing object {sha256}: {e}")
+                    pbar.update()
+
+            executor.map(worker, metadata)
+            executor.shutdown(wait=True)
+    except:
+        print("Error happened during processing.")
+
+    return pd.DataFrame.from_records(records)
diff --git a/third_party/TRELLIS/dataset_toolkits/datasets/ABO.py b/third_party/TRELLIS/dataset_toolkits/datasets/ABO.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0aba22c2c4980b024091058a458811ab93805dd
--- /dev/null
+++ b/third_party/TRELLIS/dataset_toolkits/datasets/ABO.py
@@ -0,0 +1,96 @@
+import os
+import re
+import argparse
+import tarfile
+from concurrent.futures import ThreadPoolExecutor
+from tqdm import tqdm
+import pandas as pd
+from utils import get_file_hash
+
+
+def add_args(parser: argparse.ArgumentParser):
+    pass
+
+
+def get_metadata(**kwargs):
+    metadata = pd.read_csv("hf://datasets/JeffreyXiang/TRELLIS-500K/ABO.csv")
+    return metadata
+
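+
+# Illustrative sketch of the verify-by-hash pattern shared by these dataset
+# modules (`_verified_local_path` is a hypothetical helper, not used by this
+# file): a local file is accepted only when its SHA-256 digest matches the
+# value recorded in the TRELLIS-500K metadata.
+def _verified_local_path(path, expected_sha256):
+    actual = get_file_hash(path)  # streaming SHA-256 from dataset_toolkits/utils.py
+    return path if actual == expected_sha256 else None
+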
+
+def download(metadata, output_dir, **kwargs):
+    os.makedirs(os.path.join(output_dir, 'raw'), exist_ok=True)
+
+    if not os.path.exists(os.path.join(output_dir, 'raw', 'abo-3dmodels.tar')):
+        try:
+            os.makedirs(os.path.join(output_dir, 'raw'), exist_ok=True)
+            os.system(f"wget -O {output_dir}/raw/abo-3dmodels.tar https://amazon-berkeley-objects.s3.amazonaws.com/archives/abo-3dmodels.tar")
+        except:
+            print("\033[93m")
+            print("Error downloading ABO dataset. Please check your internet connection and try again.")
+            print(f"Or, you can manually download the abo-3dmodels.tar file and place it in the {output_dir}/raw directory")
+            print("Visit https://amazon-berkeley-objects.s3.amazonaws.com/index.html for more information")
+            print("\033[0m")
+            raise FileNotFoundError("Error downloading ABO dataset")
+
+    downloaded = {}
+    metadata = metadata.set_index("file_identifier")
+    with tarfile.open(os.path.join(output_dir, 'raw', 'abo-3dmodels.tar')) as tar:
+        with ThreadPoolExecutor(max_workers=1) as executor, \
+            tqdm(total=len(metadata), desc="Extracting") as pbar:
+            def worker(instance: str) -> str:
+                try:
+                    tar.extract(f"3dmodels/original/{instance}", path=os.path.join(output_dir, 'raw'))
+                    sha256 = get_file_hash(os.path.join(output_dir, 'raw/3dmodels/original', instance))
+                    pbar.update()
+                    return sha256
+                except Exception as e:
+                    pbar.update()
+                    print(f"Error extracting for {instance}: {e}")
+                    return None
+
+            sha256s = executor.map(worker, metadata.index)
+            executor.shutdown(wait=True)
+
+        for k, sha256 in zip(metadata.index, sha256s):
+            if sha256 is not None:
+                if sha256 == metadata.loc[k, "sha256"]:
+                    downloaded[sha256] = os.path.join('raw/3dmodels/original', k)
+                else:
+                    print(f"Error downloading {k}: sha256s do not match")
+
+    return pd.DataFrame(downloaded.items(), columns=['sha256', 'local_path'])
+
+
+def foreach_instance(metadata, output_dir, func, max_workers=None, desc='Processing objects') -> pd.DataFrame:
+    import os
+    from concurrent.futures import ThreadPoolExecutor
+    from tqdm import tqdm
+
+    # load metadata
+    metadata = metadata.to_dict('records')
+
+    # processing objects
+    records = []
+    max_workers = max_workers or os.cpu_count()
+    try:
+        with ThreadPoolExecutor(max_workers=max_workers) as executor, \
+            tqdm(total=len(metadata), desc=desc) as pbar:
+            def worker(metadatum):
+                try:
+                    local_path = metadatum['local_path']
+                    sha256 = metadatum['sha256']
+                    file = os.path.join(output_dir, local_path)
+                    record = func(file, sha256)
+                    if record is not None:
+                        records.append(record)
+                    pbar.update()
+                except Exception as e:
+                    print(f"Error processing object {sha256}: {e}")
+                    pbar.update()
+
+            executor.map(worker, metadata)
+            executor.shutdown(wait=True)
+    except:
+        print("Error happened during processing.")
+
+    return pd.DataFrame.from_records(records)
diff --git a/third_party/TRELLIS/dataset_toolkits/datasets/HSSD.py b/third_party/TRELLIS/dataset_toolkits/datasets/HSSD.py
new file mode 100644
index 0000000000000000000000000000000000000000..465e6a140010d0b33ba6435e8129825599dda5db
--- /dev/null
+++ b/third_party/TRELLIS/dataset_toolkits/datasets/HSSD.py
@@ -0,0 +1,103 @@
+import os
+import re
+import argparse
+import tarfile
+from concurrent.futures import ThreadPoolExecutor
+from tqdm import tqdm
+import pandas as pd
+import huggingface_hub
+from utils import get_file_hash
+
+
+def add_args(parser: argparse.ArgumentParser):
+    pass
+
+
+def get_metadata(**kwargs):
+    metadata = pd.read_csv("hf://datasets/JeffreyXiang/TRELLIS-500K/HSSD.csv")
+    return metadata
+
+
+def download(metadata, output_dir, **kwargs):
+    os.makedirs(os.path.join(output_dir, 'raw'), exist_ok=True)
+
+    # check login
+    try:
+        huggingface_hub.whoami()
+    except:
+        print("\033[93m")
+        print("You haven't logged in to the Hugging Face Hub.")
+        print("Visit https://huggingface.co/settings/tokens to get a
token.") + print("\033[0m") + huggingface_hub.login() + + try: + huggingface_hub.hf_hub_download(repo_id="hssd/hssd-models", filename="README.md", repo_type="dataset") + except: + print("\033[93m") + print("Error downloading HSSD dataset.") + print("Check if you have access to the HSSD dataset.") + print("Visit https://huggingface.co/datasets/hssd/hssd-models for more information") + print("\033[0m") + + downloaded = {} + metadata = metadata.set_index("file_identifier") + with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor, \ + tqdm(total=len(metadata), desc="Downloading") as pbar: + def worker(instance: str) -> str: + try: + huggingface_hub.hf_hub_download(repo_id="hssd/hssd-models", filename=instance, repo_type="dataset", local_dir=os.path.join(output_dir, 'raw')) + sha256 = get_file_hash(os.path.join(output_dir, 'raw', instance)) + pbar.update() + return sha256 + except Exception as e: + pbar.update() + print(f"Error extracting for {instance}: {e}") + return None + + sha256s = executor.map(worker, metadata.index) + executor.shutdown(wait=True) + + for k, sha256 in zip(metadata.index, sha256s): + if sha256 is not None: + if sha256 == metadata.loc[k, "sha256"]: + downloaded[sha256] = os.path.join('raw', k) + else: + print(f"Error downloading {k}: sha256s do not match") + + return pd.DataFrame(downloaded.items(), columns=['sha256', 'local_path']) + + +def foreach_instance(metadata, output_dir, func, max_workers=None, desc='Processing objects') -> pd.DataFrame: + import os + from concurrent.futures import ThreadPoolExecutor + from tqdm import tqdm + + # load metadata + metadata = metadata.to_dict('records') + + # processing objects + records = [] + max_workers = max_workers or os.cpu_count() + try: + with ThreadPoolExecutor(max_workers=max_workers) as executor, \ + tqdm(total=len(metadata), desc=desc) as pbar: + def worker(metadatum): + try: + local_path = metadatum['local_path'] + sha256 = metadatum['sha256'] + file = os.path.join(output_dir, local_path) + record = func(file, sha256) + if record is not None: + records.append(record) + pbar.update() + except Exception as e: + print(f"Error processing object {sha256}: {e}") + pbar.update() + + executor.map(worker, metadata) + executor.shutdown(wait=True) + except: + print("Error happened during processing.") + + return pd.DataFrame.from_records(records) diff --git a/third_party/TRELLIS/dataset_toolkits/datasets/ObjaverseXL.py b/third_party/TRELLIS/dataset_toolkits/datasets/ObjaverseXL.py new file mode 100644 index 0000000000000000000000000000000000000000..b2f5c76c07701b198a20af32fc87c81614ae7f79 --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/datasets/ObjaverseXL.py @@ -0,0 +1,92 @@ +import os +import argparse +from concurrent.futures import ThreadPoolExecutor +from tqdm import tqdm +import pandas as pd +import objaverse.xl as oxl +from utils import get_file_hash + + +def add_args(parser: argparse.ArgumentParser): + parser.add_argument('--source', type=str, default='sketchfab', + help='Data source to download annotations from (github, sketchfab)') + + +def get_metadata(source, **kwargs): + if source == 'sketchfab': + metadata = pd.read_csv("hf://datasets/JeffreyXiang/TRELLIS-500K/ObjaverseXL_sketchfab.csv") + elif source == 'github': + metadata = pd.read_csv("hf://datasets/JeffreyXiang/TRELLIS-500K/ObjaverseXL_github.csv") + else: + raise ValueError(f"Invalid source: {source}") + return metadata + + +def download(metadata, output_dir, **kwargs): + os.makedirs(os.path.join(output_dir, 'raw'), exist_ok=True) + + # download 
annotations
+    annotations = oxl.get_annotations()
+    annotations = annotations[annotations['sha256'].isin(metadata['sha256'].values)]
+
+    # download and render objects
+    file_paths = oxl.download_objects(
+        annotations,
+        download_dir=os.path.join(output_dir, "raw"),
+        save_repo_format="zip",
+    )
+
+    downloaded = {}
+    metadata = metadata.set_index("file_identifier")
+    for k, v in file_paths.items():
+        sha256 = metadata.loc[k, "sha256"]
+        downloaded[sha256] = os.path.relpath(v, output_dir)
+
+    return pd.DataFrame(downloaded.items(), columns=['sha256', 'local_path'])
+
+
+def foreach_instance(metadata, output_dir, func, max_workers=None, desc='Processing objects') -> pd.DataFrame:
+    import os
+    from concurrent.futures import ThreadPoolExecutor
+    from tqdm import tqdm
+    import tempfile
+    import zipfile
+
+    # load metadata
+    metadata = metadata.to_dict('records')
+
+    # processing objects
+    records = []
+    max_workers = max_workers or os.cpu_count()
+    try:
+        with ThreadPoolExecutor(max_workers=max_workers) as executor, \
+            tqdm(total=len(metadata), desc=desc) as pbar:
+            def worker(metadatum):
+                try:
+                    local_path = metadatum['local_path']
+                    sha256 = metadatum['sha256']
+                    if local_path.startswith('raw/github/repos/'):
+                        path_parts = local_path.split('/')
+                        file_name = os.path.join(*path_parts[5:])
+                        zip_file = os.path.join(output_dir, *path_parts[:5])
+                        with tempfile.TemporaryDirectory() as tmp_dir:
+                            with zipfile.ZipFile(zip_file, 'r') as zip_ref:
+                                zip_ref.extractall(tmp_dir)
+                            file = os.path.join(tmp_dir, file_name)
+                            record = func(file, sha256)
+                    else:
+                        file = os.path.join(output_dir, local_path)
+                        record = func(file, sha256)
+                    if record is not None:
+                        records.append(record)
+                    pbar.update()
+                except Exception as e:
+                    print(f"Error processing object {sha256}: {e}")
+                    pbar.update()
+
+            executor.map(worker, metadata)
+            executor.shutdown(wait=True)
+    except:
+        print("Error happened during processing.")
+
+    return pd.DataFrame.from_records(records)
diff --git a/third_party/TRELLIS/dataset_toolkits/datasets/Toys4k.py b/third_party/TRELLIS/dataset_toolkits/datasets/Toys4k.py
new file mode 100644
index 0000000000000000000000000000000000000000..378afdaa87b1b2a5962a33e7e55efa9540f28436
--- /dev/null
+++ b/third_party/TRELLIS/dataset_toolkits/datasets/Toys4k.py
@@ -0,0 +1,92 @@
+import os
+import re
+import argparse
+import zipfile
+from concurrent.futures import ThreadPoolExecutor
+from tqdm import tqdm
+import pandas as pd
+from utils import get_file_hash
+
+
+def add_args(parser: argparse.ArgumentParser):
+    pass
+
+
+def get_metadata(**kwargs):
+    metadata = pd.read_csv("hf://datasets/JeffreyXiang/TRELLIS-500K/Toys4k.csv")
+    return metadata
+
+
+def download(metadata, output_dir, **kwargs):
+    os.makedirs(output_dir, exist_ok=True)
+
+    if not os.path.exists(os.path.join(output_dir, 'raw', 'toys4k_blend_files.zip')):
+        print("\033[93m")
+        print("Toys4k has to be downloaded manually")
+        print(f"Please download the toys4k_blend_files.zip file and place it in the {output_dir}/raw directory")
+        print("Visit https://github.com/rehg-lab/lowshot-shapebias/tree/main/toys4k for more information")
+        print("\033[0m")
+        raise FileNotFoundError("toys4k_blend_files.zip not found")
+
+    downloaded = {}
+    metadata = metadata.set_index("file_identifier")
+    with zipfile.ZipFile(os.path.join(output_dir, 'raw', 'toys4k_blend_files.zip')) as zip_ref:
+        with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor, \
+            tqdm(total=len(metadata), desc="Extracting") as pbar:
+            def worker(instance: str) -> str:
+                try:
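+                    # Extract this instance's .blend file into raw/, then hash it
+                    # so it can be checked against the expected sha256 below.
+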
zip_ref.extract(os.path.join('toys4k_blend_files', instance), os.path.join(output_dir, 'raw')) + sha256 = get_file_hash(os.path.join(output_dir, 'raw/toys4k_blend_files', instance)) + pbar.update() + return sha256 + except Exception as e: + pbar.update() + print(f"Error extracting for {instance}: {e}") + return None + + sha256s = executor.map(worker, metadata.index) + executor.shutdown(wait=True) + + for k, sha256 in zip(metadata.index, sha256s): + if sha256 is not None: + if sha256 == metadata.loc[k, "sha256"]: + downloaded[sha256] = os.path.join("raw/toys4k_blend_files", k) + else: + print(f"Error downloading {k}: sha256s do not match") + + return pd.DataFrame(downloaded.items(), columns=['sha256', 'local_path']) + + +def foreach_instance(metadata, output_dir, func, max_workers=None, desc='Processing objects') -> pd.DataFrame: + import os + from concurrent.futures import ThreadPoolExecutor + from tqdm import tqdm + + # load metadata + metadata = metadata.to_dict('records') + + # processing objects + records = [] + max_workers = max_workers or os.cpu_count() + try: + with ThreadPoolExecutor(max_workers=max_workers) as executor, \ + tqdm(total=len(metadata), desc=desc) as pbar: + def worker(metadatum): + try: + local_path = metadatum['local_path'] + sha256 = metadatum['sha256'] + file = os.path.join(output_dir, local_path) + record = func(file, sha256) + if record is not None: + records.append(record) + pbar.update() + except Exception as e: + print(f"Error processing object {sha256}: {e}") + pbar.update() + + executor.map(worker, metadata) + executor.shutdown(wait=True) + except: + print("Error happened during processing.") + + return pd.DataFrame.from_records(records) diff --git a/third_party/TRELLIS/dataset_toolkits/download.py b/third_party/TRELLIS/dataset_toolkits/download.py new file mode 100644 index 0000000000000000000000000000000000000000..36e684ff5e61105d8c69c101291dc8fa4415af3d --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/download.py @@ -0,0 +1,52 @@ +import os +import copy +import sys +import importlib +import argparse +import pandas as pd +from easydict import EasyDict as edict + +if __name__ == '__main__': + dataset_utils = importlib.import_module(f'datasets.{sys.argv[1]}') + + parser = argparse.ArgumentParser() + parser.add_argument('--output_dir', type=str, required=True, + help='Directory to save the metadata') + parser.add_argument('--filter_low_aesthetic_score', type=float, default=None, + help='Filter objects with aesthetic score lower than this value') + parser.add_argument('--instances', type=str, default=None, + help='Instances to process') + dataset_utils.add_args(parser) + parser.add_argument('--rank', type=int, default=0) + parser.add_argument('--world_size', type=int, default=1) + opt = parser.parse_args(sys.argv[2:]) + opt = edict(vars(opt)) + + os.makedirs(opt.output_dir, exist_ok=True) + + # get file list + if not os.path.exists(os.path.join(opt.output_dir, 'metadata.csv')): + raise ValueError('metadata.csv not found') + metadata = pd.read_csv(os.path.join(opt.output_dir, 'metadata.csv')) + if opt.instances is None: + if opt.filter_low_aesthetic_score is not None: + metadata = metadata[metadata['aesthetic_score'] >= opt.filter_low_aesthetic_score] + if 'local_path' in metadata.columns: + metadata = metadata[metadata['local_path'].isna()] + else: + if os.path.exists(opt.instances): + with open(opt.instances, 'r') as f: + instances = f.read().splitlines() + else: + instances = opt.instances.split(',') + metadata = 
metadata[metadata['sha256'].isin(instances)] + + start = len(metadata) * opt.rank // opt.world_size + end = len(metadata) * (opt.rank + 1) // opt.world_size + metadata = metadata[start:end] + + print(f'Processing {len(metadata)} objects...') + + # process objects + downloaded = dataset_utils.download(metadata, **opt) + downloaded.to_csv(os.path.join(opt.output_dir, f'downloaded_{opt.rank}.csv'), index=False) diff --git a/third_party/TRELLIS/dataset_toolkits/encode_latent.py b/third_party/TRELLIS/dataset_toolkits/encode_latent.py new file mode 100644 index 0000000000000000000000000000000000000000..d8c4770909d56ca3820f7aacd09bcf9b8988b9ac --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/encode_latent.py @@ -0,0 +1,127 @@ +import os +import sys +sys.path.append(os.path.join(os.path.dirname(__file__), '..')) +import copy +import json +import argparse +import torch +import numpy as np +import pandas as pd +from tqdm import tqdm +from easydict import EasyDict as edict +from concurrent.futures import ThreadPoolExecutor +from queue import Queue + +import trellis.models as models +import trellis.modules.sparse as sp + + +torch.set_grad_enabled(False) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--output_dir', type=str, required=True, + help='Directory to save the metadata') + parser.add_argument('--filter_low_aesthetic_score', type=float, default=None, + help='Filter objects with aesthetic score lower than this value') + parser.add_argument('--feat_model', type=str, default='dinov2_vitl14_reg', + help='Feature model') + parser.add_argument('--enc_pretrained', type=str, default='JeffreyXiang/TRELLIS-image-large/ckpts/slat_enc_swin8_B_64l8_fp16', + help='Pretrained encoder model') + parser.add_argument('--model_root', type=str, default='results', + help='Root directory of models') + parser.add_argument('--enc_model', type=str, default=None, + help='Encoder model. 
if specified, use this model instead of pretrained model') + parser.add_argument('--ckpt', type=str, default=None, + help='Checkpoint to load') + parser.add_argument('--instances', type=str, default=None, + help='Instances to process') + parser.add_argument('--rank', type=int, default=0) + parser.add_argument('--world_size', type=int, default=1) + opt = parser.parse_args() + opt = edict(vars(opt)) + + if opt.enc_model is None: + latent_name = f'{opt.feat_model}_{opt.enc_pretrained.split("/")[-1]}' + encoder = models.from_pretrained(opt.enc_pretrained).eval().cuda() + else: + latent_name = f'{opt.feat_model}_{opt.enc_model}_{opt.ckpt}' + cfg = edict(json.load(open(os.path.join(opt.model_root, opt.enc_model, 'config.json'), 'r'))) + encoder = getattr(models, cfg.models.encoder.name)(**cfg.models.encoder.args).cuda() + ckpt_path = os.path.join(opt.model_root, opt.enc_model, 'ckpts', f'encoder_{opt.ckpt}.pt') + encoder.load_state_dict(torch.load(ckpt_path), strict=False) + encoder.eval() + print(f'Loaded model from {ckpt_path}') + + os.makedirs(os.path.join(opt.output_dir, 'latents', latent_name), exist_ok=True) + + # get file list + if os.path.exists(os.path.join(opt.output_dir, 'metadata.csv')): + metadata = pd.read_csv(os.path.join(opt.output_dir, 'metadata.csv')) + else: + raise ValueError('metadata.csv not found') + if opt.instances is not None: + with open(opt.instances, 'r') as f: + sha256s = [line.strip() for line in f] + metadata = metadata[metadata['sha256'].isin(sha256s)] + else: + if opt.filter_low_aesthetic_score is not None: + metadata = metadata[metadata['aesthetic_score'] >= opt.filter_low_aesthetic_score] + metadata = metadata[metadata[f'feature_{opt.feat_model}'] == True] + if f'latent_{latent_name}' in metadata.columns: + metadata = metadata[metadata[f'latent_{latent_name}'] == False] + + start = len(metadata) * opt.rank // opt.world_size + end = len(metadata) * (opt.rank + 1) // opt.world_size + metadata = metadata[start:end] + records = [] + + # filter out objects that are already processed + sha256s = list(metadata['sha256'].values) + for sha256 in copy.copy(sha256s): + if os.path.exists(os.path.join(opt.output_dir, 'latents', latent_name, f'{sha256}.npz')): + records.append({'sha256': sha256, f'latent_{latent_name}': True}) + sha256s.remove(sha256) + + # encode latents + load_queue = Queue(maxsize=4) + try: + with ThreadPoolExecutor(max_workers=32) as loader_executor, \ + ThreadPoolExecutor(max_workers=32) as saver_executor: + def loader(sha256): + try: + feats = np.load(os.path.join(opt.output_dir, 'features', opt.feat_model, f'{sha256}.npz')) + load_queue.put((sha256, feats)) + except Exception as e: + print(f"Error loading features for {sha256}: {e}") + loader_executor.map(loader, sha256s) + + def saver(sha256, pack): + save_path = os.path.join(opt.output_dir, 'latents', latent_name, f'{sha256}.npz') + np.savez_compressed(save_path, **pack) + records.append({'sha256': sha256, f'latent_{latent_name}': True}) + + for _ in tqdm(range(len(sha256s)), desc="Extracting latents"): + sha256, feats = load_queue.get() + feats = sp.SparseTensor( + feats = torch.from_numpy(feats['patchtokens']).float(), + coords = torch.cat([ + torch.zeros(feats['patchtokens'].shape[0], 1).int(), + torch.from_numpy(feats['indices']).int(), + ], dim=1), + ).cuda() + latent = encoder(feats, sample_posterior=False) + assert torch.isfinite(latent.feats).all(), "Non-finite latent" + pack = { + 'feats': latent.feats.cpu().numpy().astype(np.float32), + 'coords': latent.coords[:, 
1:].cpu().numpy().astype(np.uint8), + } + saver_executor.submit(saver, sha256, pack) + + saver_executor.shutdown(wait=True) + except: + print("Error happened during processing.") + + records = pd.DataFrame.from_records(records) + records.to_csv(os.path.join(opt.output_dir, f'latent_{latent_name}_{opt.rank}.csv'), index=False) diff --git a/third_party/TRELLIS/dataset_toolkits/encode_ss_latent.py b/third_party/TRELLIS/dataset_toolkits/encode_ss_latent.py new file mode 100644 index 0000000000000000000000000000000000000000..c5af5df048524218308dbb4e3a29580fc3b41c20 --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/encode_ss_latent.py @@ -0,0 +1,128 @@ +import os +import sys +sys.path.append(os.path.join(os.path.dirname(__file__), '..')) +import copy +import json +import argparse +import torch +import numpy as np +import pandas as pd +import utils3d +from tqdm import tqdm +from easydict import EasyDict as edict +from concurrent.futures import ThreadPoolExecutor +from queue import Queue + +import trellis.models as models + + +torch.set_grad_enabled(False) + + +def get_voxels(instance): + position = utils3d.io.read_ply(os.path.join(opt.output_dir, 'voxels', f'{instance}.ply'))[0] + coords = ((torch.tensor(position) + 0.5) * opt.resolution).int().contiguous() + ss = torch.zeros(1, opt.resolution, opt.resolution, opt.resolution, dtype=torch.long) + ss[:, coords[:, 0], coords[:, 1], coords[:, 2]] = 1 + return ss + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--output_dir', type=str, required=True, + help='Directory to save the metadata') + parser.add_argument('--filter_low_aesthetic_score', type=float, default=None, + help='Filter objects with aesthetic score lower than this value') + parser.add_argument('--enc_pretrained', type=str, default='JeffreyXiang/TRELLIS-image-large/ckpts/ss_enc_conv3d_16l8_fp16', + help='Pretrained encoder model') + parser.add_argument('--model_root', type=str, default='results', + help='Root directory of models') + parser.add_argument('--enc_model', type=str, default=None, + help='Encoder model. 
if specified, use this model instead of pretrained model') + parser.add_argument('--ckpt', type=str, default=None, + help='Checkpoint to load') + parser.add_argument('--resolution', type=int, default=64, + help='Resolution') + parser.add_argument('--instances', type=str, default=None, + help='Instances to process') + parser.add_argument('--rank', type=int, default=0) + parser.add_argument('--world_size', type=int, default=1) + opt = parser.parse_args() + opt = edict(vars(opt)) + + if opt.enc_model is None: + latent_name = f'{opt.enc_pretrained.split("/")[-1]}' + encoder = models.from_pretrained(opt.enc_pretrained).eval().cuda() + else: + latent_name = f'{opt.enc_model}_{opt.ckpt}' + cfg = edict(json.load(open(os.path.join(opt.model_root, opt.enc_model, 'config.json'), 'r'))) + encoder = getattr(models, cfg.models.encoder.name)(**cfg.models.encoder.args).cuda() + ckpt_path = os.path.join(opt.model_root, opt.enc_model, 'ckpts', f'encoder_{opt.ckpt}.pt') + encoder.load_state_dict(torch.load(ckpt_path), strict=False) + encoder.eval() + print(f'Loaded model from {ckpt_path}') + + os.makedirs(os.path.join(opt.output_dir, 'ss_latents', latent_name), exist_ok=True) + + # get file list + if os.path.exists(os.path.join(opt.output_dir, 'metadata.csv')): + metadata = pd.read_csv(os.path.join(opt.output_dir, 'metadata.csv')) + else: + raise ValueError('metadata.csv not found') + if opt.instances is not None: + with open(opt.instances, 'r') as f: + instances = f.read().splitlines() + metadata = metadata[metadata['sha256'].isin(instances)] + else: + if opt.filter_low_aesthetic_score is not None: + metadata = metadata[metadata['aesthetic_score'] >= opt.filter_low_aesthetic_score] + metadata = metadata[metadata['voxelized'] == True] + if f'ss_latent_{latent_name}' in metadata.columns: + metadata = metadata[metadata[f'ss_latent_{latent_name}'] == False] + + start = len(metadata) * opt.rank // opt.world_size + end = len(metadata) * (opt.rank + 1) // opt.world_size + metadata = metadata[start:end] + records = [] + + # filter out objects that are already processed + sha256s = list(metadata['sha256'].values) + for sha256 in copy.copy(sha256s): + if os.path.exists(os.path.join(opt.output_dir, 'ss_latents', latent_name, f'{sha256}.npz')): + records.append({'sha256': sha256, f'ss_latent_{latent_name}': True}) + sha256s.remove(sha256) + + # encode latents + load_queue = Queue(maxsize=4) + try: + with ThreadPoolExecutor(max_workers=32) as loader_executor, \ + ThreadPoolExecutor(max_workers=32) as saver_executor: + def loader(sha256): + try: + ss = get_voxels(sha256)[None].float() + load_queue.put((sha256, ss)) + except Exception as e: + print(f"Error loading features for {sha256}: {e}") + loader_executor.map(loader, sha256s) + + def saver(sha256, pack): + save_path = os.path.join(opt.output_dir, 'ss_latents', latent_name, f'{sha256}.npz') + np.savez_compressed(save_path, **pack) + records.append({'sha256': sha256, f'ss_latent_{latent_name}': True}) + + for _ in tqdm(range(len(sha256s)), desc="Extracting latents"): + sha256, ss = load_queue.get() + ss = ss.cuda().float() + latent = encoder(ss, sample_posterior=False) + assert torch.isfinite(latent).all(), "Non-finite latent" + pack = { + 'mean': latent[0].cpu().numpy(), + } + saver_executor.submit(saver, sha256, pack) + + saver_executor.shutdown(wait=True) + except: + print("Error happened during processing.") + + records = pd.DataFrame.from_records(records) + records.to_csv(os.path.join(opt.output_dir, f'ss_latent_{latent_name}_{opt.rank}.csv'), index=False) diff 
--git a/third_party/TRELLIS/dataset_toolkits/extract_feature.py b/third_party/TRELLIS/dataset_toolkits/extract_feature.py new file mode 100644 index 0000000000000000000000000000000000000000..1ded2fe03b3e4355da00af429c9439a68069308a --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/extract_feature.py @@ -0,0 +1,179 @@ +import os +import copy +import sys +import json +import importlib +import argparse +import torch +import torch.nn.functional as F +import numpy as np +import pandas as pd +import utils3d +from tqdm import tqdm +from easydict import EasyDict as edict +from concurrent.futures import ThreadPoolExecutor +from queue import Queue +from torchvision import transforms +from PIL import Image + + +torch.set_grad_enabled(False) + + +def get_data(frames, sha256): + with ThreadPoolExecutor(max_workers=16) as executor: + def worker(view): + image_path = os.path.join(opt.output_dir, 'renders', sha256, view['file_path']) + try: + image = Image.open(image_path) + except: + print(f"Error loading image {image_path}") + return None + image = image.resize((518, 518), Image.Resampling.LANCZOS) + image = np.array(image).astype(np.float32) / 255 + image = image[:, :, :3] * image[:, :, 3:] + image = torch.from_numpy(image).permute(2, 0, 1).float() + + c2w = torch.tensor(view['transform_matrix']) + c2w[:3, 1:3] *= -1 + extrinsics = torch.inverse(c2w) + fov = view['camera_angle_x'] + intrinsics = utils3d.torch.intrinsics_from_fov_xy(torch.tensor(fov), torch.tensor(fov)) + + return { + 'image': image, + 'extrinsics': extrinsics, + 'intrinsics': intrinsics + } + + datas = executor.map(worker, frames) + for data in datas: + if data is not None: + yield data + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--output_dir', type=str, required=True, + help='Directory to save the metadata') + parser.add_argument('--filter_low_aesthetic_score', type=float, default=None, + help='Filter objects with aesthetic score lower than this value') + parser.add_argument('--model', type=str, default='dinov2_vitl14_reg', + help='Feature extraction model') + parser.add_argument('--instances', type=str, default=None, + help='Instances to process') + parser.add_argument('--batch_size', type=int, default=16) + parser.add_argument('--rank', type=int, default=0) + parser.add_argument('--world_size', type=int, default=1) + opt = parser.parse_args() + opt = edict(vars(opt)) + + feature_name = opt.model + os.makedirs(os.path.join(opt.output_dir, 'features', feature_name), exist_ok=True) + + # load model + dinov2_model = torch.hub.load('facebookresearch/dinov2', opt.model) + dinov2_model.eval().cuda() + transform = transforms.Compose([ + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + n_patch = 518 // 14 + + # get file list + if os.path.exists(os.path.join(opt.output_dir, 'metadata.csv')): + metadata = pd.read_csv(os.path.join(opt.output_dir, 'metadata.csv')) + else: + raise ValueError('metadata.csv not found') + if opt.instances is not None: + with open(opt.instances, 'r') as f: + instances = f.read().splitlines() + metadata = metadata[metadata['sha256'].isin(instances)] + else: + if opt.filter_low_aesthetic_score is not None: + metadata = metadata[metadata['aesthetic_score'] >= opt.filter_low_aesthetic_score] + if f'feature_{feature_name}' in metadata.columns: + metadata = metadata[metadata[f'feature_{feature_name}'] == False] + metadata = metadata[metadata['voxelized'] == True] + metadata = metadata[metadata['rendered'] == True] + + start = 
len(metadata) * opt.rank // opt.world_size + end = len(metadata) * (opt.rank + 1) // opt.world_size + metadata = metadata[start:end] + records = [] + + # filter out objects that are already processed + sha256s = list(metadata['sha256'].values) + for sha256 in copy.copy(sha256s): + if os.path.exists(os.path.join(opt.output_dir, 'features', feature_name, f'{sha256}.npz')): + records.append({'sha256': sha256, f'feature_{feature_name}' : True}) + sha256s.remove(sha256) + + # extract features + load_queue = Queue(maxsize=4) + try: + with ThreadPoolExecutor(max_workers=8) as loader_executor, \ + ThreadPoolExecutor(max_workers=8) as saver_executor: + def loader(sha256): + try: + with open(os.path.join(opt.output_dir, 'renders', sha256, 'transforms.json'), 'r') as f: + metadata = json.load(f) + frames = metadata['frames'] + data = [] + for datum in get_data(frames, sha256): + datum['image'] = transform(datum['image']) + data.append(datum) + positions = utils3d.io.read_ply(os.path.join(opt.output_dir, 'voxels', f'{sha256}.ply'))[0] + load_queue.put((sha256, data, positions)) + except Exception as e: + print(f"Error loading data for {sha256}: {e}") + + loader_executor.map(loader, sha256s) + + def saver(sha256, pack, patchtokens, uv): + pack['patchtokens'] = F.grid_sample( + patchtokens, + uv.unsqueeze(1), + mode='bilinear', + align_corners=False, + ).squeeze(2).permute(0, 2, 1).cpu().numpy() + pack['patchtokens'] = np.mean(pack['patchtokens'], axis=0).astype(np.float16) + save_path = os.path.join(opt.output_dir, 'features', feature_name, f'{sha256}.npz') + np.savez_compressed(save_path, **pack) + records.append({'sha256': sha256, f'feature_{feature_name}' : True}) + + for _ in tqdm(range(len(sha256s)), desc="Extracting features"): + sha256, data, positions = load_queue.get() + positions = torch.from_numpy(positions).float().cuda() + indices = ((positions + 0.5) * 64).long() + assert torch.all(indices >= 0) and torch.all(indices < 64), "Some vertices are out of bounds" + n_views = len(data) + N = positions.shape[0] + pack = { + 'indices': indices.cpu().numpy().astype(np.uint8), + } + patchtokens_lst = [] + uv_lst = [] + for i in range(0, n_views, opt.batch_size): + batch_data = data[i:i+opt.batch_size] + bs = len(batch_data) + batch_images = torch.stack([d['image'] for d in batch_data]).cuda() + batch_extrinsics = torch.stack([d['extrinsics'] for d in batch_data]).cuda() + batch_intrinsics = torch.stack([d['intrinsics'] for d in batch_data]).cuda() + features = dinov2_model(batch_images, is_training=True) + uv = utils3d.torch.project_cv(positions, batch_extrinsics, batch_intrinsics)[0] * 2 - 1 + patchtokens = features['x_prenorm'][:, dinov2_model.num_register_tokens + 1:].permute(0, 2, 1).reshape(bs, 1024, n_patch, n_patch) + patchtokens_lst.append(patchtokens) + uv_lst.append(uv) + patchtokens = torch.cat(patchtokens_lst, dim=0) + uv = torch.cat(uv_lst, dim=0) + + # save features + saver_executor.submit(saver, sha256, pack, patchtokens, uv) + + saver_executor.shutdown(wait=True) + except: + print("Error happened during processing.") + + records = pd.DataFrame.from_records(records) + records.to_csv(os.path.join(opt.output_dir, f'feature_{feature_name}_{opt.rank}.csv'), index=False) + \ No newline at end of file diff --git a/third_party/TRELLIS/dataset_toolkits/render.py b/third_party/TRELLIS/dataset_toolkits/render.py new file mode 100644 index 0000000000000000000000000000000000000000..636f3b308fa9a33e1304b6b64370221167885118 --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/render.py @@ -0,0 
+1,121 @@ +import os +import json +import copy +import sys +import importlib +import argparse +import pandas as pd +from easydict import EasyDict as edict +from functools import partial +from subprocess import DEVNULL, call +import numpy as np +from utils import sphere_hammersley_sequence + + +BLENDER_LINK = 'https://download.blender.org/release/Blender3.0/blender-3.0.1-linux-x64.tar.xz' +BLENDER_INSTALLATION_PATH = '/tmp' +BLENDER_PATH = f'{BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64/blender' + +def _install_blender(): + if not os.path.exists(BLENDER_PATH): + os.system('sudo apt-get update') + os.system('sudo apt-get install -y libxrender1 libxi6 libxkbcommon-x11-0 libsm6') + os.system(f'wget {BLENDER_LINK} -P {BLENDER_INSTALLATION_PATH}') + os.system(f'tar -xvf {BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64.tar.xz -C {BLENDER_INSTALLATION_PATH}') + + +def _render(file_path, sha256, output_dir, num_views): + output_folder = os.path.join(output_dir, 'renders', sha256) + + # Build camera {yaw, pitch, radius, fov} + yaws = [] + pitchs = [] + offset = (np.random.rand(), np.random.rand()) + for i in range(num_views): + y, p = sphere_hammersley_sequence(i, num_views, offset) + yaws.append(y) + pitchs.append(p) + radius = [2] * num_views + fov = [40 / 180 * np.pi] * num_views + views = [{'yaw': y, 'pitch': p, 'radius': r, 'fov': f} for y, p, r, f in zip(yaws, pitchs, radius, fov)] + + args = [ + BLENDER_PATH, '-b', '-P', os.path.join(os.path.dirname(__file__), 'blender_script', 'render.py'), + '--', + '--views', json.dumps(views), + '--object', os.path.expanduser(file_path), + '--resolution', '512', + '--output_folder', output_folder, + '--engine', 'CYCLES', + '--save_mesh', + ] + if file_path.endswith('.blend'): + args.insert(1, file_path) + + call(args, stdout=DEVNULL, stderr=DEVNULL) + + if os.path.exists(os.path.join(output_folder, 'transforms.json')): + return {'sha256': sha256, 'rendered': True} + + +if __name__ == '__main__': + dataset_utils = importlib.import_module(f'datasets.{sys.argv[1]}') + + parser = argparse.ArgumentParser() + parser.add_argument('--output_dir', type=str, required=True, + help='Directory to save the metadata') + parser.add_argument('--filter_low_aesthetic_score', type=float, default=None, + help='Filter objects with aesthetic score lower than this value') + parser.add_argument('--instances', type=str, default=None, + help='Instances to process') + parser.add_argument('--num_views', type=int, default=150, + help='Number of views to render') + dataset_utils.add_args(parser) + parser.add_argument('--rank', type=int, default=0) + parser.add_argument('--world_size', type=int, default=1) + parser.add_argument('--max_workers', type=int, default=8) + opt = parser.parse_args(sys.argv[2:]) + opt = edict(vars(opt)) + + os.makedirs(os.path.join(opt.output_dir, 'renders'), exist_ok=True) + + # install blender + print('Checking blender...', flush=True) + _install_blender() + + # get file list + if not os.path.exists(os.path.join(opt.output_dir, 'metadata.csv')): + raise ValueError('metadata.csv not found') + metadata = pd.read_csv(os.path.join(opt.output_dir, 'metadata.csv')) + if opt.instances is None: + metadata = metadata[metadata['local_path'].notna()] + if opt.filter_low_aesthetic_score is not None: + metadata = metadata[metadata['aesthetic_score'] >= opt.filter_low_aesthetic_score] + if 'rendered' in metadata.columns: + metadata = metadata[metadata['rendered'] == False] + else: + if os.path.exists(opt.instances): + with open(opt.instances, 'r') as f: + 
instances = f.read().splitlines() + else: + instances = opt.instances.split(',') + metadata = metadata[metadata['sha256'].isin(instances)] + + start = len(metadata) * opt.rank // opt.world_size + end = len(metadata) * (opt.rank + 1) // opt.world_size + metadata = metadata[start:end] + records = [] + + # filter out objects that are already processed + for sha256 in copy.copy(metadata['sha256'].values): + if os.path.exists(os.path.join(opt.output_dir, 'renders', sha256, 'transforms.json')): + records.append({'sha256': sha256, 'rendered': True}) + metadata = metadata[metadata['sha256'] != sha256] + + print(f'Processing {len(metadata)} objects...') + + # process objects + func = partial(_render, output_dir=opt.output_dir, num_views=opt.num_views) + rendered = dataset_utils.foreach_instance(metadata, opt.output_dir, func, max_workers=opt.max_workers, desc='Rendering objects') + rendered = pd.concat([rendered, pd.DataFrame.from_records(records)]) + rendered.to_csv(os.path.join(opt.output_dir, f'rendered_{opt.rank}.csv'), index=False) diff --git a/third_party/TRELLIS/dataset_toolkits/render_cond.py b/third_party/TRELLIS/dataset_toolkits/render_cond.py new file mode 100644 index 0000000000000000000000000000000000000000..b2a40e6d9974a9e7256571d9ed636f4a5758b5c7 --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/render_cond.py @@ -0,0 +1,125 @@ +import os +import json +import copy +import sys +import importlib +import argparse +import pandas as pd +from easydict import EasyDict as edict +from functools import partial +from subprocess import DEVNULL, call +import numpy as np +from utils import sphere_hammersley_sequence + + +BLENDER_LINK = 'https://download.blender.org/release/Blender3.0/blender-3.0.1-linux-x64.tar.xz' +BLENDER_INSTALLATION_PATH = '/tmp' +BLENDER_PATH = f'{BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64/blender' + +def _install_blender(): + if not os.path.exists(BLENDER_PATH): + os.system('sudo apt-get update') + os.system('sudo apt-get install -y libxrender1 libxi6 libxkbcommon-x11-0 libsm6') + os.system(f'wget {BLENDER_LINK} -P {BLENDER_INSTALLATION_PATH}') + os.system(f'tar -xvf {BLENDER_INSTALLATION_PATH}/blender-3.0.1-linux-x64.tar.xz -C {BLENDER_INSTALLATION_PATH}') + + +def _render_cond(file_path, sha256, output_dir, num_views): + output_folder = os.path.join(output_dir, 'renders_cond', sha256) + + # Build camera {yaw, pitch, radius, fov} + yaws = [] + pitchs = [] + offset = (np.random.rand(), np.random.rand()) + for i in range(num_views): + y, p = sphere_hammersley_sequence(i, num_views, offset) + yaws.append(y) + pitchs.append(p) + fov_min, fov_max = 10, 70 + radius_min = np.sqrt(3) / 2 / np.sin(fov_max / 360 * np.pi) + radius_max = np.sqrt(3) / 2 / np.sin(fov_min / 360 * np.pi) + k_min = 1 / radius_max**2 + k_max = 1 / radius_min**2 + ks = np.random.uniform(k_min, k_max, (1000000,)) + radius = [1 / np.sqrt(k) for k in ks] + fov = [2 * np.arcsin(np.sqrt(3) / 2 / r) for r in radius] + views = [{'yaw': y, 'pitch': p, 'radius': r, 'fov': f} for y, p, r, f in zip(yaws, pitchs, radius, fov)] + + args = [ + BLENDER_PATH, '-b', '-P', os.path.join(os.path.dirname(__file__), 'blender_script', 'render.py'), + '--', + '--views', json.dumps(views), + '--object', os.path.expanduser(file_path), + '--output_folder', os.path.expanduser(output_folder), + '--resolution', '1024', + ] + if file_path.endswith('.blend'): + args.insert(1, file_path) + + call(args, stdout=DEVNULL) + + if os.path.exists(os.path.join(output_folder, 'transforms.json')): + return {'sha256': sha256, 
'cond_rendered': True} + + +if __name__ == '__main__': + dataset_utils = importlib.import_module(f'datasets.{sys.argv[1]}') + + parser = argparse.ArgumentParser() + parser.add_argument('--output_dir', type=str, required=True, + help='Directory to save the metadata') + parser.add_argument('--filter_low_aesthetic_score', type=float, default=None, + help='Filter objects with aesthetic score lower than this value') + parser.add_argument('--instances', type=str, default=None, + help='Instances to process') + parser.add_argument('--num_views', type=int, default=24, + help='Number of views to render') + dataset_utils.add_args(parser) + parser.add_argument('--rank', type=int, default=0) + parser.add_argument('--world_size', type=int, default=1) + parser.add_argument('--max_workers', type=int, default=8) + opt = parser.parse_args(sys.argv[2:]) + opt = edict(vars(opt)) + + os.makedirs(os.path.join(opt.output_dir, 'renders_cond'), exist_ok=True) + + # install blender + print('Checking blender...', flush=True) + _install_blender() + + # get file list + if not os.path.exists(os.path.join(opt.output_dir, 'metadata.csv')): + raise ValueError('metadata.csv not found') + metadata = pd.read_csv(os.path.join(opt.output_dir, 'metadata.csv')) + if opt.instances is None: + metadata = metadata[metadata['local_path'].notna()] + if opt.filter_low_aesthetic_score is not None: + metadata = metadata[metadata['aesthetic_score'] >= opt.filter_low_aesthetic_score] + if 'cond_rendered' in metadata.columns: + metadata = metadata[metadata['cond_rendered'] == False] + else: + if os.path.exists(opt.instances): + with open(opt.instances, 'r') as f: + instances = f.read().splitlines() + else: + instances = opt.instances.split(',') + metadata = metadata[metadata['sha256'].isin(instances)] + + start = len(metadata) * opt.rank // opt.world_size + end = len(metadata) * (opt.rank + 1) // opt.world_size + metadata = metadata[start:end] + records = [] + + # filter out objects that are already processed + for sha256 in copy.copy(metadata['sha256'].values): + if os.path.exists(os.path.join(opt.output_dir, 'renders_cond', sha256, 'transforms.json')): + records.append({'sha256': sha256, 'cond_rendered': True}) + metadata = metadata[metadata['sha256'] != sha256] + + print(f'Processing {len(metadata)} objects...') + + # process objects + func = partial(_render_cond, output_dir=opt.output_dir, num_views=opt.num_views) + cond_rendered = dataset_utils.foreach_instance(metadata, opt.output_dir, func, max_workers=opt.max_workers, desc='Rendering objects') + cond_rendered = pd.concat([cond_rendered, pd.DataFrame.from_records(records)]) + cond_rendered.to_csv(os.path.join(opt.output_dir, f'cond_rendered_{opt.rank}.csv'), index=False) diff --git a/third_party/TRELLIS/dataset_toolkits/setup.sh b/third_party/TRELLIS/dataset_toolkits/setup.sh new file mode 100644 index 0000000000000000000000000000000000000000..e387ea4fe1f7d6b68ee7f5ccf07043f80bd624f4 --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/setup.sh @@ -0,0 +1 @@ +pip install pillow imageio imageio-ffmpeg tqdm easydict opencv-python-headless pandas open3d objaverse huggingface_hub diff --git a/third_party/TRELLIS/dataset_toolkits/stat_latent.py b/third_party/TRELLIS/dataset_toolkits/stat_latent.py new file mode 100644 index 0000000000000000000000000000000000000000..7f27a062bb8be898ef6c705a3f3d9488f94299fe --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/stat_latent.py @@ -0,0 +1,66 @@ +import os +import json +import argparse +import numpy as np +import pandas as pd +from tqdm 
import tqdm +from easydict import EasyDict as edict +from concurrent.futures import ThreadPoolExecutor + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--output_dir', type=str, required=True, + help='Directory to save the metadata') + parser.add_argument('--filter_low_aesthetic_score', type=float, default=None, + help='Filter objects with aesthetic score lower than this value') + parser.add_argument('--model', type=str, default='dinov2_vitl14_reg_slat_enc_swin8_B_64l8_fp16', + help='Latent model to use') + parser.add_argument('--num_samples', type=int, default=50000, + help='Number of samples to use for calculating stats') + opt = parser.parse_args() + opt = edict(vars(opt)) + + # get file list + if os.path.exists(os.path.join(opt.output_dir, 'metadata.csv')): + metadata = pd.read_csv(os.path.join(opt.output_dir, 'metadata.csv')) + else: + raise ValueError('metadata.csv not found') + if opt.filter_low_aesthetic_score is not None: + metadata = metadata[metadata['aesthetic_score'] >= opt.filter_low_aesthetic_score] + metadata = metadata[metadata[f'latent_{opt.model}'] == True] + sha256s = metadata['sha256'].values + sha256s = np.random.choice(sha256s, min(opt.num_samples, len(sha256s)), replace=False) + + # stats + means = [] + mean2s = [] + with ThreadPoolExecutor(max_workers=16) as executor, \ + tqdm(total=len(sha256s), desc="Extracting features") as pbar: + def worker(sha256): + try: + feats = np.load(os.path.join(opt.output_dir, 'latents', opt.model, f'{sha256}.npz')) + feats = feats['feats'] + means.append(feats.mean(axis=0)) + mean2s.append((feats ** 2).mean(axis=0)) + pbar.update() + except Exception as e: + print(f"Error extracting features for {sha256}: {e}") + pbar.update() + + executor.map(worker, sha256s) + executor.shutdown(wait=True) + + mean = np.array(means).mean(axis=0) + mean2 = np.array(mean2s).mean(axis=0) + std = np.sqrt(mean2 - mean ** 2) + + print('mean:', mean) + print('std:', std) + + with open(os.path.join(opt.output_dir, 'latents', opt.model, 'stats.json'), 'w') as f: + json.dump({ + 'mean': mean.tolist(), + 'std': std.tolist(), + }, f, indent=4) + \ No newline at end of file diff --git a/third_party/TRELLIS/dataset_toolkits/utils.py b/third_party/TRELLIS/dataset_toolkits/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..69830845d40697bd2c4e7c68e64e54b4d4091a81 --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/utils.py @@ -0,0 +1,43 @@ +from typing import * +import hashlib +import numpy as np + + +def get_file_hash(file: str) -> str: + sha256 = hashlib.sha256() + # Read the file from the path + with open(file, "rb") as f: + # Update the hash with the file content + for byte_block in iter(lambda: f.read(4096), b""): + sha256.update(byte_block) + return sha256.hexdigest() + +# ===============LOW DISCREPANCY SEQUENCES================ + +PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53] + +def radical_inverse(base, n): + val = 0 + inv_base = 1.0 / base + inv_base_n = inv_base + while n > 0: + digit = n % base + val += digit * inv_base_n + n //= base + inv_base_n *= inv_base + return val + +def halton_sequence(dim, n): + return [radical_inverse(PRIMES[dim], n) for dim in range(dim)] + +def hammersley_sequence(dim, n, num_samples): + return [n / num_samples] + halton_sequence(dim - 1, n) + +def sphere_hammersley_sequence(n, num_samples, offset=(0, 0)): + u, v = hammersley_sequence(2, n, num_samples) + u += offset[0] / num_samples + v += offset[1] + u = 2 * u if u < 0.25 else 
2 / 3 * u + 1 / 3 + theta = np.arccos(1 - 2 * u) - np.pi / 2 + phi = v * 2 * np.pi + return [phi, theta] diff --git a/third_party/TRELLIS/dataset_toolkits/voxelize.py b/third_party/TRELLIS/dataset_toolkits/voxelize.py new file mode 100644 index 0000000000000000000000000000000000000000..479632774d99cef6742b1523d0f25e0f2833b6a6 --- /dev/null +++ b/third_party/TRELLIS/dataset_toolkits/voxelize.py @@ -0,0 +1,86 @@ +import os +import copy +import sys +import importlib +import argparse +import pandas as pd +from easydict import EasyDict as edict +from functools import partial +import numpy as np +import open3d_pycg as o3d +import utils3d + + +def _voxelize(file, sha256, output_dir): + mesh = o3d.io.read_triangle_mesh(os.path.join(output_dir, 'renders', sha256, 'mesh.ply')) + # clamp vertices to the range [-0.5, 0.5] + vertices = np.clip(np.asarray(mesh.vertices), -0.5 + 1e-6, 0.5 - 1e-6) + mesh.vertices = o3d.utility.Vector3dVector(vertices) + voxel_grid = o3d.geometry.VoxelGrid.create_from_triangle_mesh_within_bounds(mesh, voxel_size=1/64, min_bound=(-0.5, -0.5, -0.5), max_bound=(0.5, 0.5, 0.5)) + vertices = np.array([voxel.grid_index for voxel in voxel_grid.get_voxels()]) + assert np.all(vertices >= 0) and np.all(vertices < 64), "Some vertices are out of bounds" + vertices = (vertices + 0.5) / 64 - 0.5 + utils3d.io.write_ply(os.path.join(output_dir, 'voxels', f'{sha256}.ply'), vertices) + return {'sha256': sha256, 'voxelized': True, 'num_voxels': len(vertices)} + + +if __name__ == '__main__': + dataset_utils = importlib.import_module(f'datasets.{sys.argv[1]}') + + parser = argparse.ArgumentParser() + parser.add_argument('--output_dir', type=str, required=True, + help='Directory to save the metadata') + parser.add_argument('--filter_low_aesthetic_score', type=float, default=None, + help='Filter objects with aesthetic score lower than this value') + parser.add_argument('--instances', type=str, default=None, + help='Instances to process') + parser.add_argument('--num_views', type=int, default=150, + help='Number of views to render') + dataset_utils.add_args(parser) + parser.add_argument('--rank', type=int, default=0) + parser.add_argument('--world_size', type=int, default=1) + parser.add_argument('--max_workers', type=int, default=None) + opt = parser.parse_args(sys.argv[2:]) + opt = edict(vars(opt)) + + os.makedirs(os.path.join(opt.output_dir, 'voxels'), exist_ok=True) + + # get file list + if not os.path.exists(os.path.join(opt.output_dir, 'metadata.csv')): + raise ValueError('metadata.csv not found') + metadata = pd.read_csv(os.path.join(opt.output_dir, 'metadata.csv')) + if opt.instances is None: + if opt.filter_low_aesthetic_score is not None: + metadata = metadata[metadata['aesthetic_score'] >= opt.filter_low_aesthetic_score] + if 'rendered' not in metadata.columns: + raise ValueError('metadata.csv does not have "rendered" column, please run "build_metadata.py" first') + metadata = metadata[metadata['rendered'] == True] + if 'voxelized' in metadata.columns: + metadata = metadata[metadata['voxelized'] == False] + else: + if os.path.exists(opt.instances): + with open(opt.instances, 'r') as f: + instances = f.read().splitlines() + else: + instances = opt.instances.split(',') + metadata = metadata[metadata['sha256'].isin(instances)] + + start = len(metadata) * opt.rank // opt.world_size + end = len(metadata) * (opt.rank + 1) // opt.world_size + metadata = metadata[start:end] + records = [] + + # filter out objects that are already processed + for sha256 in copy.copy(metadata['sha256'].values): 
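+        # Instances that already have a voxel PLY on disk are recorded as done
+        # and dropped from the worklist, so interrupted runs can resume.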
+ if os.path.exists(os.path.join(opt.output_dir, 'voxels', f'{sha256}.ply')): + pts = utils3d.io.read_ply(os.path.join(opt.output_dir, 'voxels', f'{sha256}.ply'))[0] + records.append({'sha256': sha256, 'voxelized': True, 'num_voxels': len(pts)}) + metadata = metadata[metadata['sha256'] != sha256] + + print(f'Processing {len(metadata)} objects...') + + # process objects + func = partial(_voxelize, output_dir=opt.output_dir) + voxelized = dataset_utils.foreach_instance(metadata, opt.output_dir, func, max_workers=opt.max_workers, desc='Voxelizing') + voxelized = pd.concat([voxelized, pd.DataFrame.from_records(records)]) + voxelized.to_csv(os.path.join(opt.output_dir, f'voxelized_{opt.rank}.csv'), index=False) diff --git a/third_party/TRELLIS/extensions/vox2seq/benchmark.py b/third_party/TRELLIS/extensions/vox2seq/benchmark.py new file mode 100755 index 0000000000000000000000000000000000000000..30351e0251cfed4db82d286af1b53654f8cdce8b --- /dev/null +++ b/third_party/TRELLIS/extensions/vox2seq/benchmark.py @@ -0,0 +1,45 @@ +import time +import torch +import vox2seq + + +if __name__ == "__main__": + stats = { + 'z_order_cuda': [], + 'z_order_pytorch': [], + 'hilbert_cuda': [], + 'hilbert_pytorch': [], + } + RES = [16, 32, 64, 128, 256] + for res in RES: + coords = torch.meshgrid(torch.arange(res), torch.arange(res), torch.arange(res)) + coords = torch.stack(coords, dim=-1).reshape(-1, 3).int().cuda() + + start = time.time() + for _ in range(100): + code_z_cuda = vox2seq.encode(coords, mode='z_order').cuda() + torch.cuda.synchronize() + stats['z_order_cuda'].append((time.time() - start) / 100) + + start = time.time() + for _ in range(100): + code_z_pytorch = vox2seq.pytorch.encode(coords, mode='z_order').cuda() + torch.cuda.synchronize() + stats['z_order_pytorch'].append((time.time() - start) / 100) + + start = time.time() + for _ in range(100): + code_h_cuda = vox2seq.encode(coords, mode='hilbert').cuda() + torch.cuda.synchronize() + stats['hilbert_cuda'].append((time.time() - start) / 100) + + start = time.time() + for _ in range(100): + code_h_pytorch = vox2seq.pytorch.encode(coords, mode='hilbert').cuda() + torch.cuda.synchronize() + stats['hilbert_pytorch'].append((time.time() - start) / 100) + + print(f"{'Resolution':<12}{'Z-Order (CUDA)':<24}{'Z-Order (PyTorch)':<24}{'Hilbert (CUDA)':<24}{'Hilbert (PyTorch)':<24}") + for res, z_order_cuda, z_order_pytorch, hilbert_cuda, hilbert_pytorch in zip(RES, stats['z_order_cuda'], stats['z_order_pytorch'], stats['hilbert_cuda'], stats['hilbert_pytorch']): + print(f"{res:<12}{z_order_cuda:<24.6f}{z_order_pytorch:<24.6f}{hilbert_cuda:<24.6f}{hilbert_pytorch:<24.6f}") + diff --git a/third_party/TRELLIS/extensions/vox2seq/setup.py b/third_party/TRELLIS/extensions/vox2seq/setup.py new file mode 100755 index 0000000000000000000000000000000000000000..500d97b7c2c69e2ced48a9286b1b030976f2fb47 --- /dev/null +++ b/third_party/TRELLIS/extensions/vox2seq/setup.py @@ -0,0 +1,34 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. 
+#
+# For inquiries contact  george.drettakis@inria.fr
+#
+
+from setuptools import setup
+from torch.utils.cpp_extension import CUDAExtension, BuildExtension
+import os
+os.path.dirname(os.path.abspath(__file__))
+
+setup(
+    name="vox2seq",
+    packages=['vox2seq', 'vox2seq.pytorch'],
+    ext_modules=[
+        CUDAExtension(
+            name="vox2seq._C",
+            sources=[
+                "src/api.cu",
+                "src/z_order.cu",
+                "src/hilbert.cu",
+                "src/ext.cpp",
+            ],
+        )
+    ],
+    cmdclass={
+        'build_ext': BuildExtension
+    }
+)
diff --git a/third_party/TRELLIS/extensions/vox2seq/src/api.cu b/third_party/TRELLIS/extensions/vox2seq/src/api.cu
new file mode 100755
index 0000000000000000000000000000000000000000..072e930f90278f2f407b45750220d7d98c37b91e
--- /dev/null
+++ b/third_party/TRELLIS/extensions/vox2seq/src/api.cu
@@ -0,0 +1,92 @@
+#include <torch/extension.h>
+#include "api.h"
+#include "z_order.h"
+#include "hilbert.h"
+
+
+torch::Tensor
+z_order_encode(
+    const torch::Tensor& x,
+    const torch::Tensor& y,
+    const torch::Tensor& z
+) {
+    // Allocate output tensor
+    torch::Tensor codes = torch::empty_like(x);
+
+    // Call CUDA kernel
+    z_order_encode_cuda<<<(x.size(0) + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(
+        x.size(0),
+        reinterpret_cast<uint32_t*>(x.contiguous().data_ptr()),
+        reinterpret_cast<uint32_t*>(y.contiguous().data_ptr()),
+        reinterpret_cast<uint32_t*>(z.contiguous().data_ptr()),
+        reinterpret_cast<uint32_t*>(codes.data_ptr())
+    );
+
+    return codes;
+}
+
+
+std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
+z_order_decode(
+    const torch::Tensor& codes
+) {
+    // Allocate output tensors
+    torch::Tensor x = torch::empty_like(codes);
+    torch::Tensor y = torch::empty_like(codes);
+    torch::Tensor z = torch::empty_like(codes);
+
+    // Call CUDA kernel
+    z_order_decode_cuda<<<(codes.size(0) + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(
+        codes.size(0),
+        reinterpret_cast<uint32_t*>(codes.contiguous().data_ptr()),
+        reinterpret_cast<uint32_t*>(x.data_ptr()),
+        reinterpret_cast<uint32_t*>(y.data_ptr()),
+        reinterpret_cast<uint32_t*>(z.data_ptr())
+    );
+
+    return std::make_tuple(x, y, z);
+}
+
+
+torch::Tensor
+hilbert_encode(
+    const torch::Tensor& x,
+    const torch::Tensor& y,
+    const torch::Tensor& z
+) {
+    // Allocate output tensor
+    torch::Tensor codes = torch::empty_like(x);
+
+    // Call CUDA kernel
+    hilbert_encode_cuda<<<(x.size(0) + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(
+        x.size(0),
+        reinterpret_cast<uint32_t*>(x.contiguous().data_ptr()),
+        reinterpret_cast<uint32_t*>(y.contiguous().data_ptr()),
+        reinterpret_cast<uint32_t*>(z.contiguous().data_ptr()),
+        reinterpret_cast<uint32_t*>(codes.data_ptr())
+    );
+
+    return codes;
+}
+
+
+std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
+hilbert_decode(
+    const torch::Tensor& codes
+) {
+    // Allocate output tensors
+    torch::Tensor x = torch::empty_like(codes);
+    torch::Tensor y = torch::empty_like(codes);
+    torch::Tensor z = torch::empty_like(codes);
+
+    // Call CUDA kernel
+    hilbert_decode_cuda<<<(codes.size(0) + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(
+        codes.size(0),
+        reinterpret_cast<uint32_t*>(codes.contiguous().data_ptr()),
+        reinterpret_cast<uint32_t*>(x.data_ptr()),
+        reinterpret_cast<uint32_t*>(y.data_ptr()),
+        reinterpret_cast<uint32_t*>(z.data_ptr())
+    );
+
+    return std::make_tuple(x, y, z);
+}
diff --git a/third_party/TRELLIS/extensions/vox2seq/src/api.h b/third_party/TRELLIS/extensions/vox2seq/src/api.h
new file mode 100755
index 0000000000000000000000000000000000000000..26a68348d56585d0e9e1dfb4900a0d23587df9a6
--- /dev/null
+++ b/third_party/TRELLIS/extensions/vox2seq/src/api.h
@@ -0,0 +1,76 @@
+/*
+ * Serialize a voxel grid
+ *
+ * Copyright (C) 2024, Jianfeng XIANG
+ * All rights reserved.
+ *
+ * Licensed under The MIT License [see LICENSE for details]
+ *
+ * Written by Jianfeng XIANG
+ */
+
+#pragma once
+#include <torch/extension.h>
+
+
+#define BLOCK_SIZE 256
+
+
+/**
+ * Z-order encode 3D points
+ *
+ * @param x [N] tensor containing the x coordinates
+ * @param y [N] tensor containing the y coordinates
+ * @param z [N] tensor containing the z coordinates
+ *
+ * @return [N] tensor containing the z-order encoded values
+ */
+torch::Tensor
+z_order_encode(
+    const torch::Tensor& x,
+    const torch::Tensor& y,
+    const torch::Tensor& z
+);
+
+
+/**
+ * Z-order decode 3D points
+ *
+ * @param codes [N] tensor containing the z-order encoded values
+ *
+ * @return 3 tensors [N] containing the x, y, z coordinates
+ */
+std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
+z_order_decode(
+    const torch::Tensor& codes
+);
+
+
+/**
+ * Hilbert encode 3D points
+ *
+ * @param x [N] tensor containing the x coordinates
+ * @param y [N] tensor containing the y coordinates
+ * @param z [N] tensor containing the z coordinates
+ *
+ * @return [N] tensor containing the Hilbert encoded values
+ */
+torch::Tensor
+hilbert_encode(
+    const torch::Tensor& x,
+    const torch::Tensor& y,
+    const torch::Tensor& z
+);
+
+
+/**
+ * Hilbert decode 3D points
+ *
+ * @param codes [N] tensor containing the Hilbert encoded values
+ *
+ * @return 3 tensors [N] containing the x, y, z coordinates
+ */
+std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
+hilbert_decode(
+    const torch::Tensor& codes
+);
diff --git a/third_party/TRELLIS/extensions/vox2seq/src/ext.cpp b/third_party/TRELLIS/extensions/vox2seq/src/ext.cpp
new file mode 100755
index 0000000000000000000000000000000000000000..72e76d3b361eb8f355760f067f71005d4e37902c
--- /dev/null
+++ b/third_party/TRELLIS/extensions/vox2seq/src/ext.cpp
@@ -0,0 +1,10 @@
+#include <torch/extension.h>
+#include "api.h"
+
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+    m.def("z_order_encode", &z_order_encode);
+    m.def("z_order_decode", &z_order_decode);
+    m.def("hilbert_encode", &hilbert_encode);
+    m.def("hilbert_decode", &hilbert_decode);
+}
\ No newline at end of file
diff --git a/third_party/TRELLIS/extensions/vox2seq/src/hilbert.cu b/third_party/TRELLIS/extensions/vox2seq/src/hilbert.cu
new file mode 100755
index 0000000000000000000000000000000000000000..b3c5bb19474a528cf6f3102e728fa7550588ca61
--- /dev/null
+++ b/third_party/TRELLIS/extensions/vox2seq/src/hilbert.cu
@@ -0,0 +1,133 @@
+#include <torch/extension.h>
+#include <cuda.h>
+#include <cuda_runtime.h>
+
+#include <cooperative_groups.h>
+#include <cooperative_groups/reduce.h>
+namespace cg = cooperative_groups;
+
+#include "hilbert.h"
+
+
+// Expands a 10-bit integer into 30 bits by inserting 2 zeros after each bit.
+static __device__ uint32_t expandBits(uint32_t v)
+{
+    v = (v * 0x00010001u) & 0xFF0000FFu;
+    v = (v * 0x00000101u) & 0x0F00F00Fu;
+    v = (v * 0x00000011u) & 0xC30C30C3u;
+    v = (v * 0x00000005u) & 0x49249249u;
+    return v;
+}
+
+
+// Removes 2 zeros after each bit in a 30-bit integer.
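+// For illustration: expandBits(0b1011) == 0b1000001001 (input bit i lands at
+// output bit 3*i); extractBits is the inverse, keeping every third bit.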
+static __device__ uint32_t extractBits(uint32_t v) +{ + v = v & 0x49249249; + v = (v ^ (v >> 2)) & 0x030C30C3u; + v = (v ^ (v >> 4)) & 0x0300F00Fu; + v = (v ^ (v >> 8)) & 0x030000FFu; + v = (v ^ (v >> 16)) & 0x000003FFu; + return v; +} + + +__global__ void hilbert_encode_cuda( + size_t N, + const uint32_t* x, + const uint32_t* y, + const uint32_t* z, + uint32_t* codes +) { + size_t thread_id = cg::this_grid().thread_rank(); + if (thread_id >= N) return; + + uint32_t point[3] = {x[thread_id], y[thread_id], z[thread_id]}; + + uint32_t m = 1 << 9, q, p, t; + + // Inverse undo excess work + q = m; + while (q > 1) { + p = q - 1; + for (int i = 0; i < 3; i++) { + if (point[i] & q) { + point[0] ^= p; // invert + } else { + t = (point[0] ^ point[i]) & p; + point[0] ^= t; + point[i] ^= t; + } + } + q >>= 1; + } + + // Gray encode + for (int i = 1; i < 3; i++) { + point[i] ^= point[i - 1]; + } + t = 0; + q = m; + while (q > 1) { + if (point[2] & q) { + t ^= q - 1; + } + q >>= 1; + } + for (int i = 0; i < 3; i++) { + point[i] ^= t; + } + + // Convert to 3D Hilbert code + uint32_t xx = expandBits(point[0]); + uint32_t yy = expandBits(point[1]); + uint32_t zz = expandBits(point[2]); + + codes[thread_id] = xx * 4 + yy * 2 + zz; +} + + +__global__ void hilbert_decode_cuda( + size_t N, + const uint32_t* codes, + uint32_t* x, + uint32_t* y, + uint32_t* z +) { + size_t thread_id = cg::this_grid().thread_rank(); + if (thread_id >= N) return; + + uint32_t point[3]; + point[0] = extractBits(codes[thread_id] >> 2); + point[1] = extractBits(codes[thread_id] >> 1); + point[2] = extractBits(codes[thread_id]); + + uint32_t m = 2 << 9, q, p, t; + + // Gray decode by H ^ (H/2) + t = point[2] >> 1; + for (int i = 2; i > 0; i--) { + point[i] ^= point[i - 1]; + } + point[0] ^= t; + + // Undo excess work + q = 2; + while (q != m) { + p = q - 1; + for (int i = 2; i >= 0; i--) { + if (point[i] & q) { + point[0] ^= p; + } else { + t = (point[0] ^ point[i]) & p; + point[0] ^= t; + point[i] ^= t; + } + } + q <<= 1; + } + + x[thread_id] = point[0]; + y[thread_id] = point[1]; + z[thread_id] = point[2]; +} diff --git a/third_party/TRELLIS/extensions/vox2seq/src/hilbert.h b/third_party/TRELLIS/extensions/vox2seq/src/hilbert.h new file mode 100755 index 0000000000000000000000000000000000000000..4896bf6006f43e5e527d8bde691ce7a54b38c4d7 --- /dev/null +++ b/third_party/TRELLIS/extensions/vox2seq/src/hilbert.h @@ -0,0 +1,35 @@ +#pragma once + +/** + * Hilbert encode 3D points + * + * @param x [N] tensor containing the x coordinates + * @param y [N] tensor containing the y coordinates + * @param z [N] tensor containing the z coordinates + * + * @return [N] tensor containing the z-order encoded values + */ +__global__ void hilbert_encode_cuda( + size_t N, + const uint32_t* x, + const uint32_t* y, + const uint32_t* z, + uint32_t* codes +); + + +/** + * Hilbert decode 3D points + * + * @param codes [N] tensor containing the z-order encoded values + * @param x [N] tensor containing the x coordinates + * @param y [N] tensor containing the y coordinates + * @param z [N] tensor containing the z coordinates + */ +__global__ void hilbert_decode_cuda( + size_t N, + const uint32_t* codes, + uint32_t* x, + uint32_t* y, + uint32_t* z +); diff --git a/third_party/TRELLIS/extensions/vox2seq/src/z_order.cu b/third_party/TRELLIS/extensions/vox2seq/src/z_order.cu new file mode 100755 index 0000000000000000000000000000000000000000..ba6f5a91e55588d1ca4bea7cb45e6936330a694b --- /dev/null +++ b/third_party/TRELLIS/extensions/vox2seq/src/z_order.cu @@ -0,0 
+1,66 @@
+#include <torch/extension.h>
+#include <cuda.h>
+#include <cuda_runtime.h>
+
+#include <cooperative_groups.h>
+#include <cooperative_groups/reduce.h>
+namespace cg = cooperative_groups;
+
+#include "z_order.h"
+
+
+// Expands a 10-bit integer into 30 bits by inserting 2 zeros after each bit.
+static __device__ uint32_t expandBits(uint32_t v)
+{
+    v = (v * 0x00010001u) & 0xFF0000FFu;
+    v = (v * 0x00000101u) & 0x0F00F00Fu;
+    v = (v * 0x00000011u) & 0xC30C30C3u;
+    v = (v * 0x00000005u) & 0x49249249u;
+    return v;
+}
+
+
+// Removes 2 zeros after each bit in a 30-bit integer.
+static __device__ uint32_t extractBits(uint32_t v)
+{
+    v = v & 0x49249249;
+    v = (v ^ (v >> 2)) & 0x030C30C3u;
+    v = (v ^ (v >> 4)) & 0x0300F00Fu;
+    v = (v ^ (v >> 8)) & 0x030000FFu;
+    v = (v ^ (v >> 16)) & 0x000003FFu;
+    return v;
+}
+
+
+__global__ void z_order_encode_cuda(
+    size_t N,
+    const uint32_t* x,
+    const uint32_t* y,
+    const uint32_t* z,
+    uint32_t* codes
+) {
+    size_t thread_id = cg::this_grid().thread_rank();
+    if (thread_id >= N) return;
+
+    uint32_t xx = expandBits(x[thread_id]);
+    uint32_t yy = expandBits(y[thread_id]);
+    uint32_t zz = expandBits(z[thread_id]);
+
+    codes[thread_id] = xx * 4 + yy * 2 + zz;
+}
+
+
+__global__ void z_order_decode_cuda(
+    size_t N,
+    const uint32_t* codes,
+    uint32_t* x,
+    uint32_t* y,
+    uint32_t* z
+) {
+    size_t thread_id = cg::this_grid().thread_rank();
+    if (thread_id >= N) return;
+
+    x[thread_id] = extractBits(codes[thread_id] >> 2);
+    y[thread_id] = extractBits(codes[thread_id] >> 1);
+    z[thread_id] = extractBits(codes[thread_id]);
+}
diff --git a/third_party/TRELLIS/extensions/vox2seq/src/z_order.h b/third_party/TRELLIS/extensions/vox2seq/src/z_order.h
new file mode 100755
index 0000000000000000000000000000000000000000..a4aa857d064e375c8f2eb023abd9ac4af5a2d8f5
--- /dev/null
+++ b/third_party/TRELLIS/extensions/vox2seq/src/z_order.h
@@ -0,0 +1,35 @@
+#pragma once
+
+/**
+ * Z-order encode 3D points
+ *
+ * @param x [N] tensor containing the x coordinates
+ * @param y [N] tensor containing the y coordinates
+ * @param z [N] tensor containing the z coordinates
+ *
+ * @return [N] tensor containing the z-order encoded values
+ */
+__global__ void z_order_encode_cuda(
+    size_t N,
+    const uint32_t* x,
+    const uint32_t* y,
+    const uint32_t* z,
+    uint32_t* codes
+);
+
+
+/**
+ * Z-order decode 3D points
+ *
+ * @param codes [N] tensor containing the z-order encoded values
+ * @param x [N] tensor containing the x coordinates
+ * @param y [N] tensor containing the y coordinates
+ * @param z [N] tensor containing the z coordinates
+ */
+__global__ void z_order_decode_cuda(
+    size_t N,
+    const uint32_t* codes,
+    uint32_t* x,
+    uint32_t* y,
+    uint32_t* z
+);
diff --git a/third_party/TRELLIS/extensions/vox2seq/test.py b/third_party/TRELLIS/extensions/vox2seq/test.py
new file mode 100755
index 0000000000000000000000000000000000000000..60f4fc0ae1aa0ff55744455e66b0854706908506
--- /dev/null
+++ b/third_party/TRELLIS/extensions/vox2seq/test.py
@@ -0,0 +1,25 @@
+import torch
+import vox2seq
+
+
+if __name__ == "__main__":
+    RES = 256
+    coords = torch.meshgrid(torch.arange(RES), torch.arange(RES), torch.arange(RES))
+    coords = torch.stack(coords, dim=-1).reshape(-1, 3).int().cuda()
+    code_z_cuda = vox2seq.encode(coords, mode='z_order')
+    code_z_pytorch = vox2seq.pytorch.encode(coords, mode='z_order')
+    code_h_cuda = vox2seq.encode(coords, mode='hilbert')
+    code_h_pytorch = vox2seq.pytorch.encode(coords, mode='hilbert')
+    assert torch.equal(code_z_cuda, code_z_pytorch)
+    assert torch.equal(code_h_cuda, code_h_pytorch)
+
+    code = torch.arange(RES**3).int().cuda()
+    
coords_z_cuda = vox2seq.decode(code, mode='z_order') + coords_z_pytorch = vox2seq.pytorch.decode(code, mode='z_order') + coords_h_cuda = vox2seq.decode(code, mode='hilbert') + coords_h_pytorch = vox2seq.pytorch.decode(code, mode='hilbert') + assert torch.equal(coords_z_cuda, coords_z_pytorch) + assert torch.equal(coords_h_cuda, coords_h_pytorch) + + print("All tests passed.") + diff --git a/third_party/TRELLIS/extensions/vox2seq/vox2seq/__init__.py b/third_party/TRELLIS/extensions/vox2seq/vox2seq/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..ba13bb5f46b2ba90c7882f23b7d97c3b6fe960ac --- /dev/null +++ b/third_party/TRELLIS/extensions/vox2seq/vox2seq/__init__.py @@ -0,0 +1,50 @@ + +from typing import * +import torch +from . import _C +from . import pytorch + + +@torch.no_grad() +def encode(coords: torch.Tensor, permute: List[int] = [0, 1, 2], mode: Literal['z_order', 'hilbert'] = 'z_order') -> torch.Tensor: + """ + Encodes 3D coordinates into a 30-bit code. + + Args: + coords: a tensor of shape [N, 3] containing the 3D coordinates. + permute: the permutation of the coordinates. + mode: the encoding mode to use. + """ + assert coords.shape[-1] == 3 and coords.ndim == 2, "Input coordinates must be of shape [N, 3]" + x = coords[:, permute[0]].int() + y = coords[:, permute[1]].int() + z = coords[:, permute[2]].int() + if mode == 'z_order': + return _C.z_order_encode(x, y, z) + elif mode == 'hilbert': + return _C.hilbert_encode(x, y, z) + else: + raise ValueError(f"Unknown encoding mode: {mode}") + + +@torch.no_grad() +def decode(code: torch.Tensor, permute: List[int] = [0, 1, 2], mode: Literal['z_order', 'hilbert'] = 'z_order') -> torch.Tensor: + """ + Decodes a 30-bit code into 3D coordinates. + + Args: + code: a tensor of shape [N] containing the 30-bit code. + permute: the permutation of the coordinates. + mode: the decoding mode to use. + """ + assert code.ndim == 1, "Input code must be of shape [N]" + if mode == 'z_order': + coords = _C.z_order_decode(code) + elif mode == 'hilbert': + coords = _C.hilbert_decode(code) + else: + raise ValueError(f"Unknown decoding mode: {mode}") + x = coords[permute.index(0)] + y = coords[permute.index(1)] + z = coords[permute.index(2)] + return torch.stack([x, y, z], dim=-1) diff --git a/third_party/TRELLIS/extensions/vox2seq/vox2seq/pytorch/__init__.py b/third_party/TRELLIS/extensions/vox2seq/vox2seq/pytorch/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..25c74c42feb802b24eee9a1bc8744040468c927d --- /dev/null +++ b/third_party/TRELLIS/extensions/vox2seq/vox2seq/pytorch/__init__.py @@ -0,0 +1,48 @@ +import torch +from typing import * + +from .default import ( + encode, + decode, + z_order_encode, + z_order_decode, + hilbert_encode, + hilbert_decode, +) + + +@torch.no_grad() +def encode(coords: torch.Tensor, permute: List[int] = [0, 1, 2], mode: Literal['z_order', 'hilbert'] = 'z_order') -> torch.Tensor: + """ + Encodes 3D coordinates into a 30-bit code. + + Args: + coords: a tensor of shape [N, 3] containing the 3D coordinates. + permute: the permutation of the coordinates. + mode: the encoding mode to use. 
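+
+    Example (illustrative; coordinates must fit in 10 bits, i.e. 0..1023):
+        >>> coords = torch.randint(0, 1024, (8, 3))
+        >>> code = encode(coords, mode='hilbert')
+        >>> decode(code, mode='hilbert')  # recovers coords, returned as floats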
+ """ + if mode == 'z_order': + return z_order_encode(coords[:, permute], depth=10).int() + elif mode == 'hilbert': + return hilbert_encode(coords[:, permute], depth=10).int() + else: + raise ValueError(f"Unknown encoding mode: {mode}") + + +@torch.no_grad() +def decode(code: torch.Tensor, permute: List[int] = [0, 1, 2], mode: Literal['z_order', 'hilbert'] = 'z_order') -> torch.Tensor: + """ + Decodes a 30-bit code into 3D coordinates. + + Args: + code: a tensor of shape [N] containing the 30-bit code. + permute: the permutation of the coordinates. + mode: the decoding mode to use. + """ + if mode == 'z_order': + return z_order_decode(code, depth=10)[:, permute].float() + elif mode == 'hilbert': + return hilbert_decode(code, depth=10)[:, permute].float() + else: + raise ValueError(f"Unknown decoding mode: {mode}") + \ No newline at end of file diff --git a/third_party/TRELLIS/extensions/vox2seq/vox2seq/pytorch/default.py b/third_party/TRELLIS/extensions/vox2seq/vox2seq/pytorch/default.py new file mode 100755 index 0000000000000000000000000000000000000000..906f9bfbe80fcf71977ca774b6491ff63a1ee43b --- /dev/null +++ b/third_party/TRELLIS/extensions/vox2seq/vox2seq/pytorch/default.py @@ -0,0 +1,59 @@ +import torch +from .z_order import xyz2key as z_order_encode_ +from .z_order import key2xyz as z_order_decode_ +from .hilbert import encode as hilbert_encode_ +from .hilbert import decode as hilbert_decode_ + + +@torch.inference_mode() +def encode(grid_coord, batch=None, depth=16, order="z"): + assert order in {"z", "z-trans", "hilbert", "hilbert-trans"} + if order == "z": + code = z_order_encode(grid_coord, depth=depth) + elif order == "z-trans": + code = z_order_encode(grid_coord[:, [1, 0, 2]], depth=depth) + elif order == "hilbert": + code = hilbert_encode(grid_coord, depth=depth) + elif order == "hilbert-trans": + code = hilbert_encode(grid_coord[:, [1, 0, 2]], depth=depth) + else: + raise NotImplementedError + if batch is not None: + batch = batch.long() + code = batch << depth * 3 | code + return code + + +@torch.inference_mode() +def decode(code, depth=16, order="z"): + assert order in {"z", "hilbert"} + batch = code >> depth * 3 + code = code & ((1 << depth * 3) - 1) + if order == "z": + grid_coord = z_order_decode(code, depth=depth) + elif order == "hilbert": + grid_coord = hilbert_decode(code, depth=depth) + else: + raise NotImplementedError + return grid_coord, batch + + +def z_order_encode(grid_coord: torch.Tensor, depth: int = 16): + x, y, z = grid_coord[:, 0].long(), grid_coord[:, 1].long(), grid_coord[:, 2].long() + # we block the support to batch, maintain batched code in Point class + code = z_order_encode_(x, y, z, b=None, depth=depth) + return code + + +def z_order_decode(code: torch.Tensor, depth): + x, y, z, _ = z_order_decode_(code, depth=depth) + grid_coord = torch.stack([x, y, z], dim=-1) # (N, 3) + return grid_coord + + +def hilbert_encode(grid_coord: torch.Tensor, depth: int = 16): + return hilbert_encode_(grid_coord, num_dims=3, num_bits=depth) + + +def hilbert_decode(code: torch.Tensor, depth: int = 16): + return hilbert_decode_(code, num_dims=3, num_bits=depth) \ No newline at end of file diff --git a/third_party/TRELLIS/extensions/vox2seq/vox2seq/pytorch/hilbert.py b/third_party/TRELLIS/extensions/vox2seq/vox2seq/pytorch/hilbert.py new file mode 100755 index 0000000000000000000000000000000000000000..c3fb6565ff855c50553d6215eb74407f88b43a01 --- /dev/null +++ b/third_party/TRELLIS/extensions/vox2seq/vox2seq/pytorch/hilbert.py @@ -0,0 +1,303 @@ +""" +Hilbert Order 
+Modified from https://github.com/PrincetonLIPS/numpy-hilbert-curve
+
+Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com), Kaixin Xu
+Please cite our work if the code is helpful to you.
+"""
+
+import torch
+
+
+def right_shift(binary, k=1, axis=-1):
+    """Right shift an array of binary values.
+
+    Parameters:
+    -----------
+     binary: An ndarray of binary values.
+
+     k: The number of bits to shift. Default 1.
+
+     axis: The axis along which to shift. Default -1.
+
+    Returns:
+    --------
+     Returns an ndarray with zero prepended and the ends truncated, along
+     whatever axis was specified."""
+
+    # If we're shifting the whole thing, just return zeros.
+    if binary.shape[axis] <= k:
+        return torch.zeros_like(binary)
+
+    # Determine the padding pattern.
+    # padding = [(0,0)] * len(binary.shape)
+    # padding[axis] = (k,0)
+
+    # Determine the slicing pattern to eliminate just the last one.
+    slicing = [slice(None)] * len(binary.shape)
+    slicing[axis] = slice(None, -k)
+    shifted = torch.nn.functional.pad(
+        binary[tuple(slicing)], (k, 0), mode="constant", value=0
+    )
+
+    return shifted
+
+
+def binary2gray(binary, axis=-1):
+    """Convert an array of binary values into Gray codes.
+
+    This uses the classic X ^ (X >> 1) trick to compute the Gray code.
+
+    Parameters:
+    -----------
+     binary: An ndarray of binary values.
+
+     axis: The axis along which to compute the gray code. Default=-1.
+
+    Returns:
+    --------
+     Returns an ndarray of Gray codes.
+    """
+    shifted = right_shift(binary, axis=axis)
+
+    # Do the X ^ (X >> 1) trick.
+    gray = torch.logical_xor(binary, shifted)
+
+    return gray
+
+
+def gray2binary(gray, axis=-1):
+    """Convert an array of Gray codes back into binary values.
+
+    Parameters:
+    -----------
+     gray: An ndarray of gray codes.
+
+     axis: The axis along which to perform Gray decoding. Default=-1.
+
+    Returns:
+    --------
+     Returns an ndarray of binary values.
+    """
+
+    # Loop the log2(bits) number of times necessary, with shift and xor.
+    shift = 2 ** (torch.Tensor([gray.shape[axis]]).log2().ceil().int() - 1)
+    while shift > 0:
+        gray = torch.logical_xor(gray, right_shift(gray, shift))
+        shift = torch.div(shift, 2, rounding_mode="floor")
+    return gray
+
+
+def encode(locs, num_dims, num_bits):
+    """Encode an array of locations in a hypercube into a Hilbert integer.
+
+    This is a vectorized-ish version of the Hilbert curve implementation by John
+    Skilling as described in:
+
+    Skilling, J. (2004, April). Programming the Hilbert curve. In AIP Conference
+    Proceedings (Vol. 707, No. 1, pp. 381-387). American Institute of Physics.
+
+    Params:
+    -------
+     locs - An ndarray of locations in a hypercube of num_dims dimensions, in
+            which each dimension runs from 0 to 2**num_bits-1. The shape can
+            be arbitrary, as long as the last dimension has size num_dims.
+
+     num_dims - The dimensionality of the hypercube. Integer.
+
+     num_bits - The number of bits for each dimension. Integer.
+
+    Returns:
+    --------
+     The output is an ndarray of uint64 integers with the same shape as the
+     input, excluding the last dimension, which needs to be num_dims.
+    """
+
+    # Keep around the original shape for later.
+    orig_shape = locs.shape
+    bitpack_mask = 1 << torch.arange(0, 8).to(locs.device)
+    bitpack_mask_rev = bitpack_mask.flip(-1)
+
+    if orig_shape[-1] != num_dims:
+        raise ValueError(
+            """
+      The shape of locs was surprising in that the last dimension was of size
+      %d, but num_dims=%d. These need to be equal.
+ """ + % (orig_shape[-1], num_dims) + ) + + if num_dims * num_bits > 63: + raise ValueError( + """ + num_dims=%d and num_bits=%d for %d bits total, which can't be encoded + into a int64. Are you sure you need that many points on your Hilbert + curve? + """ + % (num_dims, num_bits, num_dims * num_bits) + ) + + # Treat the location integers as 64-bit unsigned and then split them up into + # a sequence of uint8s. Preserve the association by dimension. + locs_uint8 = locs.long().view(torch.uint8).reshape((-1, num_dims, 8)).flip(-1) + + # Now turn these into bits and truncate to num_bits. + gray = ( + locs_uint8.unsqueeze(-1) + .bitwise_and(bitpack_mask_rev) + .ne(0) + .byte() + .flatten(-2, -1)[..., -num_bits:] + ) + + # Run the decoding process the other way. + # Iterate forwards through the bits. + for bit in range(0, num_bits): + # Iterate forwards through the dimensions. + for dim in range(0, num_dims): + # Identify which ones have this bit active. + mask = gray[:, dim, bit] + + # Where this bit is on, invert the 0 dimension for lower bits. + gray[:, 0, bit + 1 :] = torch.logical_xor( + gray[:, 0, bit + 1 :], mask[:, None] + ) + + # Where the bit is off, exchange the lower bits with the 0 dimension. + to_flip = torch.logical_and( + torch.logical_not(mask[:, None]).repeat(1, gray.shape[2] - bit - 1), + torch.logical_xor(gray[:, 0, bit + 1 :], gray[:, dim, bit + 1 :]), + ) + gray[:, dim, bit + 1 :] = torch.logical_xor( + gray[:, dim, bit + 1 :], to_flip + ) + gray[:, 0, bit + 1 :] = torch.logical_xor(gray[:, 0, bit + 1 :], to_flip) + + # Now flatten out. + gray = gray.swapaxes(1, 2).reshape((-1, num_bits * num_dims)) + + # Convert Gray back to binary. + hh_bin = gray2binary(gray) + + # Pad back out to 64 bits. + extra_dims = 64 - num_bits * num_dims + padded = torch.nn.functional.pad(hh_bin, (extra_dims, 0), "constant", 0) + + # Convert binary values into uint8s. + hh_uint8 = ( + (padded.flip(-1).reshape((-1, 8, 8)) * bitpack_mask) + .sum(2) + .squeeze() + .type(torch.uint8) + ) + + # Convert uint8s into uint64s. + hh_uint64 = hh_uint8.view(torch.int64).squeeze() + + return hh_uint64 + + +def decode(hilberts, num_dims, num_bits): + """Decode an array of Hilbert integers into locations in a hypercube. + + This is a vectorized-ish version of the Hilbert curve implementation by John + Skilling as described in: + + Skilling, J. (2004, April). Programming the Hilbert curve. In AIP Conference + Proceedings (Vol. 707, No. 1, pp. 381-387). American Institute of Physics. + + Params: + ------- + hilberts - An ndarray of Hilbert integers. Must be an integer dtype and + cannot have fewer bits than num_dims * num_bits. + + num_dims - The dimensionality of the hypercube. Integer. + + num_bits - The number of bits for each dimension. Integer. + + Returns: + -------- + The output is an ndarray of unsigned integers with the same shape as hilberts + but with an additional dimension of size num_dims. + """ + + if num_dims * num_bits > 64: + raise ValueError( + """ + num_dims=%d and num_bits=%d for %d bits total, which can't be encoded + into a uint64. Are you sure you need that many points on your Hilbert + curve? + """ + % (num_dims, num_bits) + ) + + # Handle the case where we got handed a naked integer. + hilberts = torch.atleast_1d(hilberts) + + # Keep around the shape for later. + orig_shape = hilberts.shape + bitpack_mask = 2 ** torch.arange(0, 8).to(hilberts.device) + bitpack_mask_rev = bitpack_mask.flip(-1) + + # Treat each of the hilberts as a s equence of eight uint8. 
+    # This treats all of the inputs as uint64 and makes things uniform.
+    hh_uint8 = (
+        hilberts.ravel().type(torch.int64).view(torch.uint8).reshape((-1, 8)).flip(-1)
+    )
+
+    # Turn these lists of uints into lists of bits and then truncate to the size
+    # we actually need for using Skilling's procedure.
+    hh_bits = (
+        hh_uint8.unsqueeze(-1)
+        .bitwise_and(bitpack_mask_rev)
+        .ne(0)
+        .byte()
+        .flatten(-2, -1)[:, -num_dims * num_bits :]
+    )
+
+    # Take the sequence of bits and Gray-code it.
+    gray = binary2gray(hh_bits)
+
+    # There has got to be a better way to do this.
+    # I could index them differently, but the eventual packbits likes it this way.
+    gray = gray.reshape((-1, num_bits, num_dims)).swapaxes(1, 2)
+
+    # Iterate backwards through the bits.
+    for bit in range(num_bits - 1, -1, -1):
+        # Iterate backwards through the dimensions.
+        for dim in range(num_dims - 1, -1, -1):
+            # Identify which ones have this bit active.
+            mask = gray[:, dim, bit]
+
+            # Where this bit is on, invert the 0 dimension for lower bits.
+            gray[:, 0, bit + 1 :] = torch.logical_xor(
+                gray[:, 0, bit + 1 :], mask[:, None]
+            )
+
+            # Where the bit is off, exchange the lower bits with the 0 dimension.
+            to_flip = torch.logical_and(
+                torch.logical_not(mask[:, None]),
+                torch.logical_xor(gray[:, 0, bit + 1 :], gray[:, dim, bit + 1 :]),
+            )
+            gray[:, dim, bit + 1 :] = torch.logical_xor(
+                gray[:, dim, bit + 1 :], to_flip
+            )
+            gray[:, 0, bit + 1 :] = torch.logical_xor(gray[:, 0, bit + 1 :], to_flip)
+
+    # Pad back out to 64 bits.
+    extra_dims = 64 - num_bits
+    padded = torch.nn.functional.pad(gray, (extra_dims, 0), "constant", 0)
+
+    # Now chop these up into blocks of 8.
+    locs_chopped = padded.flip(-1).reshape((-1, num_dims, 8, 8))
+
+    # Take those blocks and turn them into uint8s.
+    locs_uint8 = (locs_chopped * bitpack_mask).sum(3).squeeze().type(torch.uint8)
+
+    # Finally, reinterpret these as int64s (PyTorch's stand-in for uint64).
+    flat_locs = locs_uint8.view(torch.int64)
+
+    # Return them in the expected shape.
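+    # (Each row decodes to num_dims integer coordinates; dtype is int64 since
+    # PyTorch has no unsigned 64-bit type.)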
+ return flat_locs.reshape((*orig_shape, num_dims)) \ No newline at end of file diff --git a/third_party/TRELLIS/extensions/vox2seq/vox2seq/pytorch/z_order.py b/third_party/TRELLIS/extensions/vox2seq/vox2seq/pytorch/z_order.py new file mode 100755 index 0000000000000000000000000000000000000000..b33963de44ddfcbecdf2e403ea70166f2d0e4eb0 --- /dev/null +++ b/third_party/TRELLIS/extensions/vox2seq/vox2seq/pytorch/z_order.py @@ -0,0 +1,126 @@ +# -------------------------------------------------------- +# Octree-based Sparse Convolutional Neural Networks +# Copyright (c) 2022 Peng-Shuai Wang +# Licensed under The MIT License [see LICENSE for details] +# Written by Peng-Shuai Wang +# -------------------------------------------------------- + +import torch +from typing import Optional, Union + + +class KeyLUT: + def __init__(self): + r256 = torch.arange(256, dtype=torch.int64) + r512 = torch.arange(512, dtype=torch.int64) + zero = torch.zeros(256, dtype=torch.int64) + device = torch.device("cpu") + + self._encode = { + device: ( + self.xyz2key(r256, zero, zero, 8), + self.xyz2key(zero, r256, zero, 8), + self.xyz2key(zero, zero, r256, 8), + ) + } + self._decode = {device: self.key2xyz(r512, 9)} + + def encode_lut(self, device=torch.device("cpu")): + if device not in self._encode: + cpu = torch.device("cpu") + self._encode[device] = tuple(e.to(device) for e in self._encode[cpu]) + return self._encode[device] + + def decode_lut(self, device=torch.device("cpu")): + if device not in self._decode: + cpu = torch.device("cpu") + self._decode[device] = tuple(e.to(device) for e in self._decode[cpu]) + return self._decode[device] + + def xyz2key(self, x, y, z, depth): + key = torch.zeros_like(x) + for i in range(depth): + mask = 1 << i + key = ( + key + | ((x & mask) << (2 * i + 2)) + | ((y & mask) << (2 * i + 1)) + | ((z & mask) << (2 * i + 0)) + ) + return key + + def key2xyz(self, key, depth): + x = torch.zeros_like(key) + y = torch.zeros_like(key) + z = torch.zeros_like(key) + for i in range(depth): + x = x | ((key & (1 << (3 * i + 2))) >> (2 * i + 2)) + y = y | ((key & (1 << (3 * i + 1))) >> (2 * i + 1)) + z = z | ((key & (1 << (3 * i + 0))) >> (2 * i + 0)) + return x, y, z + + +_key_lut = KeyLUT() + + +def xyz2key( + x: torch.Tensor, + y: torch.Tensor, + z: torch.Tensor, + b: Optional[Union[torch.Tensor, int]] = None, + depth: int = 16, +): + r"""Encodes :attr:`x`, :attr:`y`, :attr:`z` coordinates to the shuffled keys + based on pre-computed look up tables. The speed of this function is much + faster than the method based on for-loop. + + Args: + x (torch.Tensor): The x coordinate. + y (torch.Tensor): The y coordinate. + z (torch.Tensor): The z coordinate. + b (torch.Tensor or int): The batch index of the coordinates, and should be + smaller than 32768. If :attr:`b` is :obj:`torch.Tensor`, the size of + :attr:`b` must be the same as :attr:`x`, :attr:`y`, and :attr:`z`. + depth (int): The depth of the shuffled key, and must be smaller than 17 (< 17). 
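+
+    Example (illustrative, depth=2):
+        >>> xyz2key(torch.tensor([3]), torch.tensor([1]), torch.tensor([2]), depth=2)
+        tensor([46])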
+ """ + + EX, EY, EZ = _key_lut.encode_lut(x.device) + x, y, z = x.long(), y.long(), z.long() + + mask = 255 if depth > 8 else (1 << depth) - 1 + key = EX[x & mask] | EY[y & mask] | EZ[z & mask] + if depth > 8: + mask = (1 << (depth - 8)) - 1 + key16 = EX[(x >> 8) & mask] | EY[(y >> 8) & mask] | EZ[(z >> 8) & mask] + key = key16 << 24 | key + + if b is not None: + b = b.long() + key = b << 48 | key + + return key + + +def key2xyz(key: torch.Tensor, depth: int = 16): + r"""Decodes the shuffled key to :attr:`x`, :attr:`y`, :attr:`z` coordinates + and the batch index based on pre-computed look up tables. + + Args: + key (torch.Tensor): The shuffled key. + depth (int): The depth of the shuffled key, and must be smaller than 17 (< 17). + """ + + DX, DY, DZ = _key_lut.decode_lut(key.device) + x, y, z = torch.zeros_like(key), torch.zeros_like(key), torch.zeros_like(key) + + b = key >> 48 + key = key & ((1 << 48) - 1) + + n = (depth + 2) // 3 + for i in range(n): + k = key >> (i * 9) & 511 + x = x | (DX[k] << (i * 3)) + y = y | (DY[k] << (i * 3)) + z = z | (DZ[k] << (i * 3)) + + return x, y, z, b \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/__init__.py b/third_party/TRELLIS/trellis/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..20d240afc9c26a21aee76954628b3d4ef9a1ccbd --- /dev/null +++ b/third_party/TRELLIS/trellis/__init__.py @@ -0,0 +1,6 @@ +from . import models +from . import modules +from . import pipelines +from . import renderers +from . import representations +from . import utils diff --git a/third_party/TRELLIS/trellis/models/__init__.py b/third_party/TRELLIS/trellis/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae9d17cbaa645a61d7d173cb185cdb090f96b74f --- /dev/null +++ b/third_party/TRELLIS/trellis/models/__init__.py @@ -0,0 +1,96 @@ +import importlib + +__attributes = { + 'SparseStructureEncoder': 'sparse_structure_vae', + 'SparseStructureDecoder': 'sparse_structure_vae', + + 'SparseStructureFlowModel': 'sparse_structure_flow', + + 'SLatEncoder': 'structured_latent_vae', + 'SLatGaussianDecoder': 'structured_latent_vae', + 'SLatRadianceFieldDecoder': 'structured_latent_vae', + 'SLatMeshDecoder': 'structured_latent_vae', + 'ElasticSLatEncoder': 'structured_latent_vae', + 'ElasticSLatGaussianDecoder': 'structured_latent_vae', + 'ElasticSLatRadianceFieldDecoder': 'structured_latent_vae', + 'ElasticSLatMeshDecoder': 'structured_latent_vae', + + 'SLatFlowModel': 'structured_latent_flow', + 'ElasticSLatFlowModel': 'structured_latent_flow', +} + +__submodules = [] + +__all__ = list(__attributes.keys()) + __submodules + +def __getattr__(name): + if name not in globals(): + if name in __attributes: + module_name = __attributes[name] + module = importlib.import_module(f".{module_name}", __name__) + globals()[name] = getattr(module, name) + elif name in __submodules: + module = importlib.import_module(f".{name}", __name__) + globals()[name] = module + else: + raise AttributeError(f"module {__name__} has no attribute {name}") + return globals()[name] + + +def from_pretrained(path: str, **kwargs): + """ + Load a model from a pretrained checkpoint. + + Args: + path: The path to the checkpoint. Can be either local path or a Hugging Face model name. + NOTE: config file and model file should take the name f'{path}.json' and f'{path}.safetensors' respectively. + **kwargs: Additional arguments for the model constructor. 
+ """ + import os + import json + from safetensors.torch import load_file + is_local = os.path.exists(f"{path}.json") and os.path.exists(f"{path}.safetensors") + + if is_local: + config_file = f"{path}.json" + model_file = f"{path}.safetensors" + else: + from huggingface_hub import hf_hub_download + path_parts = path.split('/') + repo_id = f'{path_parts[0]}/{path_parts[1]}' + model_name = '/'.join(path_parts[2:]) + config_file = hf_hub_download(repo_id, f"{model_name}.json") + model_file = hf_hub_download(repo_id, f"{model_name}.safetensors") + + with open(config_file, 'r') as f: + config = json.load(f) + model = __getattr__(config['name'])(**config['args'], **kwargs) + model.load_state_dict(load_file(model_file)) + + return model + + +# For Pylance +if __name__ == '__main__': + from .sparse_structure_vae import ( + SparseStructureEncoder, + SparseStructureDecoder, + ) + + from .sparse_structure_flow import SparseStructureFlowModel + + from .structured_latent_vae import ( + SLatEncoder, + SLatGaussianDecoder, + SLatRadianceFieldDecoder, + SLatMeshDecoder, + ElasticSLatEncoder, + ElasticSLatGaussianDecoder, + ElasticSLatRadianceFieldDecoder, + ElasticSLatMeshDecoder, + ) + + from .structured_latent_flow import ( + SLatFlowModel, + ElasticSLatFlowModel, + ) diff --git a/third_party/TRELLIS/trellis/models/sparse_elastic_mixin.py b/third_party/TRELLIS/trellis/models/sparse_elastic_mixin.py new file mode 100644 index 0000000000000000000000000000000000000000..66d204c89bedabc2afd1795cdfc6f5d58a6b1ac0 --- /dev/null +++ b/third_party/TRELLIS/trellis/models/sparse_elastic_mixin.py @@ -0,0 +1,24 @@ +from contextlib import contextmanager +from typing import * +import math +from ..modules import sparse as sp +from ..utils.elastic_utils import ElasticModuleMixin + + +class SparseTransformerElasticMixin(ElasticModuleMixin): + def _get_input_size(self, x: sp.SparseTensor, *args, **kwargs): + return x.feats.shape[0] + + @contextmanager + def with_mem_ratio(self, mem_ratio=1.0): + if mem_ratio == 1.0: + yield 1.0 + return + num_blocks = len(self.blocks) + num_checkpoint_blocks = min(math.ceil((1 - mem_ratio) * num_blocks) + 1, num_blocks) + exact_mem_ratio = 1 - (num_checkpoint_blocks - 1) / num_blocks + for i in range(num_blocks): + self.blocks[i].use_checkpoint = i < num_checkpoint_blocks + yield exact_mem_ratio + for i in range(num_blocks): + self.blocks[i].use_checkpoint = False diff --git a/third_party/TRELLIS/trellis/models/sparse_structure_flow.py b/third_party/TRELLIS/trellis/models/sparse_structure_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..aee71a9686fd3795960cf1df970e9b8db0ebd57a --- /dev/null +++ b/third_party/TRELLIS/trellis/models/sparse_structure_flow.py @@ -0,0 +1,200 @@ +from typing import * +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from ..modules.utils import convert_module_to_f16, convert_module_to_f32 +from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock +from ..modules.spatial import patchify, unpatchify + + +class TimestepEmbedder(nn.Module): + """ + Embeds scalar timesteps into vector representations. 
+ """ + def __init__(self, hidden_size, frequency_embedding_size=256): + super().__init__() + self.mlp = nn.Sequential( + nn.Linear(frequency_embedding_size, hidden_size, bias=True), + nn.SiLU(), + nn.Linear(hidden_size, hidden_size, bias=True), + ) + self.frequency_embedding_size = frequency_embedding_size + + @staticmethod + def timestep_embedding(t, dim, max_period=10000): + """ + Create sinusoidal timestep embeddings. + + Args: + t: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + dim: the dimension of the output. + max_period: controls the minimum frequency of the embeddings. + + Returns: + an (N, D) Tensor of positional embeddings. + """ + # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py + half = dim // 2 + freqs = torch.exp( + -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=t.device) + args = t[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + return embedding + + def forward(self, t): + t_freq = self.timestep_embedding(t, self.frequency_embedding_size) + t_emb = self.mlp(t_freq) + return t_emb + + +class SparseStructureFlowModel(nn.Module): + def __init__( + self, + resolution: int, + in_channels: int, + model_channels: int, + cond_channels: int, + out_channels: int, + num_blocks: int, + num_heads: Optional[int] = None, + num_head_channels: Optional[int] = 64, + mlp_ratio: float = 4, + patch_size: int = 2, + pe_mode: Literal["ape", "rope"] = "ape", + use_fp16: bool = False, + use_checkpoint: bool = False, + share_mod: bool = False, + qk_rms_norm: bool = False, + qk_rms_norm_cross: bool = False, + ): + super().__init__() + self.resolution = resolution + self.in_channels = in_channels + self.model_channels = model_channels + self.cond_channels = cond_channels + self.out_channels = out_channels + self.num_blocks = num_blocks + self.num_heads = num_heads or model_channels // num_head_channels + self.mlp_ratio = mlp_ratio + self.patch_size = patch_size + self.pe_mode = pe_mode + self.use_fp16 = use_fp16 + self.use_checkpoint = use_checkpoint + self.share_mod = share_mod + self.qk_rms_norm = qk_rms_norm + self.qk_rms_norm_cross = qk_rms_norm_cross + self.dtype = torch.float16 if use_fp16 else torch.float32 + + self.t_embedder = TimestepEmbedder(model_channels) + if share_mod: + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + nn.Linear(model_channels, 6 * model_channels, bias=True) + ) + + if pe_mode == "ape": + pos_embedder = AbsolutePositionEmbedder(model_channels, 3) + coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij') + coords = torch.stack(coords, dim=-1).reshape(-1, 3) + pos_emb = pos_embedder(coords) + self.register_buffer("pos_emb", pos_emb) + + self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels) + + self.blocks = nn.ModuleList([ + ModulatedTransformerCrossBlock( + model_channels, + cond_channels, + num_heads=self.num_heads, + mlp_ratio=self.mlp_ratio, + attn_mode='full', + use_checkpoint=self.use_checkpoint, + use_rope=(pe_mode == "rope"), + share_mod=share_mod, + qk_rms_norm=self.qk_rms_norm, + qk_rms_norm_cross=self.qk_rms_norm_cross, + ) + for _ in range(num_blocks) + ]) + + self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3) + + self.initialize_weights() + if use_fp16: + self.convert_to_fp16() + + @property + def 
device(self) -> torch.device: + """ + Return the device of the model. + """ + return next(self.parameters()).device + + def convert_to_fp16(self) -> None: + """ + Convert the torso of the model to float16. + """ + self.blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self) -> None: + """ + Convert the torso of the model to float32. + """ + self.blocks.apply(convert_module_to_f32) + + def initialize_weights(self) -> None: + # Initialize transformer layers: + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + self.apply(_basic_init) + + # Initialize timestep embedding MLP: + nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02) + nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02) + + # Zero-out adaLN modulation layers in DiT blocks: + if self.share_mod: + nn.init.constant_(self.adaLN_modulation[-1].weight, 0) + nn.init.constant_(self.adaLN_modulation[-1].bias, 0) + else: + for block in self.blocks: + nn.init.constant_(block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.adaLN_modulation[-1].bias, 0) + + # Zero-out output layers: + nn.init.constant_(self.out_layer.weight, 0) + nn.init.constant_(self.out_layer.bias, 0) + + def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor: + assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \ + f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}" + + h = patchify(x, self.patch_size) + h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous() + + h = self.input_layer(h) + h = h + self.pos_emb[None] + t_emb = self.t_embedder(t) + if self.share_mod: + t_emb = self.adaLN_modulation(t_emb) + t_emb = t_emb.type(self.dtype) + h = h.type(self.dtype) + cond = cond.type(self.dtype) + for block in self.blocks: + h = block(h, t_emb, cond) + h = h.type(x.dtype) + h = F.layer_norm(h, h.shape[-1:]) + h = self.out_layer(h) + + h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3) + h = unpatchify(h, self.patch_size).contiguous() + + return h diff --git a/third_party/TRELLIS/trellis/models/sparse_structure_vae.py b/third_party/TRELLIS/trellis/models/sparse_structure_vae.py new file mode 100644 index 0000000000000000000000000000000000000000..c3e09136cf294c4c1b47b0f09fa6ee57bad2166d --- /dev/null +++ b/third_party/TRELLIS/trellis/models/sparse_structure_vae.py @@ -0,0 +1,306 @@ +from typing import * +import torch +import torch.nn as nn +import torch.nn.functional as F +from ..modules.norm import GroupNorm32, ChannelLayerNorm32 +from ..modules.spatial import pixel_shuffle_3d +from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32 + + +def norm_layer(norm_type: str, *args, **kwargs) -> nn.Module: + """ + Return a normalization layer. 
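+
+    "group" yields GroupNorm32 with 32 groups; "layer" yields ChannelLayerNorm32.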
+ """ + if norm_type == "group": + return GroupNorm32(32, *args, **kwargs) + elif norm_type == "layer": + return ChannelLayerNorm32(*args, **kwargs) + else: + raise ValueError(f"Invalid norm type {norm_type}") + + +class ResBlock3d(nn.Module): + def __init__( + self, + channels: int, + out_channels: Optional[int] = None, + norm_type: Literal["group", "layer"] = "layer", + ): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + + self.norm1 = norm_layer(norm_type, channels) + self.norm2 = norm_layer(norm_type, self.out_channels) + self.conv1 = nn.Conv3d(channels, self.out_channels, 3, padding=1) + self.conv2 = zero_module(nn.Conv3d(self.out_channels, self.out_channels, 3, padding=1)) + self.skip_connection = nn.Conv3d(channels, self.out_channels, 1) if channels != self.out_channels else nn.Identity() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + h = self.norm1(x) + h = F.silu(h) + h = self.conv1(h) + h = self.norm2(h) + h = F.silu(h) + h = self.conv2(h) + h = h + self.skip_connection(x) + return h + + +class DownsampleBlock3d(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + mode: Literal["conv", "avgpool"] = "conv", + ): + assert mode in ["conv", "avgpool"], f"Invalid mode {mode}" + + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + + if mode == "conv": + self.conv = nn.Conv3d(in_channels, out_channels, 2, stride=2) + elif mode == "avgpool": + assert in_channels == out_channels, "Pooling mode requires in_channels to be equal to out_channels" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if hasattr(self, "conv"): + return self.conv(x) + else: + return F.avg_pool3d(x, 2) + + +class UpsampleBlock3d(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + mode: Literal["conv", "nearest"] = "conv", + ): + assert mode in ["conv", "nearest"], f"Invalid mode {mode}" + + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + + if mode == "conv": + self.conv = nn.Conv3d(in_channels, out_channels*8, 3, padding=1) + elif mode == "nearest": + assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if hasattr(self, "conv"): + x = self.conv(x) + return pixel_shuffle_3d(x, 2) + else: + return F.interpolate(x, scale_factor=2, mode="nearest") + + +class SparseStructureEncoder(nn.Module): + """ + Encoder for Sparse Structure (\mathcal{E}_S in the paper Sec. 3.3). + + Args: + in_channels (int): Channels of the input. + latent_channels (int): Channels of the latent representation. + num_res_blocks (int): Number of residual blocks at each resolution. + channels (List[int]): Channels of the encoder blocks. + num_res_blocks_middle (int): Number of residual blocks in the middle. + norm_type (Literal["group", "layer"]): Type of normalization layer. + use_fp16 (bool): Whether to use FP16. 
+ """ + def __init__( + self, + in_channels: int, + latent_channels: int, + num_res_blocks: int, + channels: List[int], + num_res_blocks_middle: int = 2, + norm_type: Literal["group", "layer"] = "layer", + use_fp16: bool = False, + ): + super().__init__() + self.in_channels = in_channels + self.latent_channels = latent_channels + self.num_res_blocks = num_res_blocks + self.channels = channels + self.num_res_blocks_middle = num_res_blocks_middle + self.norm_type = norm_type + self.use_fp16 = use_fp16 + self.dtype = torch.float16 if use_fp16 else torch.float32 + + self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1) + + self.blocks = nn.ModuleList([]) + for i, ch in enumerate(channels): + self.blocks.extend([ + ResBlock3d(ch, ch) + for _ in range(num_res_blocks) + ]) + if i < len(channels) - 1: + self.blocks.append( + DownsampleBlock3d(ch, channels[i+1]) + ) + + self.middle_block = nn.Sequential(*[ + ResBlock3d(channels[-1], channels[-1]) + for _ in range(num_res_blocks_middle) + ]) + + self.out_layer = nn.Sequential( + norm_layer(norm_type, channels[-1]), + nn.SiLU(), + nn.Conv3d(channels[-1], latent_channels*2, 3, padding=1) + ) + + if use_fp16: + self.convert_to_fp16() + + @property + def device(self) -> torch.device: + """ + Return the device of the model. + """ + return next(self.parameters()).device + + def convert_to_fp16(self) -> None: + """ + Convert the torso of the model to float16. + """ + self.use_fp16 = True + self.dtype = torch.float16 + self.blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + + def convert_to_fp32(self) -> None: + """ + Convert the torso of the model to float32. + """ + self.use_fp16 = False + self.dtype = torch.float32 + self.blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + + def forward(self, x: torch.Tensor, sample_posterior: bool = False, return_raw: bool = False) -> torch.Tensor: + h = self.input_layer(x) + h = h.type(self.dtype) + + for block in self.blocks: + h = block(h) + h = self.middle_block(h) + + h = h.type(x.dtype) + h = self.out_layer(h) + + mean, logvar = h.chunk(2, dim=1) + + if sample_posterior: + std = torch.exp(0.5 * logvar) + z = mean + std * torch.randn_like(std) + else: + z = mean + + if return_raw: + return z, mean, logvar + return z + + +class SparseStructureDecoder(nn.Module): + """ + Decoder for Sparse Structure (\mathcal{D}_S in the paper Sec. 3.3). + + Args: + out_channels (int): Channels of the output. + latent_channels (int): Channels of the latent representation. + num_res_blocks (int): Number of residual blocks at each resolution. + channels (List[int]): Channels of the decoder blocks. + num_res_blocks_middle (int): Number of residual blocks in the middle. + norm_type (Literal["group", "layer"]): Type of normalization layer. + use_fp16 (bool): Whether to use FP16. 
+ """ + def __init__( + self, + out_channels: int, + latent_channels: int, + num_res_blocks: int, + channels: List[int], + num_res_blocks_middle: int = 2, + norm_type: Literal["group", "layer"] = "layer", + use_fp16: bool = False, + ): + super().__init__() + self.out_channels = out_channels + self.latent_channels = latent_channels + self.num_res_blocks = num_res_blocks + self.channels = channels + self.num_res_blocks_middle = num_res_blocks_middle + self.norm_type = norm_type + self.use_fp16 = use_fp16 + self.dtype = torch.float16 if use_fp16 else torch.float32 + + self.input_layer = nn.Conv3d(latent_channels, channels[0], 3, padding=1) + + self.middle_block = nn.Sequential(*[ + ResBlock3d(channels[0], channels[0]) + for _ in range(num_res_blocks_middle) + ]) + + self.blocks = nn.ModuleList([]) + for i, ch in enumerate(channels): + self.blocks.extend([ + ResBlock3d(ch, ch) + for _ in range(num_res_blocks) + ]) + if i < len(channels) - 1: + self.blocks.append( + UpsampleBlock3d(ch, channels[i+1]) + ) + + self.out_layer = nn.Sequential( + norm_layer(norm_type, channels[-1]), + nn.SiLU(), + nn.Conv3d(channels[-1], out_channels, 3, padding=1) + ) + + if use_fp16: + self.convert_to_fp16() + + @property + def device(self) -> torch.device: + """ + Return the device of the model. + """ + return next(self.parameters()).device + + def convert_to_fp16(self) -> None: + """ + Convert the torso of the model to float16. + """ + self.use_fp16 = True + self.dtype = torch.float16 + self.blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + + def convert_to_fp32(self) -> None: + """ + Convert the torso of the model to float32. + """ + self.use_fp16 = False + self.dtype = torch.float32 + self.blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + h = self.input_layer(x) + + h = h.type(self.dtype) + + h = self.middle_block(h) + for block in self.blocks: + h = block(h) + + h = h.type(x.dtype) + h = self.out_layer(h) + return h diff --git a/third_party/TRELLIS/trellis/models/structured_latent_flow.py b/third_party/TRELLIS/trellis/models/structured_latent_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..4d6f61b8a58b64069bb93413ba891d52896c3300 --- /dev/null +++ b/third_party/TRELLIS/trellis/models/structured_latent_flow.py @@ -0,0 +1,276 @@ +from typing import * +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32 +from ..modules.transformer import AbsolutePositionEmbedder +from ..modules.norm import LayerNorm32 +from ..modules import sparse as sp +from ..modules.sparse.transformer import ModulatedSparseTransformerCrossBlock +from .sparse_structure_flow import TimestepEmbedder +from .sparse_elastic_mixin import SparseTransformerElasticMixin + + +class SparseResBlock3d(nn.Module): + def __init__( + self, + channels: int, + emb_channels: int, + out_channels: Optional[int] = None, + downsample: bool = False, + upsample: bool = False, + ): + super().__init__() + self.channels = channels + self.emb_channels = emb_channels + self.out_channels = out_channels or channels + self.downsample = downsample + self.upsample = upsample + + assert not (downsample and upsample), "Cannot downsample and upsample at the same time" + + self.norm1 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6) + self.norm2 = LayerNorm32(self.out_channels, 
elementwise_affine=False, eps=1e-6) + self.conv1 = sp.SparseConv3d(channels, self.out_channels, 3) + self.conv2 = zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3)) + self.emb_layers = nn.Sequential( + nn.SiLU(), + nn.Linear(emb_channels, 2 * self.out_channels, bias=True), + ) + self.skip_connection = sp.SparseLinear(channels, self.out_channels) if channels != self.out_channels else nn.Identity() + self.updown = None + if self.downsample: + self.updown = sp.SparseDownsample(2) + elif self.upsample: + self.updown = sp.SparseUpsample(2) + + def _updown(self, x: sp.SparseTensor) -> sp.SparseTensor: + if self.updown is not None: + x = self.updown(x) + return x + + def forward(self, x: sp.SparseTensor, emb: torch.Tensor) -> sp.SparseTensor: + emb_out = self.emb_layers(emb).type(x.dtype) + scale, shift = torch.chunk(emb_out, 2, dim=1) + + x = self._updown(x) + h = x.replace(self.norm1(x.feats)) + h = h.replace(F.silu(h.feats)) + h = self.conv1(h) + h = h.replace(self.norm2(h.feats)) * (1 + scale) + shift + h = h.replace(F.silu(h.feats)) + h = self.conv2(h) + h = h + self.skip_connection(x) + + return h + + +class SLatFlowModel(nn.Module): + def __init__( + self, + resolution: int, + in_channels: int, + model_channels: int, + cond_channels: int, + out_channels: int, + num_blocks: int, + num_heads: Optional[int] = None, + num_head_channels: Optional[int] = 64, + mlp_ratio: float = 4, + patch_size: int = 2, + num_io_res_blocks: int = 2, + io_block_channels: List[int] = None, + pe_mode: Literal["ape", "rope"] = "ape", + use_fp16: bool = False, + use_checkpoint: bool = False, + use_skip_connection: bool = True, + share_mod: bool = False, + qk_rms_norm: bool = False, + qk_rms_norm_cross: bool = False, + ): + super().__init__() + self.resolution = resolution + self.in_channels = in_channels + self.model_channels = model_channels + self.cond_channels = cond_channels + self.out_channels = out_channels + self.num_blocks = num_blocks + self.num_heads = num_heads or model_channels // num_head_channels + self.mlp_ratio = mlp_ratio + self.patch_size = patch_size + self.num_io_res_blocks = num_io_res_blocks + self.io_block_channels = io_block_channels + self.pe_mode = pe_mode + self.use_fp16 = use_fp16 + self.use_checkpoint = use_checkpoint + self.use_skip_connection = use_skip_connection + self.share_mod = share_mod + self.qk_rms_norm = qk_rms_norm + self.qk_rms_norm_cross = qk_rms_norm_cross + self.dtype = torch.float16 if use_fp16 else torch.float32 + + if self.io_block_channels is not None: + assert int(np.log2(patch_size)) == np.log2(patch_size), "Patch size must be a power of 2" + assert np.log2(patch_size) == len(io_block_channels), "Number of IO ResBlocks must match the number of stages" + + self.t_embedder = TimestepEmbedder(model_channels) + if share_mod: + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + nn.Linear(model_channels, 6 * model_channels, bias=True) + ) + + if pe_mode == "ape": + self.pos_embedder = AbsolutePositionEmbedder(model_channels) + + self.input_layer = sp.SparseLinear(in_channels, model_channels if io_block_channels is None else io_block_channels[0]) + + self.input_blocks = nn.ModuleList([]) + if io_block_channels is not None: + for chs, next_chs in zip(io_block_channels, io_block_channels[1:] + [model_channels]): + self.input_blocks.extend([ + SparseResBlock3d( + chs, + model_channels, + out_channels=chs, + ) + for _ in range(num_io_res_blocks-1) + ]) + self.input_blocks.append( + SparseResBlock3d( + chs, + model_channels, + out_channels=next_chs, + 
downsample=True, + ) + ) + + self.blocks = nn.ModuleList([ + ModulatedSparseTransformerCrossBlock( + model_channels, + cond_channels, + num_heads=self.num_heads, + mlp_ratio=self.mlp_ratio, + attn_mode='full', + use_checkpoint=self.use_checkpoint, + use_rope=(pe_mode == "rope"), + share_mod=self.share_mod, + qk_rms_norm=self.qk_rms_norm, + qk_rms_norm_cross=self.qk_rms_norm_cross, + ) + for _ in range(num_blocks) + ]) + + self.out_blocks = nn.ModuleList([]) + if io_block_channels is not None: + for chs, prev_chs in zip(reversed(io_block_channels), [model_channels] + list(reversed(io_block_channels[1:]))): + self.out_blocks.append( + SparseResBlock3d( + prev_chs * 2 if self.use_skip_connection else prev_chs, + model_channels, + out_channels=chs, + upsample=True, + ) + ) + self.out_blocks.extend([ + SparseResBlock3d( + chs * 2 if self.use_skip_connection else chs, + model_channels, + out_channels=chs, + ) + for _ in range(num_io_res_blocks-1) + ]) + + self.out_layer = sp.SparseLinear(model_channels if io_block_channels is None else io_block_channels[0], out_channels) + + self.initialize_weights() + if use_fp16: + self.convert_to_fp16() + + @property + def device(self) -> torch.device: + """ + Return the device of the model. + """ + return next(self.parameters()).device + + def convert_to_fp16(self) -> None: + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.blocks.apply(convert_module_to_f16) + self.out_blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self) -> None: + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.blocks.apply(convert_module_to_f32) + self.out_blocks.apply(convert_module_to_f32) + + def initialize_weights(self) -> None: + # Initialize transformer layers: + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + self.apply(_basic_init) + + # Initialize timestep embedding MLP: + nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02) + nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02) + + # Zero-out adaLN modulation layers in DiT blocks: + if self.share_mod: + nn.init.constant_(self.adaLN_modulation[-1].weight, 0) + nn.init.constant_(self.adaLN_modulation[-1].bias, 0) + else: + for block in self.blocks: + nn.init.constant_(block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.adaLN_modulation[-1].bias, 0) + + # Zero-out output layers: + nn.init.constant_(self.out_layer.weight, 0) + nn.init.constant_(self.out_layer.bias, 0) + + def forward(self, x: sp.SparseTensor, t: torch.Tensor, cond: torch.Tensor) -> sp.SparseTensor: + h = self.input_layer(x).type(self.dtype) + t_emb = self.t_embedder(t) + if self.share_mod: + t_emb = self.adaLN_modulation(t_emb) + t_emb = t_emb.type(self.dtype) + cond = cond.type(self.dtype) + + skips = [] + # pack with input blocks + for block in self.input_blocks: + h = block(h, t_emb) + skips.append(h.feats) + + if self.pe_mode == "ape": + h = h + self.pos_embedder(h.coords[:, 1:]).type(self.dtype) + for block in self.blocks: + h = block(h, t_emb, cond) + + # unpack with output blocks + for block, skip in zip(self.out_blocks, reversed(skips)): + if self.use_skip_connection: + h = block(h.replace(torch.cat([h.feats, skip], dim=1)), t_emb) + else: + h = block(h, t_emb) + + h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:])) + h = self.out_layer(h.type(x.dtype)) + return h + + 
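The forward pass above follows a U-Net-style pack/unpack pattern: the IO ResBlocks downsample and stash skip features, the transformer torso runs at the coarsest scale, and the output blocks upsample while concatenating the stashed skips when use_skip_connection is set. A minimal dense sketch of that control flow (illustrative only: the TinyPackUnpack name and the nn.Linear stand-ins are ours, and the real model operates on sp.SparseTensor with timestep and cross-attention conditioning):

    import torch
    import torch.nn as nn

    class TinyPackUnpack(nn.Module):
        """Dense stand-in for the pack/skip/unpack flow of SLatFlowModel.forward."""
        def __init__(self, ch: int = 8, depth: int = 2):
            super().__init__()
            self.input_blocks = nn.ModuleList(nn.Linear(ch, ch) for _ in range(depth))
            self.torso = nn.ModuleList(nn.Linear(ch, ch) for _ in range(depth))
            # skip concatenation doubles the input width, as with use_skip_connection=True
            self.out_blocks = nn.ModuleList(nn.Linear(2 * ch, ch) for _ in range(depth))

        def forward(self, h: torch.Tensor) -> torch.Tensor:
            skips = []
            for blk in self.input_blocks:        # "pack with input blocks"
                h = blk(h)
                skips.append(h)
            for blk in self.torso:               # torso at the coarsest scale
                h = blk(h)
            for blk, skip in zip(self.out_blocks, reversed(skips)):
                h = blk(torch.cat([h, skip], dim=-1))  # "unpack with output blocks"
            return h

    print(TinyPackUnpack()(torch.randn(4, 8)).shape)  # torch.Size([4, 8])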
+class ElasticSLatFlowModel(SparseTransformerElasticMixin, SLatFlowModel): + """ + SLat Flow Model with elastic memory management. + Used for training with low VRAM. + """ + pass diff --git a/third_party/TRELLIS/trellis/models/structured_latent_vae/__init__.py b/third_party/TRELLIS/trellis/models/structured_latent_vae/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4e2ac3531d71b5f4fb168cae62e776a7d4d55576 --- /dev/null +++ b/third_party/TRELLIS/trellis/models/structured_latent_vae/__init__.py @@ -0,0 +1,4 @@ +from .encoder import SLatEncoder, ElasticSLatEncoder +from .decoder_gs import SLatGaussianDecoder, ElasticSLatGaussianDecoder +from .decoder_rf import SLatRadianceFieldDecoder, ElasticSLatRadianceFieldDecoder +from .decoder_mesh import SLatMeshDecoder, ElasticSLatMeshDecoder diff --git a/third_party/TRELLIS/trellis/models/structured_latent_vae/base.py b/third_party/TRELLIS/trellis/models/structured_latent_vae/base.py new file mode 100644 index 0000000000000000000000000000000000000000..ab0bf6a850b1c146e081c32ad92c7c44ead5ef6e --- /dev/null +++ b/third_party/TRELLIS/trellis/models/structured_latent_vae/base.py @@ -0,0 +1,117 @@ +from typing import * +import torch +import torch.nn as nn +from ...modules.utils import convert_module_to_f16, convert_module_to_f32 +from ...modules import sparse as sp +from ...modules.transformer import AbsolutePositionEmbedder +from ...modules.sparse.transformer import SparseTransformerBlock + + +def block_attn_config(self): + """ + Return the attention configuration of the model. + """ + for i in range(self.num_blocks): + if self.attn_mode == "shift_window": + yield "serialized", self.window_size, 0, (16 * (i % 2),) * 3, sp.SerializeMode.Z_ORDER + elif self.attn_mode == "shift_sequence": + yield "serialized", self.window_size, self.window_size // 2 * (i % 2), (0, 0, 0), sp.SerializeMode.Z_ORDER + elif self.attn_mode == "shift_order": + yield "serialized", self.window_size, 0, (0, 0, 0), sp.SerializeModes[i % 4] + elif self.attn_mode == "full": + yield "full", None, None, None, None + elif self.attn_mode == "swin": + yield "windowed", self.window_size, None, self.window_size // 2 * (i % 2), None + + +class SparseTransformerBase(nn.Module): + """ + Sparse Transformer without output layers. + Serve as the base class for encoder and decoder. 
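+    Subclasses attach their own output heads and are responsible for calling initialize_weights() and, when use_fp16 is set, convert_to_fp16().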
+ """ + def __init__( + self, + in_channels: int, + model_channels: int, + num_blocks: int, + num_heads: Optional[int] = None, + num_head_channels: Optional[int] = 64, + mlp_ratio: float = 4.0, + attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full", + window_size: Optional[int] = None, + pe_mode: Literal["ape", "rope"] = "ape", + use_fp16: bool = False, + use_checkpoint: bool = False, + qk_rms_norm: bool = False, + ): + super().__init__() + self.in_channels = in_channels + self.model_channels = model_channels + self.num_blocks = num_blocks + self.window_size = window_size + self.num_heads = num_heads or model_channels // num_head_channels + self.mlp_ratio = mlp_ratio + self.attn_mode = attn_mode + self.pe_mode = pe_mode + self.use_fp16 = use_fp16 + self.use_checkpoint = use_checkpoint + self.qk_rms_norm = qk_rms_norm + self.dtype = torch.float16 if use_fp16 else torch.float32 + + if pe_mode == "ape": + self.pos_embedder = AbsolutePositionEmbedder(model_channels) + + self.input_layer = sp.SparseLinear(in_channels, model_channels) + self.blocks = nn.ModuleList([ + SparseTransformerBlock( + model_channels, + num_heads=self.num_heads, + mlp_ratio=self.mlp_ratio, + attn_mode=attn_mode, + window_size=window_size, + shift_sequence=shift_sequence, + shift_window=shift_window, + serialize_mode=serialize_mode, + use_checkpoint=self.use_checkpoint, + use_rope=(pe_mode == "rope"), + qk_rms_norm=self.qk_rms_norm, + ) + for attn_mode, window_size, shift_sequence, shift_window, serialize_mode in block_attn_config(self) + ]) + + @property + def device(self) -> torch.device: + """ + Return the device of the model. + """ + return next(self.parameters()).device + + def convert_to_fp16(self) -> None: + """ + Convert the torso of the model to float16. + """ + self.blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self) -> None: + """ + Convert the torso of the model to float32. 
+ """ + self.blocks.apply(convert_module_to_f32) + + def initialize_weights(self) -> None: + # Initialize transformer layers: + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + self.apply(_basic_init) + + def forward(self, x: sp.SparseTensor) -> sp.SparseTensor: + h = self.input_layer(x) + if self.pe_mode == "ape": + h = h + self.pos_embedder(x.coords[:, 1:]) + h = h.type(self.dtype) + for block in self.blocks: + h = block(h) + return h diff --git a/third_party/TRELLIS/trellis/models/structured_latent_vae/decoder_gs.py b/third_party/TRELLIS/trellis/models/structured_latent_vae/decoder_gs.py new file mode 100644 index 0000000000000000000000000000000000000000..47304f381833bf9529f6576000972ae14f0cc4ba --- /dev/null +++ b/third_party/TRELLIS/trellis/models/structured_latent_vae/decoder_gs.py @@ -0,0 +1,131 @@ +from typing import * +import torch +import torch.nn as nn +import torch.nn.functional as F +from ...modules import sparse as sp +from ...utils.random_utils import hammersley_sequence +from .base import SparseTransformerBase +from ...representations import Gaussian +from ..sparse_elastic_mixin import SparseTransformerElasticMixin + + +class SLatGaussianDecoder(SparseTransformerBase): + def __init__( + self, + resolution: int, + model_channels: int, + latent_channels: int, + num_blocks: int, + num_heads: Optional[int] = None, + num_head_channels: Optional[int] = 64, + mlp_ratio: float = 4, + attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin", + window_size: int = 8, + pe_mode: Literal["ape", "rope"] = "ape", + use_fp16: bool = False, + use_checkpoint: bool = False, + qk_rms_norm: bool = False, + representation_config: dict = None, + ): + super().__init__( + in_channels=latent_channels, + model_channels=model_channels, + num_blocks=num_blocks, + num_heads=num_heads, + num_head_channels=num_head_channels, + mlp_ratio=mlp_ratio, + attn_mode=attn_mode, + window_size=window_size, + pe_mode=pe_mode, + use_fp16=use_fp16, + use_checkpoint=use_checkpoint, + qk_rms_norm=qk_rms_norm, + ) + self.resolution = resolution + self.rep_config = representation_config + self._calc_layout() + self.out_layer = sp.SparseLinear(model_channels, self.out_channels) + self._build_perturbation() + + self.initialize_weights() + if use_fp16: + self.convert_to_fp16() + + def initialize_weights(self) -> None: + super().initialize_weights() + # Zero-out output layers: + nn.init.constant_(self.out_layer.weight, 0) + nn.init.constant_(self.out_layer.bias, 0) + + def _build_perturbation(self) -> None: + perturbation = [hammersley_sequence(3, i, self.rep_config['num_gaussians']) for i in range(self.rep_config['num_gaussians'])] + perturbation = torch.tensor(perturbation).float() * 2 - 1 + perturbation = perturbation / self.rep_config['voxel_size'] + perturbation = torch.atanh(perturbation).to(self.device) + self.register_buffer('offset_perturbation', perturbation) + + def _calc_layout(self) -> None: + self.layout = { + '_xyz' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3}, + '_features_dc' : {'shape': (self.rep_config['num_gaussians'], 1, 3), 'size': self.rep_config['num_gaussians'] * 3}, + '_scaling' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3}, + '_rotation' : {'shape': (self.rep_config['num_gaussians'], 4), 'size': self.rep_config['num_gaussians'] * 4}, + 
'_opacity' : {'shape': (self.rep_config['num_gaussians'], 1), 'size': self.rep_config['num_gaussians']}, + } + start = 0 + for k, v in self.layout.items(): + v['range'] = (start, start + v['size']) + start += v['size'] + self.out_channels = start + + def to_representation(self, x: sp.SparseTensor) -> List[Gaussian]: + """ + Convert a batch of network outputs to 3D representations. + + Args: + x: The [N x * x C] sparse tensor output by the network. + + Returns: + list of representations + """ + ret = [] + for i in range(x.shape[0]): + representation = Gaussian( + sh_degree=0, + aabb=[-0.5, -0.5, -0.5, 1.0, 1.0, 1.0], + mininum_kernel_size = self.rep_config['3d_filter_kernel_size'], + scaling_bias = self.rep_config['scaling_bias'], + opacity_bias = self.rep_config['opacity_bias'], + scaling_activation = self.rep_config['scaling_activation'] + ) + xyz = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution + for k, v in self.layout.items(): + if k == '_xyz': + offset = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']) + offset = offset * self.rep_config['lr'][k] + if self.rep_config['perturb_offset']: + offset = offset + self.offset_perturbation + offset = torch.tanh(offset) / self.resolution * 0.5 * self.rep_config['voxel_size'] + _xyz = xyz.unsqueeze(1) + offset + setattr(representation, k, _xyz.flatten(0, 1)) + else: + feats = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']).flatten(0, 1) + feats = feats * self.rep_config['lr'][k] + setattr(representation, k, feats) + ret.append(representation) + return ret + + def forward(self, x: sp.SparseTensor) -> List[Gaussian]: + h = super().forward(x) + h = h.type(x.dtype) + h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:])) + h = self.out_layer(h) + return self.to_representation(h) + + +class ElasticSLatGaussianDecoder(SparseTransformerElasticMixin, SLatGaussianDecoder): + """ + Slat VAE Gaussian decoder with elastic memory management. + Used for training with low VRAM. + """ + pass diff --git a/third_party/TRELLIS/trellis/models/structured_latent_vae/decoder_mesh.py b/third_party/TRELLIS/trellis/models/structured_latent_vae/decoder_mesh.py new file mode 100644 index 0000000000000000000000000000000000000000..f9f5659d1d0556bcf51b973ace584fdb347ac4d6 --- /dev/null +++ b/third_party/TRELLIS/trellis/models/structured_latent_vae/decoder_mesh.py @@ -0,0 +1,176 @@ +from typing import * +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from ...modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32 +from ...modules import sparse as sp +from .base import SparseTransformerBase +from ...representations import MeshExtractResult +from ...representations.mesh import SparseFeatures2Mesh +from ..sparse_elastic_mixin import SparseTransformerElasticMixin + + +class SparseSubdivideBlock3d(nn.Module): + """ + A 3D subdivide block that can subdivide the sparse tensor. + + Args: + channels: channels in the inputs and outputs. + out_channels: if specified, the number of output channels. + num_groups: the number of groups for the group norm. 
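+        resolution: the input grid resolution; the block subdivides features to out_resolution = resolution * 2.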
+ """ + def __init__( + self, + channels: int, + resolution: int, + out_channels: Optional[int] = None, + num_groups: int = 32 + ): + super().__init__() + self.channels = channels + self.resolution = resolution + self.out_resolution = resolution * 2 + self.out_channels = out_channels or channels + + self.act_layers = nn.Sequential( + sp.SparseGroupNorm32(num_groups, channels), + sp.SparseSiLU() + ) + + self.sub = sp.SparseSubdivide() + + self.out_layers = nn.Sequential( + sp.SparseConv3d(channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}"), + sp.SparseGroupNorm32(num_groups, self.out_channels), + sp.SparseSiLU(), + zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}")), + ) + + if self.out_channels == channels: + self.skip_connection = nn.Identity() + else: + self.skip_connection = sp.SparseConv3d(channels, self.out_channels, 1, indice_key=f"res_{self.out_resolution}") + + def forward(self, x: sp.SparseTensor) -> sp.SparseTensor: + """ + Apply the block to a Tensor, conditioned on a timestep embedding. + + Args: + x: an [N x C x ...] Tensor of features. + Returns: + an [N x C x ...] Tensor of outputs. + """ + h = self.act_layers(x) + h = self.sub(h) + x = self.sub(x) + h = self.out_layers(h) + h = h + self.skip_connection(x) + return h + + +class SLatMeshDecoder(SparseTransformerBase): + def __init__( + self, + resolution: int, + model_channels: int, + latent_channels: int, + num_blocks: int, + num_heads: Optional[int] = None, + num_head_channels: Optional[int] = 64, + mlp_ratio: float = 4, + attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin", + window_size: int = 8, + pe_mode: Literal["ape", "rope"] = "ape", + use_fp16: bool = False, + use_checkpoint: bool = False, + qk_rms_norm: bool = False, + representation_config: dict = None, + ): + super().__init__( + in_channels=latent_channels, + model_channels=model_channels, + num_blocks=num_blocks, + num_heads=num_heads, + num_head_channels=num_head_channels, + mlp_ratio=mlp_ratio, + attn_mode=attn_mode, + window_size=window_size, + pe_mode=pe_mode, + use_fp16=use_fp16, + use_checkpoint=use_checkpoint, + qk_rms_norm=qk_rms_norm, + ) + self.resolution = resolution + self.rep_config = representation_config + self.mesh_extractor = SparseFeatures2Mesh(res=self.resolution*4, use_color=self.rep_config.get('use_color', False)) + self.out_channels = self.mesh_extractor.feats_channels + self.upsample = nn.ModuleList([ + SparseSubdivideBlock3d( + channels=model_channels, + resolution=resolution, + out_channels=model_channels // 4 + ), + SparseSubdivideBlock3d( + channels=model_channels // 4, + resolution=resolution * 2, + out_channels=model_channels // 8 + ) + ]) + self.out_layer = sp.SparseLinear(model_channels // 8, self.out_channels) + + self.initialize_weights() + if use_fp16: + self.convert_to_fp16() + + def initialize_weights(self) -> None: + super().initialize_weights() + # Zero-out output layers: + nn.init.constant_(self.out_layer.weight, 0) + nn.init.constant_(self.out_layer.bias, 0) + + def convert_to_fp16(self) -> None: + """ + Convert the torso of the model to float16. + """ + super().convert_to_fp16() + self.upsample.apply(convert_module_to_f16) + + def convert_to_fp32(self) -> None: + """ + Convert the torso of the model to float32. 
+ """ + super().convert_to_fp32() + self.upsample.apply(convert_module_to_f32) + + def to_representation(self, x: sp.SparseTensor) -> List[MeshExtractResult]: + """ + Convert a batch of network outputs to 3D representations. + + Args: + x: The [N x * x C] sparse tensor output by the network. + + Returns: + list of representations + """ + ret = [] + for i in range(x.shape[0]): + mesh = self.mesh_extractor(x[i], training=self.training) + ret.append(mesh) + return ret + + def forward(self, x: sp.SparseTensor) -> List[MeshExtractResult]: + h = super().forward(x) + for block in self.upsample: + h = block(h) + h = h.type(x.dtype) + h = self.out_layer(h) + return self.to_representation(h) + + +class ElasticSLatMeshDecoder(SparseTransformerElasticMixin, SLatMeshDecoder): + """ + Slat VAE Mesh decoder with elastic memory management. + Used for training with low VRAM. + """ + pass diff --git a/third_party/TRELLIS/trellis/models/structured_latent_vae/decoder_rf.py b/third_party/TRELLIS/trellis/models/structured_latent_vae/decoder_rf.py new file mode 100644 index 0000000000000000000000000000000000000000..bb1809449f689dfa7bb03673782039f3015d8479 --- /dev/null +++ b/third_party/TRELLIS/trellis/models/structured_latent_vae/decoder_rf.py @@ -0,0 +1,113 @@ +from typing import * +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from ...modules import sparse as sp +from .base import SparseTransformerBase +from ...representations import Strivec +from ..sparse_elastic_mixin import SparseTransformerElasticMixin + + +class SLatRadianceFieldDecoder(SparseTransformerBase): + def __init__( + self, + resolution: int, + model_channels: int, + latent_channels: int, + num_blocks: int, + num_heads: Optional[int] = None, + num_head_channels: Optional[int] = 64, + mlp_ratio: float = 4, + attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin", + window_size: int = 8, + pe_mode: Literal["ape", "rope"] = "ape", + use_fp16: bool = False, + use_checkpoint: bool = False, + qk_rms_norm: bool = False, + representation_config: dict = None, + ): + super().__init__( + in_channels=latent_channels, + model_channels=model_channels, + num_blocks=num_blocks, + num_heads=num_heads, + num_head_channels=num_head_channels, + mlp_ratio=mlp_ratio, + attn_mode=attn_mode, + window_size=window_size, + pe_mode=pe_mode, + use_fp16=use_fp16, + use_checkpoint=use_checkpoint, + qk_rms_norm=qk_rms_norm, + ) + self.resolution = resolution + self.rep_config = representation_config + self._calc_layout() + self.out_layer = sp.SparseLinear(model_channels, self.out_channels) + + self.initialize_weights() + if use_fp16: + self.convert_to_fp16() + + def initialize_weights(self) -> None: + super().initialize_weights() + # Zero-out output layers: + nn.init.constant_(self.out_layer.weight, 0) + nn.init.constant_(self.out_layer.bias, 0) + + def _calc_layout(self) -> None: + self.layout = { + 'trivec': {'shape': (self.rep_config['rank'], 3, self.rep_config['dim']), 'size': self.rep_config['rank'] * 3 * self.rep_config['dim']}, + 'density': {'shape': (self.rep_config['rank'],), 'size': self.rep_config['rank']}, + 'features_dc': {'shape': (self.rep_config['rank'], 1, 3), 'size': self.rep_config['rank'] * 3}, + } + start = 0 + for k, v in self.layout.items(): + v['range'] = (start, start + v['size']) + start += v['size'] + self.out_channels = start + + def to_representation(self, x: sp.SparseTensor) -> List[Strivec]: + """ + Convert a batch of network outputs to 3D representations. 
+ + Args: + x: The [N x * x C] sparse tensor output by the network. + + Returns: + list of representations + """ + ret = [] + for i in range(x.shape[0]): + representation = Strivec( + sh_degree=0, + resolution=self.resolution, + aabb=[-0.5, -0.5, -0.5, 1, 1, 1], + rank=self.rep_config['rank'], + dim=self.rep_config['dim'], + device='cuda', + ) + representation.density_shift = 0.0 + representation.position = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution + representation.depth = torch.full((representation.position.shape[0], 1), int(np.log2(self.resolution)), dtype=torch.uint8, device='cuda') + for k, v in self.layout.items(): + setattr(representation, k, x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape'])) + representation.trivec = representation.trivec + 1 + ret.append(representation) + return ret + + def forward(self, x: sp.SparseTensor) -> List[Strivec]: + h = super().forward(x) + h = h.type(x.dtype) + h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:])) + h = self.out_layer(h) + return self.to_representation(h) + + +class ElasticSLatRadianceFieldDecoder(SparseTransformerElasticMixin, SLatRadianceFieldDecoder): + """ + Slat VAE Radiance Field Decoder with elastic memory management. + Used for training with low VRAM. + """ + pass diff --git a/third_party/TRELLIS/trellis/models/structured_latent_vae/encoder.py b/third_party/TRELLIS/trellis/models/structured_latent_vae/encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..77f6fb193ea2bee41fd0b77cd06e37bd14bf6793 --- /dev/null +++ b/third_party/TRELLIS/trellis/models/structured_latent_vae/encoder.py @@ -0,0 +1,80 @@ +from typing import * +import torch +import torch.nn as nn +import torch.nn.functional as F +from ...modules import sparse as sp +from .base import SparseTransformerBase +from ..sparse_elastic_mixin import SparseTransformerElasticMixin + + +class SLatEncoder(SparseTransformerBase): + def __init__( + self, + resolution: int, + in_channels: int, + model_channels: int, + latent_channels: int, + num_blocks: int, + num_heads: Optional[int] = None, + num_head_channels: Optional[int] = 64, + mlp_ratio: float = 4, + attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin", + window_size: int = 8, + pe_mode: Literal["ape", "rope"] = "ape", + use_fp16: bool = False, + use_checkpoint: bool = False, + qk_rms_norm: bool = False, + ): + super().__init__( + in_channels=in_channels, + model_channels=model_channels, + num_blocks=num_blocks, + num_heads=num_heads, + num_head_channels=num_head_channels, + mlp_ratio=mlp_ratio, + attn_mode=attn_mode, + window_size=window_size, + pe_mode=pe_mode, + use_fp16=use_fp16, + use_checkpoint=use_checkpoint, + qk_rms_norm=qk_rms_norm, + ) + self.resolution = resolution + self.out_layer = sp.SparseLinear(model_channels, 2 * latent_channels) + + self.initialize_weights() + if use_fp16: + self.convert_to_fp16() + + def initialize_weights(self) -> None: + super().initialize_weights() + # Zero-out output layers: + nn.init.constant_(self.out_layer.weight, 0) + nn.init.constant_(self.out_layer.bias, 0) + + def forward(self, x: sp.SparseTensor, sample_posterior=True, return_raw=False): + h = super().forward(x) + h = h.type(x.dtype) + h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:])) + h = self.out_layer(h) + + # Sample from the posterior distribution + mean, logvar = h.feats.chunk(2, dim=-1) + if sample_posterior: + std = torch.exp(0.5 * logvar) + z = mean + std * torch.randn_like(std) + else: + z = mean 
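+        # re-wrap the (possibly sampled) latent features into the input's sparse coordinate structure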
+ z = h.replace(z) + + if return_raw: + return z, mean, logvar + else: + return z + + +class ElasticSLatEncoder(SparseTransformerElasticMixin, SLatEncoder): + """ + SLat VAE encoder with elastic memory management. + Used for training with low VRAM. + """ diff --git a/third_party/TRELLIS/trellis/modules/attention/__init__.py b/third_party/TRELLIS/trellis/modules/attention/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..f452320d5dbc4c0aa1664e33f76c56ff4bbe2039 --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/attention/__init__.py @@ -0,0 +1,36 @@ +from typing import * + +BACKEND = 'flash_attn' +DEBUG = False + +def __from_env(): + import os + + global BACKEND + global DEBUG + + env_attn_backend = os.environ.get('ATTN_BACKEND') + env_sttn_debug = os.environ.get('ATTN_DEBUG') + + if env_attn_backend is not None and env_attn_backend in ['xformers', 'flash_attn', 'sdpa', 'naive']: + BACKEND = env_attn_backend + if env_sttn_debug is not None: + DEBUG = env_sttn_debug == '1' + + print(f"[ATTENTION] Using backend: {BACKEND}") + + +__from_env() + + +def set_backend(backend: Literal['xformers', 'flash_attn']): + global BACKEND + BACKEND = backend + +def set_debug(debug: bool): + global DEBUG + DEBUG = debug + + +from .full_attn import * +from .modules import * diff --git a/third_party/TRELLIS/trellis/modules/attention/full_attn.py b/third_party/TRELLIS/trellis/modules/attention/full_attn.py new file mode 100755 index 0000000000000000000000000000000000000000..d9ebf6380a78906d4c6e969c63223fb7b398e5a7 --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/attention/full_attn.py @@ -0,0 +1,140 @@ +from typing import * +import torch +import math +from . import DEBUG, BACKEND + +if BACKEND == 'xformers': + import xformers.ops as xops +elif BACKEND == 'flash_attn': + import flash_attn +elif BACKEND == 'sdpa': + from torch.nn.functional import scaled_dot_product_attention as sdpa +elif BACKEND == 'naive': + pass +else: + raise ValueError(f"Unknown attention backend: {BACKEND}") + + +__all__ = [ + 'scaled_dot_product_attention', +] + + +def _naive_sdpa(q, k, v): + """ + Naive implementation of scaled dot product attention. + """ + q = q.permute(0, 2, 1, 3) # [N, H, L, C] + k = k.permute(0, 2, 1, 3) # [N, H, L, C] + v = v.permute(0, 2, 1, 3) # [N, H, L, C] + scale_factor = 1 / math.sqrt(q.size(-1)) + attn_weight = q @ k.transpose(-2, -1) * scale_factor + attn_weight = torch.softmax(attn_weight, dim=-1) + out = attn_weight @ v + out = out.permute(0, 2, 1, 3) # [N, L, H, C] + return out + + +@overload +def scaled_dot_product_attention(qkv: torch.Tensor) -> torch.Tensor: + """ + Apply scaled dot product attention. + + Args: + qkv (torch.Tensor): A [N, L, 3, H, C] tensor containing Qs, Ks, and Vs. + """ + ... + +@overload +def scaled_dot_product_attention(q: torch.Tensor, kv: torch.Tensor) -> torch.Tensor: + """ + Apply scaled dot product attention. + + Args: + q (torch.Tensor): A [N, L, H, C] tensor containing Qs. + kv (torch.Tensor): A [N, L, 2, H, C] tensor containing Ks and Vs. + """ + ... + +@overload +def scaled_dot_product_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor: + """ + Apply scaled dot product attention. + + Args: + q (torch.Tensor): A [N, L, H, Ci] tensor containing Qs. + k (torch.Tensor): A [N, L, H, Ci] tensor containing Ks. + v (torch.Tensor): A [N, L, H, Co] tensor containing Vs. + + Note: + k and v are assumed to have the same coordinate map. + """ + ... 
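The dispatcher below routes the three packed call forms declared by the overloads to whichever backend was selected at import time. A quick shape sketch, with shapes chosen purely for illustration (with ATTN_BACKEND=sdpa or ATTN_BACKEND=naive it runs without flash-attn or xformers installed):

    import torch
    # assumed import path for this module:
    # from trellis.modules.attention import scaled_dot_product_attention

    N, L, H, C = 2, 16, 8, 64                    # batch, tokens, heads, head channels

    qkv = torch.randn(N, L, 3, H, C)             # packed self-attention: Q, K, V on dim 2
    out = scaled_dot_product_attention(qkv)      # -> [N, L, H, C]

    q = torch.randn(N, L, H, C)
    kv = torch.randn(N, L, 2, H, C)              # packed K/V for cross-attention
    out = scaled_dot_product_attention(q, kv)    # -> [N, L, H, C]

    k = torch.randn(N, L, H, C)
    v = torch.randn(N, L, H, C)
    out = scaled_dot_product_attention(q, k, v)  # -> [N, L, H, C]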
+ +def scaled_dot_product_attention(*args, **kwargs): + arg_names_dict = { + 1: ['qkv'], + 2: ['q', 'kv'], + 3: ['q', 'k', 'v'] + } + num_all_args = len(args) + len(kwargs) + assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3" + for key in arg_names_dict[num_all_args][len(args):]: + assert key in kwargs, f"Missing argument {key}" + + if num_all_args == 1: + qkv = args[0] if len(args) > 0 else kwargs['qkv'] + assert len(qkv.shape) == 5 and qkv.shape[2] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, L, 3, H, C]" + device = qkv.device + + elif num_all_args == 2: + q = args[0] if len(args) > 0 else kwargs['q'] + kv = args[1] if len(args) > 1 else kwargs['kv'] + assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}" + assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]" + assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]" + device = q.device + + elif num_all_args == 3: + q = args[0] if len(args) > 0 else kwargs['q'] + k = args[1] if len(args) > 1 else kwargs['k'] + v = args[2] if len(args) > 2 else kwargs['v'] + assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}" + assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]" + assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]" + assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]" + device = q.device + + if BACKEND == 'xformers': + if num_all_args == 1: + q, k, v = qkv.unbind(dim=2) + elif num_all_args == 2: + k, v = kv.unbind(dim=2) + out = xops.memory_efficient_attention(q, k, v) + elif BACKEND == 'flash_attn': + if num_all_args == 1: + out = flash_attn.flash_attn_qkvpacked_func(qkv) + elif num_all_args == 2: + out = flash_attn.flash_attn_kvpacked_func(q, kv) + elif num_all_args == 3: + out = flash_attn.flash_attn_func(q, k, v) + elif BACKEND == 'sdpa': + if num_all_args == 1: + q, k, v = qkv.unbind(dim=2) + elif num_all_args == 2: + k, v = kv.unbind(dim=2) + q = q.permute(0, 2, 1, 3) # [N, H, L, C] + k = k.permute(0, 2, 1, 3) # [N, H, L, C] + v = v.permute(0, 2, 1, 3) # [N, H, L, C] + out = sdpa(q, k, v) # [N, H, L, C] + out = out.permute(0, 2, 1, 3) # [N, L, H, C] + elif BACKEND == 'naive': + if num_all_args == 1: + q, k, v = qkv.unbind(dim=2) + elif num_all_args == 2: + k, v = kv.unbind(dim=2) + out = _naive_sdpa(q, k, v) + else: + raise ValueError(f"Unknown attention module: {BACKEND}") + + return out diff --git a/third_party/TRELLIS/trellis/modules/attention/modules.py b/third_party/TRELLIS/trellis/modules/attention/modules.py new file mode 100755 index 0000000000000000000000000000000000000000..dbe6235c27134f0477e48d3e12de3068c6a500ef --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/attention/modules.py @@ -0,0 +1,146 @@ +from typing import * +import torch +import torch.nn as nn +import torch.nn.functional as F +from .full_attn import scaled_dot_product_attention + + +class MultiHeadRMSNorm(nn.Module): + def __init__(self, dim: int, heads: int): + super().__init__() + self.scale = dim ** 0.5 + self.gamma = nn.Parameter(torch.ones(heads, dim)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return (F.normalize(x.float(), dim = -1) * self.gamma * self.scale).to(x.dtype) + + +class RotaryPositionEmbedder(nn.Module): + def __init__(self, hidden_size: int, in_channels: int = 3): 
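+        # NOTE: assumes hidden_size is even; frequency bands follow the standard RoPE schedule 1 / 10000**(i / freq_dim)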
+ super().__init__() + assert hidden_size % 2 == 0, "Hidden size must be divisible by 2" + self.hidden_size = hidden_size + self.in_channels = in_channels + self.freq_dim = hidden_size // in_channels // 2 + self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim + self.freqs = 1.0 / (10000 ** self.freqs) + + def _get_phases(self, indices: torch.Tensor) -> torch.Tensor: + self.freqs = self.freqs.to(indices.device) + phases = torch.outer(indices, self.freqs) + phases = torch.polar(torch.ones_like(phases), phases) + return phases + + def _rotary_embedding(self, x: torch.Tensor, phases: torch.Tensor) -> torch.Tensor: + x_complex = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2)) + x_rotated = x_complex * phases + x_embed = torch.view_as_real(x_rotated).reshape(*x_rotated.shape[:-1], -1).to(x.dtype) + return x_embed + + def forward(self, q: torch.Tensor, k: torch.Tensor, indices: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Args: + q (sp.SparseTensor): [..., N, D] tensor of queries + k (sp.SparseTensor): [..., N, D] tensor of keys + indices (torch.Tensor): [..., N, C] tensor of spatial positions + """ + if indices is None: + indices = torch.arange(q.shape[-2], device=q.device) + if len(q.shape) > 2: + indices = indices.unsqueeze(0).expand(q.shape[:-2] + (-1,)) + + phases = self._get_phases(indices.reshape(-1)).reshape(*indices.shape[:-1], -1) + if phases.shape[1] < self.hidden_size // 2: + phases = torch.cat([phases, torch.polar( + torch.ones(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device), + torch.zeros(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device) + )], dim=-1) + q_embed = self._rotary_embedding(q, phases) + k_embed = self._rotary_embedding(k, phases) + return q_embed, k_embed + + +class MultiHeadAttention(nn.Module): + def __init__( + self, + channels: int, + num_heads: int, + ctx_channels: Optional[int]=None, + type: Literal["self", "cross"] = "self", + attn_mode: Literal["full", "windowed"] = "full", + window_size: Optional[int] = None, + shift_window: Optional[Tuple[int, int, int]] = None, + qkv_bias: bool = True, + use_rope: bool = False, + qk_rms_norm: bool = False, + ): + super().__init__() + assert channels % num_heads == 0 + assert type in ["self", "cross"], f"Invalid attention type: {type}" + assert attn_mode in ["full", "windowed"], f"Invalid attention mode: {attn_mode}" + assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention" + + if attn_mode == "windowed": + raise NotImplementedError("Windowed attention is not yet implemented") + + self.channels = channels + self.head_dim = channels // num_heads + self.ctx_channels = ctx_channels if ctx_channels is not None else channels + self.num_heads = num_heads + self._type = type + self.attn_mode = attn_mode + self.window_size = window_size + self.shift_window = shift_window + self.use_rope = use_rope + self.qk_rms_norm = qk_rms_norm + + if self._type == "self": + self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias) + else: + self.to_q = nn.Linear(channels, channels, bias=qkv_bias) + self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias) + + if self.qk_rms_norm: + self.q_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads) + self.k_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads) + + self.to_out = nn.Linear(channels, channels) + + if use_rope: + self.rope = RotaryPositionEmbedder(channels) + + def forward(self, x: torch.Tensor, 
context: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None) -> torch.Tensor: + B, L, C = x.shape + if self._type == "self": + qkv = self.to_qkv(x) + qkv = qkv.reshape(B, L, 3, self.num_heads, -1) + if self.use_rope: + q, k, v = qkv.unbind(dim=2) + q, k = self.rope(q, k, indices) + qkv = torch.stack([q, k, v], dim=2) + if self.attn_mode == "full": + if self.qk_rms_norm: + q, k, v = qkv.unbind(dim=2) + q = self.q_rms_norm(q) + k = self.k_rms_norm(k) + h = scaled_dot_product_attention(q, k, v) + else: + h = scaled_dot_product_attention(qkv) + elif self.attn_mode == "windowed": + raise NotImplementedError("Windowed attention is not yet implemented") + else: + Lkv = context.shape[1] + q = self.to_q(x) + kv = self.to_kv(context) + q = q.reshape(B, L, self.num_heads, -1) + kv = kv.reshape(B, Lkv, 2, self.num_heads, -1) + if self.qk_rms_norm: + q = self.q_rms_norm(q) + k, v = kv.unbind(dim=2) + k = self.k_rms_norm(k) + h = scaled_dot_product_attention(q, k, v) + else: + h = scaled_dot_product_attention(q, kv) + h = h.reshape(B, L, -1) + h = self.to_out(h) + return h diff --git a/third_party/TRELLIS/trellis/modules/norm.py b/third_party/TRELLIS/trellis/modules/norm.py new file mode 100644 index 0000000000000000000000000000000000000000..09035726081fb7afda2c62504d5474cfa483c58f --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/norm.py @@ -0,0 +1,25 @@ +import torch +import torch.nn as nn + + +class LayerNorm32(nn.LayerNorm): + def forward(self, x: torch.Tensor) -> torch.Tensor: + return super().forward(x.float()).type(x.dtype) + + +class GroupNorm32(nn.GroupNorm): + """ + A GroupNorm layer that converts to float32 before the forward pass. + """ + def forward(self, x: torch.Tensor) -> torch.Tensor: + return super().forward(x.float()).type(x.dtype) + + +class ChannelLayerNorm32(LayerNorm32): + def forward(self, x: torch.Tensor) -> torch.Tensor: + DIM = x.dim() + x = x.permute(0, *range(2, DIM), 1).contiguous() + x = super().forward(x) + x = x.permute(0, DIM-1, *range(1, DIM-1)).contiguous() + return x + \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/modules/sparse/__init__.py b/third_party/TRELLIS/trellis/modules/sparse/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..726756c16dcfe0f04de0d2ea5bdce499fa220160 --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/__init__.py @@ -0,0 +1,102 @@ +from typing import * + +BACKEND = 'spconv' +DEBUG = False +ATTN = 'flash_attn' + +def __from_env(): + import os + + global BACKEND + global DEBUG + global ATTN + + env_sparse_backend = os.environ.get('SPARSE_BACKEND') + env_sparse_debug = os.environ.get('SPARSE_DEBUG') + env_sparse_attn = os.environ.get('SPARSE_ATTN_BACKEND') + if env_sparse_attn is None: + env_sparse_attn = os.environ.get('ATTN_BACKEND') + + if env_sparse_backend is not None and env_sparse_backend in ['spconv', 'torchsparse']: + BACKEND = env_sparse_backend + if env_sparse_debug is not None: + DEBUG = env_sparse_debug == '1' + if env_sparse_attn is not None and env_sparse_attn in ['xformers', 'flash_attn']: + ATTN = env_sparse_attn + + print(f"[SPARSE] Backend: {BACKEND}, Attention: {ATTN}") + + +__from_env() + + +def set_backend(backend: Literal['spconv', 'torchsparse']): + global BACKEND + BACKEND = backend + +def set_debug(debug: bool): + global DEBUG + DEBUG = debug + +def set_attn(attn: Literal['xformers', 'flash_attn']): + global ATTN + ATTN = attn + + +import importlib + +__attributes = { + 'SparseTensor': 'basic', + 'sparse_batch_broadcast': 'basic', + 
'sparse_batch_op': 'basic', + 'sparse_cat': 'basic', + 'sparse_unbind': 'basic', + 'SparseGroupNorm': 'norm', + 'SparseLayerNorm': 'norm', + 'SparseGroupNorm32': 'norm', + 'SparseLayerNorm32': 'norm', + 'SparseReLU': 'nonlinearity', + 'SparseSiLU': 'nonlinearity', + 'SparseGELU': 'nonlinearity', + 'SparseActivation': 'nonlinearity', + 'SparseLinear': 'linear', + 'sparse_scaled_dot_product_attention': 'attention', + 'SerializeMode': 'attention', + 'sparse_serialized_scaled_dot_product_self_attention': 'attention', + 'sparse_windowed_scaled_dot_product_self_attention': 'attention', + 'SparseMultiHeadAttention': 'attention', + 'SparseConv3d': 'conv', + 'SparseInverseConv3d': 'conv', + 'SparseDownsample': 'spatial', + 'SparseUpsample': 'spatial', + 'SparseSubdivide' : 'spatial' +} + +__submodules = ['transformer'] + +__all__ = list(__attributes.keys()) + __submodules + +def __getattr__(name): + if name not in globals(): + if name in __attributes: + module_name = __attributes[name] + module = importlib.import_module(f".{module_name}", __name__) + globals()[name] = getattr(module, name) + elif name in __submodules: + module = importlib.import_module(f".{name}", __name__) + globals()[name] = module + else: + raise AttributeError(f"module {__name__} has no attribute {name}") + return globals()[name] + + +# For Pylance +if __name__ == '__main__': + from .basic import * + from .norm import * + from .nonlinearity import * + from .linear import * + from .attention import * + from .conv import * + from .spatial import * + import transformer diff --git a/third_party/TRELLIS/trellis/modules/sparse/attention/__init__.py b/third_party/TRELLIS/trellis/modules/sparse/attention/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..32b3c2c837c613e41755ac4c85f9ed057a6f5bfb --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/attention/__init__.py @@ -0,0 +1,4 @@ +from .full_attn import * +from .serialized_attn import * +from .windowed_attn import * +from .modules import * diff --git a/third_party/TRELLIS/trellis/modules/sparse/attention/full_attn.py b/third_party/TRELLIS/trellis/modules/sparse/attention/full_attn.py new file mode 100755 index 0000000000000000000000000000000000000000..e9e27aeb98419621f3f9999fd3b11eebf2b90a40 --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/attention/full_attn.py @@ -0,0 +1,215 @@ +from typing import * +import torch +from .. import SparseTensor +from .. import DEBUG, ATTN + +if ATTN == 'xformers': + import xformers.ops as xops +elif ATTN == 'flash_attn': + import flash_attn +else: + raise ValueError(f"Unknown attention module: {ATTN}") + + +__all__ = [ + 'sparse_scaled_dot_product_attention', +] + + +@overload +def sparse_scaled_dot_product_attention(qkv: SparseTensor) -> SparseTensor: + """ + Apply scaled dot product attention to a sparse tensor. + + Args: + qkv (SparseTensor): A [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs. + """ + ... + +@overload +def sparse_scaled_dot_product_attention(q: SparseTensor, kv: Union[SparseTensor, torch.Tensor]) -> SparseTensor: + """ + Apply scaled dot product attention to a sparse tensor. + + Args: + q (SparseTensor): A [N, *, H, C] sparse tensor containing Qs. + kv (SparseTensor or torch.Tensor): A [N, *, 2, H, C] sparse tensor or a [N, L, 2, H, C] dense tensor containing Ks and Vs. + """ + ... + +@overload +def sparse_scaled_dot_product_attention(q: torch.Tensor, kv: SparseTensor) -> torch.Tensor: + """ + Apply scaled dot product attention to a sparse tensor. 
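+    Here q is a dense torch.Tensor attending over a SparseTensor kv; the result is a dense [N, L, H, C] tensor.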
+ + Args: + q (SparseTensor): A [N, L, H, C] dense tensor containing Qs. + kv (SparseTensor or torch.Tensor): A [N, *, 2, H, C] sparse tensor containing Ks and Vs. + """ + ... + +@overload +def sparse_scaled_dot_product_attention(q: SparseTensor, k: SparseTensor, v: SparseTensor) -> SparseTensor: + """ + Apply scaled dot product attention to a sparse tensor. + + Args: + q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs. + k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks. + v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs. + + Note: + k and v are assumed to have the same coordinate map. + """ + ... + +@overload +def sparse_scaled_dot_product_attention(q: SparseTensor, k: torch.Tensor, v: torch.Tensor) -> SparseTensor: + """ + Apply scaled dot product attention to a sparse tensor. + + Args: + q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs. + k (torch.Tensor): A [N, L, H, Ci] dense tensor containing Ks. + v (torch.Tensor): A [N, L, H, Co] dense tensor containing Vs. + """ + ... + +@overload +def sparse_scaled_dot_product_attention(q: torch.Tensor, k: SparseTensor, v: SparseTensor) -> torch.Tensor: + """ + Apply scaled dot product attention to a sparse tensor. + + Args: + q (torch.Tensor): A [N, L, H, Ci] dense tensor containing Qs. + k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks. + v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs. + """ + ... + +def sparse_scaled_dot_product_attention(*args, **kwargs): + arg_names_dict = { + 1: ['qkv'], + 2: ['q', 'kv'], + 3: ['q', 'k', 'v'] + } + num_all_args = len(args) + len(kwargs) + assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3" + for key in arg_names_dict[num_all_args][len(args):]: + assert key in kwargs, f"Missing argument {key}" + + if num_all_args == 1: + qkv = args[0] if len(args) > 0 else kwargs['qkv'] + assert isinstance(qkv, SparseTensor), f"qkv must be a SparseTensor, got {type(qkv)}" + assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]" + device = qkv.device + + s = qkv + q_seqlen = [qkv.layout[i].stop - qkv.layout[i].start for i in range(qkv.shape[0])] + kv_seqlen = q_seqlen + qkv = qkv.feats # [T, 3, H, C] + + elif num_all_args == 2: + q = args[0] if len(args) > 0 else kwargs['q'] + kv = args[1] if len(args) > 1 else kwargs['kv'] + assert isinstance(q, SparseTensor) and isinstance(kv, (SparseTensor, torch.Tensor)) or \ + isinstance(q, torch.Tensor) and isinstance(kv, SparseTensor), \ + f"Invalid types, got {type(q)} and {type(kv)}" + assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}" + device = q.device + + if isinstance(q, SparseTensor): + assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, C]" + s = q + q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])] + q = q.feats # [T_Q, H, C] + else: + assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]" + s = None + N, L, H, C = q.shape + q_seqlen = [L] * N + q = q.reshape(N * L, H, C) # [T_Q, H, C] + + if isinstance(kv, SparseTensor): + assert len(kv.shape) == 4 and kv.shape[1] == 2, f"Invalid shape for kv, got {kv.shape}, expected [N, *, 2, H, C]" + kv_seqlen = [kv.layout[i].stop - kv.layout[i].start for i in range(kv.shape[0])] + kv = kv.feats # [T_KV, 2, H, C] + else: + assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, 
C]" + N, L, _, H, C = kv.shape + kv_seqlen = [L] * N + kv = kv.reshape(N * L, 2, H, C) # [T_KV, 2, H, C] + + elif num_all_args == 3: + q = args[0] if len(args) > 0 else kwargs['q'] + k = args[1] if len(args) > 1 else kwargs['k'] + v = args[2] if len(args) > 2 else kwargs['v'] + assert isinstance(q, SparseTensor) and isinstance(k, (SparseTensor, torch.Tensor)) and type(k) == type(v) or \ + isinstance(q, torch.Tensor) and isinstance(k, SparseTensor) and isinstance(v, SparseTensor), \ + f"Invalid types, got {type(q)}, {type(k)}, and {type(v)}" + assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}" + device = q.device + + if isinstance(q, SparseTensor): + assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, Ci]" + s = q + q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])] + q = q.feats # [T_Q, H, Ci] + else: + assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]" + s = None + N, L, H, CI = q.shape + q_seqlen = [L] * N + q = q.reshape(N * L, H, CI) # [T_Q, H, Ci] + + if isinstance(k, SparseTensor): + assert len(k.shape) == 3, f"Invalid shape for k, got {k.shape}, expected [N, *, H, Ci]" + assert len(v.shape) == 3, f"Invalid shape for v, got {v.shape}, expected [N, *, H, Co]" + kv_seqlen = [k.layout[i].stop - k.layout[i].start for i in range(k.shape[0])] + k = k.feats # [T_KV, H, Ci] + v = v.feats # [T_KV, H, Co] + else: + assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]" + assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]" + N, L, H, CI, CO = *k.shape, v.shape[-1] + kv_seqlen = [L] * N + k = k.reshape(N * L, H, CI) # [T_KV, H, Ci] + v = v.reshape(N * L, H, CO) # [T_KV, H, Co] + + if DEBUG: + if s is not None: + for i in range(s.shape[0]): + assert (s.coords[s.layout[i]] == i).all(), f"SparseScaledDotProductSelfAttention: batch index mismatch" + if num_all_args in [2, 3]: + assert q.shape[:2] == [1, sum(q_seqlen)], f"SparseScaledDotProductSelfAttention: q shape mismatch" + if num_all_args == 3: + assert k.shape[:2] == [1, sum(kv_seqlen)], f"SparseScaledDotProductSelfAttention: k shape mismatch" + assert v.shape[:2] == [1, sum(kv_seqlen)], f"SparseScaledDotProductSelfAttention: v shape mismatch" + + if ATTN == 'xformers': + if num_all_args == 1: + q, k, v = qkv.unbind(dim=1) + elif num_all_args == 2: + k, v = kv.unbind(dim=1) + q = q.unsqueeze(0) + k = k.unsqueeze(0) + v = v.unsqueeze(0) + mask = xops.fmha.BlockDiagonalMask.from_seqlens(q_seqlen, kv_seqlen) + out = xops.memory_efficient_attention(q, k, v, mask)[0] + elif ATTN == 'flash_attn': + cu_seqlens_q = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int().to(device) + if num_all_args in [2, 3]: + cu_seqlens_kv = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(kv_seqlen), dim=0)]).int().to(device) + if num_all_args == 1: + out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv, cu_seqlens_q, max(q_seqlen)) + elif num_all_args == 2: + out = flash_attn.flash_attn_varlen_kvpacked_func(q, kv, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen)) + elif num_all_args == 3: + out = flash_attn.flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen)) + else: + raise ValueError(f"Unknown attention module: {ATTN}") + + if s is not None: + return s.replace(out) + else: + return out.reshape(N, L, H, -1) diff --git 
a/third_party/TRELLIS/trellis/modules/sparse/attention/modules.py b/third_party/TRELLIS/trellis/modules/sparse/attention/modules.py new file mode 100755 index 0000000000000000000000000000000000000000..5d2fe782b0947700e308e9ec0325e7e91c84e3c2 --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/attention/modules.py @@ -0,0 +1,139 @@ +from typing import * +import torch +import torch.nn as nn +import torch.nn.functional as F +from .. import SparseTensor +from .full_attn import sparse_scaled_dot_product_attention +from .serialized_attn import SerializeMode, sparse_serialized_scaled_dot_product_self_attention +from .windowed_attn import sparse_windowed_scaled_dot_product_self_attention +from ...attention import RotaryPositionEmbedder + + +class SparseMultiHeadRMSNorm(nn.Module): + def __init__(self, dim: int, heads: int): + super().__init__() + self.scale = dim ** 0.5 + self.gamma = nn.Parameter(torch.ones(heads, dim)) + + def forward(self, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]: + x_type = x.dtype + x = x.float() + if isinstance(x, SparseTensor): + x = x.replace(F.normalize(x.feats, dim=-1)) + else: + x = F.normalize(x, dim=-1) + return (x * self.gamma * self.scale).to(x_type) + + +class SparseMultiHeadAttention(nn.Module): + def __init__( + self, + channels: int, + num_heads: int, + ctx_channels: Optional[int] = None, + type: Literal["self", "cross"] = "self", + attn_mode: Literal["full", "serialized", "windowed"] = "full", + window_size: Optional[int] = None, + shift_sequence: Optional[int] = None, + shift_window: Optional[Tuple[int, int, int]] = None, + serialize_mode: Optional[SerializeMode] = None, + qkv_bias: bool = True, + use_rope: bool = False, + qk_rms_norm: bool = False, + ): + super().__init__() + assert channels % num_heads == 0 + assert type in ["self", "cross"], f"Invalid attention type: {type}" + assert attn_mode in ["full", "serialized", "windowed"], f"Invalid attention mode: {attn_mode}" + assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention" + assert type == "self" or use_rope is False, "Rotary position embeddings only supported for self-attention" + self.channels = channels + self.ctx_channels = ctx_channels if ctx_channels is not None else channels + self.num_heads = num_heads + self._type = type + self.attn_mode = attn_mode + self.window_size = window_size + self.shift_sequence = shift_sequence + self.shift_window = shift_window + self.serialize_mode = serialize_mode + self.use_rope = use_rope + self.qk_rms_norm = qk_rms_norm + + if self._type == "self": + self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias) + else: + self.to_q = nn.Linear(channels, channels, bias=qkv_bias) + self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias) + + if self.qk_rms_norm: + self.q_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads) + self.k_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads) + + self.to_out = nn.Linear(channels, channels) + + if use_rope: + self.rope = RotaryPositionEmbedder(channels) + + @staticmethod + def _linear(module: nn.Linear, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]: + if isinstance(x, SparseTensor): + return x.replace(module(x.feats)) + else: + return module(x) + + @staticmethod + def _reshape_chs(x: Union[SparseTensor, torch.Tensor], shape: Tuple[int, ...]) -> Union[SparseTensor, torch.Tensor]: + if isinstance(x, SparseTensor): + return x.reshape(*shape) + else: + return 
x.reshape(*x.shape[:2], *shape) + + def _fused_pre(self, x: Union[SparseTensor, torch.Tensor], num_fused: int) -> Union[SparseTensor, torch.Tensor]: + if isinstance(x, SparseTensor): + x_feats = x.feats.unsqueeze(0) + else: + x_feats = x + x_feats = x_feats.reshape(*x_feats.shape[:2], num_fused, self.num_heads, -1) + return x.replace(x_feats.squeeze(0)) if isinstance(x, SparseTensor) else x_feats + + def _rope(self, qkv: SparseTensor) -> SparseTensor: + q, k, v = qkv.feats.unbind(dim=1) # [T, H, C] + q, k = self.rope(q, k, qkv.coords[:, 1:]) + qkv = qkv.replace(torch.stack([q, k, v], dim=1)) + return qkv + + def forward(self, x: Union[SparseTensor, torch.Tensor], context: Optional[Union[SparseTensor, torch.Tensor]] = None) -> Union[SparseTensor, torch.Tensor]: + if self._type == "self": + qkv = self._linear(self.to_qkv, x) + qkv = self._fused_pre(qkv, num_fused=3) + if self.use_rope: + qkv = self._rope(qkv) + if self.qk_rms_norm: + q, k, v = qkv.unbind(dim=1) + q = self.q_rms_norm(q) + k = self.k_rms_norm(k) + qkv = qkv.replace(torch.stack([q.feats, k.feats, v.feats], dim=1)) + if self.attn_mode == "full": + h = sparse_scaled_dot_product_attention(qkv) + elif self.attn_mode == "serialized": + h = sparse_serialized_scaled_dot_product_self_attention( + qkv, self.window_size, serialize_mode=self.serialize_mode, shift_sequence=self.shift_sequence, shift_window=self.shift_window + ) + elif self.attn_mode == "windowed": + h = sparse_windowed_scaled_dot_product_self_attention( + qkv, self.window_size, shift_window=self.shift_window + ) + else: + q = self._linear(self.to_q, x) + q = self._reshape_chs(q, (self.num_heads, -1)) + kv = self._linear(self.to_kv, context) + kv = self._fused_pre(kv, num_fused=2) + if self.qk_rms_norm: + q = self.q_rms_norm(q) + k, v = kv.unbind(dim=1) + k = self.k_rms_norm(k) + kv = kv.replace(torch.stack([k.feats, v.feats], dim=1)) + h = sparse_scaled_dot_product_attention(q, kv) + h = self._reshape_chs(h, (-1,)) + h = self._linear(self.to_out, h) + return h diff --git a/third_party/TRELLIS/trellis/modules/sparse/attention/serialized_attn.py b/third_party/TRELLIS/trellis/modules/sparse/attention/serialized_attn.py new file mode 100755 index 0000000000000000000000000000000000000000..5950b75b2f5a6d6e79ab6d472b8501aaa5ec4a26 --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/attention/serialized_attn.py @@ -0,0 +1,193 @@ +from typing import * +from enum import Enum +import torch +import math +from .. import SparseTensor +from .. import DEBUG, ATTN + +if ATTN == 'xformers': + import xformers.ops as xops +elif ATTN == 'flash_attn': + import flash_attn +else: + raise ValueError(f"Unknown attention module: {ATTN}") + + +__all__ = [ + 'sparse_serialized_scaled_dot_product_self_attention', +] + + +class SerializeMode(Enum): + Z_ORDER = 0 + Z_ORDER_TRANSPOSED = 1 + HILBERT = 2 + HILBERT_TRANSPOSED = 3 + + +SerializeModes = [ + SerializeMode.Z_ORDER, + SerializeMode.Z_ORDER_TRANSPOSED, + SerializeMode.HILBERT, + SerializeMode.HILBERT_TRANSPOSED +] + + +def calc_serialization( + tensor: SparseTensor, + window_size: int, + serialize_mode: SerializeMode = SerializeMode.Z_ORDER, + shift_sequence: int = 0, + shift_window: Tuple[int, int, int] = (0, 0, 0) +) -> Tuple[torch.Tensor, torch.Tensor, List[int]]: + """ + Calculate serialization and partitioning for a set of coordinates. + + Args: + tensor (SparseTensor): The input tensor. + window_size (int): The window size to use. + serialize_mode (SerializeMode): The serialization mode to use. 
+        shift_sequence (int): The shift of the serialized sequence.
+        shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
+
+    Returns:
+        (torch.Tensor): Forwards indices.
+        (torch.Tensor): Backwards indices.
+        (List[int]): Sequence lengths.
+        (List[int]): Sequence batch indices.
+    """
+    fwd_indices = []
+    bwd_indices = []
+    seq_lens = []
+    seq_batch_indices = []
+    offsets = [0]
+
+    if 'vox2seq' not in globals():
+        import vox2seq
+
+    # Serialize the input
+    serialize_coords = tensor.coords[:, 1:].clone()
+    serialize_coords += torch.tensor(shift_window, dtype=torch.int32, device=tensor.device).reshape(1, 3)
+    if serialize_mode == SerializeMode.Z_ORDER:
+        code = vox2seq.encode(serialize_coords, mode='z_order', permute=[0, 1, 2])
+    elif serialize_mode == SerializeMode.Z_ORDER_TRANSPOSED:
+        code = vox2seq.encode(serialize_coords, mode='z_order', permute=[1, 0, 2])
+    elif serialize_mode == SerializeMode.HILBERT:
+        code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[0, 1, 2])
+    elif serialize_mode == SerializeMode.HILBERT_TRANSPOSED:
+        code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[1, 0, 2])
+    else:
+        raise ValueError(f"Unknown serialize mode: {serialize_mode}")
+
+    for bi, s in enumerate(tensor.layout):
+        num_points = s.stop - s.start
+        num_windows = (num_points + window_size - 1) // window_size
+        valid_window_size = num_points / num_windows
+        to_ordered = torch.argsort(code[s.start:s.stop])
+        if num_windows == 1:
+            fwd_indices.append(to_ordered)
+            bwd_indices.append(torch.zeros_like(to_ordered).scatter_(0, to_ordered, torch.arange(num_points, device=tensor.device)))
+            fwd_indices[-1] += s.start
+            bwd_indices[-1] += offsets[-1]
+            seq_lens.append(num_points)
+            seq_batch_indices.append(bi)
+            offsets.append(offsets[-1] + seq_lens[-1])
+        else:
+            # Partition the input
+            offset = 0
+            mids = [(i + 0.5) * valid_window_size + shift_sequence for i in range(num_windows)]
+            split = [math.floor(i * valid_window_size + shift_sequence) for i in range(num_windows + 1)]
+            bwd_index = torch.zeros((num_points,), dtype=torch.int64, device=tensor.device)
+            for i in range(num_windows):
+                mid = mids[i]
+                valid_start = split[i]
+                valid_end = split[i + 1]
+                padded_start = math.floor(mid - 0.5 * window_size)
+                padded_end = padded_start + window_size
+                fwd_indices.append(to_ordered[torch.arange(padded_start, padded_end, device=tensor.device) % num_points])
+                offset += valid_start - padded_start
+                bwd_index.scatter_(0, fwd_indices[-1][valid_start-padded_start:valid_end-padded_start], torch.arange(offset, offset + valid_end - valid_start, device=tensor.device))
+                offset += padded_end - valid_start
+                fwd_indices[-1] += s.start
+            seq_lens.extend([window_size] * num_windows)
+            seq_batch_indices.extend([bi] * num_windows)
+            bwd_indices.append(bwd_index + offsets[-1])
+            offsets.append(offsets[-1] + num_windows * window_size)
+
+    fwd_indices = torch.cat(fwd_indices)
+    bwd_indices = torch.cat(bwd_indices)
+
+    return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
+
+
+def sparse_serialized_scaled_dot_product_self_attention(
+    qkv: SparseTensor,
+    window_size: int,
+    serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
+    shift_sequence: int = 0,
+    shift_window: Tuple[int, int, int] = (0, 0, 0)
+) -> SparseTensor:
+    """
+    Apply serialized scaled dot product self attention to a sparse tensor.
+
+    Args:
+        qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
+        window_size (int): The window size to use.
+        serialize_mode (SerializeMode): The serialization mode to use.
+        shift_sequence (int): The shift of the serialized sequence.
+        shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
+    """
+    assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
+
+    serialization_spatial_cache_name = f'serialization_{serialize_mode}_{window_size}_{shift_sequence}_{shift_window}'
+    serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
+    if serialization_spatial_cache is None:
+        fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_serialization(qkv, window_size, serialize_mode, shift_sequence, shift_window)
+        qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices))
+    else:
+        fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache
+
+    M = fwd_indices.shape[0]
+    T = qkv.feats.shape[0]
+    H = qkv.feats.shape[2]
+    C = qkv.feats.shape[3]
+
+    qkv_feats = qkv.feats[fwd_indices]      # [M, 3, H, C]
+
+    if DEBUG:
+        start = 0
+        qkv_coords = qkv.coords[fwd_indices]
+        for i in range(len(seq_lens)):
+            assert (qkv_coords[start:start+seq_lens[i], 0] == seq_batch_indices[i]).all(), f"SparseSerializedScaledDotProductSelfAttention: batch index mismatch"
+            start += seq_lens[i]
+
+    if all([seq_len == window_size for seq_len in seq_lens]):
+        B = len(seq_lens)
+        N = window_size
+        qkv_feats = qkv_feats.reshape(B, N, 3, H, C)
+        if ATTN == 'xformers':
+            q, k, v = qkv_feats.unbind(dim=2)                       # [B, N, H, C]
+            out = xops.memory_efficient_attention(q, k, v)          # [B, N, H, C]
+        elif ATTN == 'flash_attn':
+            out = flash_attn.flash_attn_qkvpacked_func(qkv_feats)   # [B, N, H, C]
+        else:
+            raise ValueError(f"Unknown attention module: {ATTN}")
+        out = out.reshape(B * N, H, C)                              # [M, H, C]
+    else:
+        if ATTN == 'xformers':
+            q, k, v = qkv_feats.unbind(dim=1)                       # [M, H, C]
+            q = q.unsqueeze(0)                                      # [1, M, H, C]
+            k = k.unsqueeze(0)                                      # [1, M, H, C]
+            v = v.unsqueeze(0)                                      # [1, M, H, C]
+            mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
+            out = xops.memory_efficient_attention(q, k, v, mask)[0] # [M, H, C]
+        elif ATTN == 'flash_attn':
+            cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \
+                .to(qkv.device).int()
+            out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens))  # [M, H, C]
+
+    out = out[bwd_indices]      # [T, H, C]
+
+    if DEBUG:
+        qkv_coords = qkv_coords[bwd_indices]
+        assert torch.equal(qkv_coords, qkv.coords), "SparseSerializedScaledDotProductSelfAttention: coordinate mismatch"
+
+    return qkv.replace(out)
diff --git a/third_party/TRELLIS/trellis/modules/sparse/attention/windowed_attn.py b/third_party/TRELLIS/trellis/modules/sparse/attention/windowed_attn.py
new file mode 100755
index 0000000000000000000000000000000000000000..cd642c5252e29a3a5e59fad7ed3880b7b00bcf9a
--- /dev/null
+++ b/third_party/TRELLIS/trellis/modules/sparse/attention/windowed_attn.py
@@ -0,0 +1,135 @@
+from typing import *
+import torch
+import math
+from .. import SparseTensor
+from .. 
import DEBUG, ATTN
+
+if ATTN == 'xformers':
+    import xformers.ops as xops
+elif ATTN == 'flash_attn':
+    import flash_attn
+else:
+    raise ValueError(f"Unknown attention module: {ATTN}")
+
+
+__all__ = [
+    'sparse_windowed_scaled_dot_product_self_attention',
+]
+
+
+def calc_window_partition(
+    tensor: SparseTensor,
+    window_size: Union[int, Tuple[int, ...]],
+    shift_window: Union[int, Tuple[int, ...]] = 0
+) -> Tuple[torch.Tensor, torch.Tensor, List[int], List[int]]:
+    """
+    Calculate window partitioning for a set of coordinates.
+
+    Args:
+        tensor (SparseTensor): The input tensor.
+        window_size (int or Tuple[int, ...]): The window size to use.
+        shift_window (int or Tuple[int, ...]): The shift applied to the coordinates before windowing.
+
+    Returns:
+        (torch.Tensor): Forwards indices.
+        (torch.Tensor): Backwards indices.
+        (List[int]): Sequence lengths.
+        (List[int]): Sequence batch indices.
+    """
+    DIM = tensor.coords.shape[1] - 1
+    shift_window = (shift_window,) * DIM if isinstance(shift_window, int) else shift_window
+    window_size = (window_size,) * DIM if isinstance(window_size, int) else window_size
+    shifted_coords = tensor.coords.clone().detach()
+    shifted_coords[:, 1:] += torch.tensor(shift_window, device=tensor.device, dtype=torch.int32).unsqueeze(0)
+
+    MAX_COORDS = shifted_coords[:, 1:].max(dim=0).values.tolist()
+    NUM_WINDOWS = [math.ceil((mc + 1) / ws) for mc, ws in zip(MAX_COORDS, window_size)]
+    OFFSET = torch.cumprod(torch.tensor([1] + NUM_WINDOWS[::-1]), dim=0).tolist()[::-1]
+
+    shifted_coords[:, 1:] //= torch.tensor(window_size, device=tensor.device, dtype=torch.int32).unsqueeze(0)
+    shifted_indices = (shifted_coords * torch.tensor(OFFSET, device=tensor.device, dtype=torch.int32).unsqueeze(0)).sum(dim=1)
+    fwd_indices = torch.argsort(shifted_indices)
+    bwd_indices = torch.empty_like(fwd_indices)
+    bwd_indices[fwd_indices] = torch.arange(fwd_indices.shape[0], device=tensor.device)
+    seq_lens = torch.bincount(shifted_indices)
+    seq_batch_indices = torch.arange(seq_lens.shape[0], device=tensor.device, dtype=torch.int32) // OFFSET[0]
+    mask = seq_lens != 0
+    seq_lens = seq_lens[mask].tolist()
+    seq_batch_indices = seq_batch_indices[mask].tolist()
+
+    return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
+
+
+def sparse_windowed_scaled_dot_product_self_attention(
+    qkv: SparseTensor,
+    window_size: int,
+    shift_window: Tuple[int, int, int] = (0, 0, 0)
+) -> SparseTensor:
+    """
+    Apply windowed scaled dot product self attention to a sparse tensor.
+
+    Args:
+        qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
+        window_size (int): The window size to use.
+        shift_window (Tuple[int, int, int]): The shift applied to the coordinates before windowing.
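+
+    Example:
+        A minimal usage sketch (illustrative; assumes `qkv` is an already-built
+        SparseTensor whose feats have shape [T, 3, H, C]):
+
+            out = sparse_windowed_scaled_dot_product_self_attention(qkv, window_size=8)
+            out = sparse_windowed_scaled_dot_product_self_attention(qkv, 8, shift_window=(4, 4, 4))  # Swin-style shifted windows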
+ """ + assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]" + + serialization_spatial_cache_name = f'window_partition_{window_size}_{shift_window}' + serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name) + if serialization_spatial_cache is None: + fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_window_partition(qkv, window_size, shift_window) + qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices)) + else: + fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache + + M = fwd_indices.shape[0] + T = qkv.feats.shape[0] + H = qkv.feats.shape[2] + C = qkv.feats.shape[3] + + qkv_feats = qkv.feats[fwd_indices] # [M, 3, H, C] + + if DEBUG: + start = 0 + qkv_coords = qkv.coords[fwd_indices] + for i in range(len(seq_lens)): + seq_coords = qkv_coords[start:start+seq_lens[i]] + assert (seq_coords[:, 0] == seq_batch_indices[i]).all(), f"SparseWindowedScaledDotProductSelfAttention: batch index mismatch" + assert (seq_coords[:, 1:].max(dim=0).values - seq_coords[:, 1:].min(dim=0).values < window_size).all(), \ + f"SparseWindowedScaledDotProductSelfAttention: window size exceeded" + start += seq_lens[i] + + if all([seq_len == window_size for seq_len in seq_lens]): + B = len(seq_lens) + N = window_size + qkv_feats = qkv_feats.reshape(B, N, 3, H, C) + if ATTN == 'xformers': + q, k, v = qkv_feats.unbind(dim=2) # [B, N, H, C] + out = xops.memory_efficient_attention(q, k, v) # [B, N, H, C] + elif ATTN == 'flash_attn': + out = flash_attn.flash_attn_qkvpacked_func(qkv_feats) # [B, N, H, C] + else: + raise ValueError(f"Unknown attention module: {ATTN}") + out = out.reshape(B * N, H, C) # [M, H, C] + else: + if ATTN == 'xformers': + q, k, v = qkv_feats.unbind(dim=1) # [M, H, C] + q = q.unsqueeze(0) # [1, M, H, C] + k = k.unsqueeze(0) # [1, M, H, C] + v = v.unsqueeze(0) # [1, M, H, C] + mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens) + out = xops.memory_efficient_attention(q, k, v, mask)[0] # [M, H, C] + elif ATTN == 'flash_attn': + cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \ + .to(qkv.device).int() + out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens)) # [M, H, C] + + out = out[bwd_indices] # [T, H, C] + + if DEBUG: + qkv_coords = qkv_coords[bwd_indices] + assert torch.equal(qkv_coords, qkv.coords), "SparseWindowedScaledDotProductSelfAttention: coordinate mismatch" + + return qkv.replace(out) diff --git a/third_party/TRELLIS/trellis/modules/sparse/basic.py b/third_party/TRELLIS/trellis/modules/sparse/basic.py new file mode 100755 index 0000000000000000000000000000000000000000..8837f44052f6d573d09e3bfb897e659e10516bb5 --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/basic.py @@ -0,0 +1,459 @@ +from typing import * +import torch +import torch.nn as nn +from . import BACKEND, DEBUG +SparseTensorData = None # Lazy import + + +__all__ = [ + 'SparseTensor', + 'sparse_batch_broadcast', + 'sparse_batch_op', + 'sparse_cat', + 'sparse_unbind', +] + + +class SparseTensor: + """ + Sparse tensor with support for both torchsparse and spconv backends. + + Parameters: + - feats (torch.Tensor): Features of the sparse tensor. + - coords (torch.Tensor): Coordinates of the sparse tensor. + - shape (torch.Size): Shape of the sparse tensor. 
+ - layout (List[slice]): Layout of the sparse tensor for each batch + - data (SparseTensorData): Sparse tensor data used for convolusion + + NOTE: + - Data corresponding to a same batch should be contiguous. + - Coords should be in [0, 1023] + """ + @overload + def __init__(self, feats: torch.Tensor, coords: torch.Tensor, shape: Optional[torch.Size] = None, layout: Optional[List[slice]] = None, **kwargs): ... + + @overload + def __init__(self, data, shape: Optional[torch.Size] = None, layout: Optional[List[slice]] = None, **kwargs): ... + + def __init__(self, *args, **kwargs): + # Lazy import of sparse tensor backend + global SparseTensorData + if SparseTensorData is None: + import importlib + if BACKEND == 'torchsparse': + SparseTensorData = importlib.import_module('torchsparse').SparseTensor + elif BACKEND == 'spconv': + SparseTensorData = importlib.import_module('spconv.pytorch').SparseConvTensor + + method_id = 0 + if len(args) != 0: + method_id = 0 if isinstance(args[0], torch.Tensor) else 1 + else: + method_id = 1 if 'data' in kwargs else 0 + + if method_id == 0: + feats, coords, shape, layout = args + (None,) * (4 - len(args)) + if 'feats' in kwargs: + feats = kwargs['feats'] + del kwargs['feats'] + if 'coords' in kwargs: + coords = kwargs['coords'] + del kwargs['coords'] + if 'shape' in kwargs: + shape = kwargs['shape'] + del kwargs['shape'] + if 'layout' in kwargs: + layout = kwargs['layout'] + del kwargs['layout'] + + if shape is None: + shape = self.__cal_shape(feats, coords) + if layout is None: + layout = self.__cal_layout(coords, shape[0]) + if BACKEND == 'torchsparse': + self.data = SparseTensorData(feats, coords, **kwargs) + elif BACKEND == 'spconv': + spatial_shape = list(coords.max(0)[0] + 1)[1:] + self.data = SparseTensorData(feats.reshape(feats.shape[0], -1), coords, spatial_shape, shape[0], **kwargs) + self.data._features = feats + elif method_id == 1: + data, shape, layout = args + (None,) * (3 - len(args)) + if 'data' in kwargs: + data = kwargs['data'] + del kwargs['data'] + if 'shape' in kwargs: + shape = kwargs['shape'] + del kwargs['shape'] + if 'layout' in kwargs: + layout = kwargs['layout'] + del kwargs['layout'] + + self.data = data + if shape is None: + shape = self.__cal_shape(self.feats, self.coords) + if layout is None: + layout = self.__cal_layout(self.coords, shape[0]) + + self._shape = shape + self._layout = layout + self._scale = kwargs.get('scale', (1, 1, 1)) + self._spatial_cache = kwargs.get('spatial_cache', {}) + + if DEBUG: + try: + assert self.feats.shape[0] == self.coords.shape[0], f"Invalid feats shape: {self.feats.shape}, coords shape: {self.coords.shape}" + assert self.shape == self.__cal_shape(self.feats, self.coords), f"Invalid shape: {self.shape}" + assert self.layout == self.__cal_layout(self.coords, self.shape[0]), f"Invalid layout: {self.layout}" + for i in range(self.shape[0]): + assert torch.all(self.coords[self.layout[i], 0] == i), f"The data of batch {i} is not contiguous" + except Exception as e: + print('Debugging information:') + print(f"- Shape: {self.shape}") + print(f"- Layout: {self.layout}") + print(f"- Scale: {self._scale}") + print(f"- Coords: {self.coords}") + raise e + + def __cal_shape(self, feats, coords): + shape = [] + shape.append(coords[:, 0].max().item() + 1) + shape.extend([*feats.shape[1:]]) + return torch.Size(shape) + + def __cal_layout(self, coords, batch_size): + seq_len = torch.bincount(coords[:, 0], minlength=batch_size) + offset = torch.cumsum(seq_len, dim=0) + layout = [slice((offset[i] - 
seq_len[i]).item(), offset[i].item()) for i in range(batch_size)] + return layout + + @property + def shape(self) -> torch.Size: + return self._shape + + def dim(self) -> int: + return len(self.shape) + + @property + def layout(self) -> List[slice]: + return self._layout + + @property + def feats(self) -> torch.Tensor: + if BACKEND == 'torchsparse': + return self.data.F + elif BACKEND == 'spconv': + return self.data.features + + @feats.setter + def feats(self, value: torch.Tensor): + if BACKEND == 'torchsparse': + self.data.F = value + elif BACKEND == 'spconv': + self.data.features = value + + @property + def coords(self) -> torch.Tensor: + if BACKEND == 'torchsparse': + return self.data.C + elif BACKEND == 'spconv': + return self.data.indices + + @coords.setter + def coords(self, value: torch.Tensor): + if BACKEND == 'torchsparse': + self.data.C = value + elif BACKEND == 'spconv': + self.data.indices = value + + @property + def dtype(self): + return self.feats.dtype + + @property + def device(self): + return self.feats.device + + @overload + def to(self, dtype: torch.dtype) -> 'SparseTensor': ... + + @overload + def to(self, device: Optional[Union[str, torch.device]] = None, dtype: Optional[torch.dtype] = None) -> 'SparseTensor': ... + + def to(self, *args, **kwargs) -> 'SparseTensor': + device = None + dtype = None + if len(args) == 2: + device, dtype = args + elif len(args) == 1: + if isinstance(args[0], torch.dtype): + dtype = args[0] + else: + device = args[0] + if 'dtype' in kwargs: + assert dtype is None, "to() received multiple values for argument 'dtype'" + dtype = kwargs['dtype'] + if 'device' in kwargs: + assert device is None, "to() received multiple values for argument 'device'" + device = kwargs['device'] + + new_feats = self.feats.to(device=device, dtype=dtype) + new_coords = self.coords.to(device=device) + return self.replace(new_feats, new_coords) + + def type(self, dtype): + new_feats = self.feats.type(dtype) + return self.replace(new_feats) + + def cpu(self) -> 'SparseTensor': + new_feats = self.feats.cpu() + new_coords = self.coords.cpu() + return self.replace(new_feats, new_coords) + + def cuda(self) -> 'SparseTensor': + new_feats = self.feats.cuda() + new_coords = self.coords.cuda() + return self.replace(new_feats, new_coords) + + def half(self) -> 'SparseTensor': + new_feats = self.feats.half() + return self.replace(new_feats) + + def float(self) -> 'SparseTensor': + new_feats = self.feats.float() + return self.replace(new_feats) + + def detach(self) -> 'SparseTensor': + new_coords = self.coords.detach() + new_feats = self.feats.detach() + return self.replace(new_feats, new_coords) + + def dense(self) -> torch.Tensor: + if BACKEND == 'torchsparse': + return self.data.dense() + elif BACKEND == 'spconv': + return self.data.dense() + + def reshape(self, *shape) -> 'SparseTensor': + new_feats = self.feats.reshape(self.feats.shape[0], *shape) + return self.replace(new_feats) + + def unbind(self, dim: int) -> List['SparseTensor']: + return sparse_unbind(self, dim) + + def replace(self, feats: torch.Tensor, coords: Optional[torch.Tensor] = None) -> 'SparseTensor': + new_shape = [self.shape[0]] + new_shape.extend(feats.shape[1:]) + if BACKEND == 'torchsparse': + new_data = SparseTensorData( + feats=feats, + coords=self.data.coords if coords is None else coords, + stride=self.data.stride, + spatial_range=self.data.spatial_range, + ) + new_data._caches = self.data._caches + elif BACKEND == 'spconv': + new_data = SparseTensorData( + 
self.data.features.reshape(self.data.features.shape[0], -1), + self.data.indices, + self.data.spatial_shape, + self.data.batch_size, + self.data.grid, + self.data.voxel_num, + self.data.indice_dict + ) + new_data._features = feats + new_data.benchmark = self.data.benchmark + new_data.benchmark_record = self.data.benchmark_record + new_data.thrust_allocator = self.data.thrust_allocator + new_data._timer = self.data._timer + new_data.force_algo = self.data.force_algo + new_data.int8_scale = self.data.int8_scale + if coords is not None: + new_data.indices = coords + new_tensor = SparseTensor(new_data, shape=torch.Size(new_shape), layout=self.layout, scale=self._scale, spatial_cache=self._spatial_cache) + return new_tensor + + @staticmethod + def full(aabb, dim, value, dtype=torch.float32, device=None) -> 'SparseTensor': + N, C = dim + x = torch.arange(aabb[0], aabb[3] + 1) + y = torch.arange(aabb[1], aabb[4] + 1) + z = torch.arange(aabb[2], aabb[5] + 1) + coords = torch.stack(torch.meshgrid(x, y, z, indexing='ij'), dim=-1).reshape(-1, 3) + coords = torch.cat([ + torch.arange(N).view(-1, 1).repeat(1, coords.shape[0]).view(-1, 1), + coords.repeat(N, 1), + ], dim=1).to(dtype=torch.int32, device=device) + feats = torch.full((coords.shape[0], C), value, dtype=dtype, device=device) + return SparseTensor(feats=feats, coords=coords) + + def __merge_sparse_cache(self, other: 'SparseTensor') -> dict: + new_cache = {} + for k in set(list(self._spatial_cache.keys()) + list(other._spatial_cache.keys())): + if k in self._spatial_cache: + new_cache[k] = self._spatial_cache[k] + if k in other._spatial_cache: + if k not in new_cache: + new_cache[k] = other._spatial_cache[k] + else: + new_cache[k].update(other._spatial_cache[k]) + return new_cache + + def __neg__(self) -> 'SparseTensor': + return self.replace(-self.feats) + + def __elemwise__(self, other: Union[torch.Tensor, 'SparseTensor'], op: callable) -> 'SparseTensor': + if isinstance(other, torch.Tensor): + try: + other = torch.broadcast_to(other, self.shape) + other = sparse_batch_broadcast(self, other) + except: + pass + if isinstance(other, SparseTensor): + other = other.feats + new_feats = op(self.feats, other) + new_tensor = self.replace(new_feats) + if isinstance(other, SparseTensor): + new_tensor._spatial_cache = self.__merge_sparse_cache(other) + return new_tensor + + def __add__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor': + return self.__elemwise__(other, torch.add) + + def __radd__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor': + return self.__elemwise__(other, torch.add) + + def __sub__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor': + return self.__elemwise__(other, torch.sub) + + def __rsub__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor': + return self.__elemwise__(other, lambda x, y: torch.sub(y, x)) + + def __mul__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor': + return self.__elemwise__(other, torch.mul) + + def __rmul__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor': + return self.__elemwise__(other, torch.mul) + + def __truediv__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor': + return self.__elemwise__(other, torch.div) + + def __rtruediv__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor': + return self.__elemwise__(other, lambda x, y: torch.div(y, x)) + + def __getitem__(self, idx): + if isinstance(idx, int): 
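+            # An int selects a single batch element; wrap it so the re-packing
+            # loop below can treat every index type as an iterable of batch ids.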
+            idx = [idx]
+        elif isinstance(idx, slice):
+            idx = range(*idx.indices(self.shape[0]))
+        elif isinstance(idx, torch.Tensor):
+            if idx.dtype == torch.bool:
+                assert idx.shape == (self.shape[0],), f"Invalid index shape: {idx.shape}"
+                idx = idx.nonzero().squeeze(1)
+            elif idx.dtype in [torch.int32, torch.int64]:
+                assert len(idx.shape) == 1, f"Invalid index shape: {idx.shape}"
+            else:
+                raise ValueError(f"Unknown index type: {idx.dtype}")
+        else:
+            raise ValueError(f"Unknown index type: {type(idx)}")
+
+        coords = []
+        feats = []
+        for new_idx, old_idx in enumerate(idx):
+            coords.append(self.coords[self.layout[old_idx]].clone())
+            coords[-1][:, 0] = new_idx
+            feats.append(self.feats[self.layout[old_idx]])
+        coords = torch.cat(coords, dim=0).contiguous()
+        feats = torch.cat(feats, dim=0).contiguous()
+        return SparseTensor(feats=feats, coords=coords)
+
+    def register_spatial_cache(self, key, value) -> None:
+        """
+        Register a spatial cache.
+        The spatial cache can hold anything you want to cache.
+        Registration and retrieval of the cache are keyed by the tensor's current scale.
+        """
+        scale_key = str(self._scale)
+        if scale_key not in self._spatial_cache:
+            self._spatial_cache[scale_key] = {}
+        self._spatial_cache[scale_key][key] = value
+
+    def get_spatial_cache(self, key=None):
+        """
+        Get a spatial cache.
+        """
+        scale_key = str(self._scale)
+        cur_scale_cache = self._spatial_cache.get(scale_key, {})
+        if key is None:
+            return cur_scale_cache
+        return cur_scale_cache.get(key, None)
+
+
+def sparse_batch_broadcast(input: SparseTensor, other: torch.Tensor) -> torch.Tensor:
+    """
+    Broadcast a per-batch tensor across the batch dimension of a sparse tensor.
+
+    Args:
+        input (SparseTensor): Sparse tensor to broadcast to.
+        other (torch.Tensor): Tensor holding one entry per batch to broadcast.
+
+    Returns:
+        torch.Tensor: The entries of `other` expanded to the layout of `input`.
+    """
+    coords, feats = input.coords, input.feats
+    broadcasted = torch.zeros_like(feats)
+    for k in range(input.shape[0]):
+        broadcasted[input.layout[k]] = other[k]
+    return broadcasted
+
+
+def sparse_batch_op(input: SparseTensor, other: torch.Tensor, op: callable = torch.add) -> SparseTensor:
+    """
+    Broadcast a per-batch tensor across a sparse tensor's batch dimension, then apply an elementwise operation.
+
+    Args:
+        input (SparseTensor): Sparse tensor to operate on.
+        other (torch.Tensor): Tensor holding one entry per batch to broadcast.
+        op (callable): Operation to perform after broadcasting. Defaults to torch.add.
+    """
+    return input.replace(op(input.feats, sparse_batch_broadcast(input, other)))
+
+
+def sparse_cat(inputs: List[SparseTensor], dim: int = 0) -> SparseTensor:
+    """
+    Concatenate a list of sparse tensors.
+
+    Args:
+        inputs (List[SparseTensor]): List of sparse tensors to concatenate.
+        dim (int): Dimension along which to concatenate. Defaults to 0 (the batch dimension).
+    """
+    if dim == 0:
+        start = 0
+        coords = []
+        for input in inputs:
+            coords.append(input.coords.clone())
+            coords[-1][:, 0] += start
+            start += input.shape[0]
+        coords = torch.cat(coords, dim=0)
+        feats = torch.cat([input.feats for input in inputs], dim=0)
+        output = SparseTensor(
+            coords=coords,
+            feats=feats,
+        )
+    else:
+        feats = torch.cat([input.feats for input in inputs], dim=dim)
+        output = inputs[0].replace(feats)
+
+    return output
+
+
+def sparse_unbind(input: SparseTensor, dim: int) -> List[SparseTensor]:
+    """
+    Unbind a sparse tensor along a dimension.
+
+    Args:
+        input (SparseTensor): Sparse tensor to unbind.
+        dim (int): Dimension to unbind.
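+
+    Example:
+        A minimal usage sketch (illustrative; assumes `qkv` is a SparseTensor whose
+        feats have shape [T, 3, H, C], i.e. packed Qs, Ks, and Vs):
+
+            q, k, v = sparse_unbind(qkv, dim=1)  # three SparseTensors with [T, H, C] feats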
+ """ + if dim == 0: + return [input[i] for i in range(input.shape[0])] + else: + feats = input.feats.unbind(dim) + return [input.replace(f) for f in feats] diff --git a/third_party/TRELLIS/trellis/modules/sparse/conv/__init__.py b/third_party/TRELLIS/trellis/modules/sparse/conv/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..340a87126a8de574ee0276feb96b49824a2ce234 --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/conv/__init__.py @@ -0,0 +1,21 @@ +from .. import BACKEND + + +SPCONV_ALGO = 'auto' # 'auto', 'implicit_gemm', 'native' + +def __from_env(): + import os + + global SPCONV_ALGO + env_spconv_algo = os.environ.get('SPCONV_ALGO') + if env_spconv_algo is not None and env_spconv_algo in ['auto', 'implicit_gemm', 'native']: + SPCONV_ALGO = env_spconv_algo + print(f"[SPARSE][CONV] spconv algo: {SPCONV_ALGO}") + + +__from_env() + +if BACKEND == 'torchsparse': + from .conv_torchsparse import * +elif BACKEND == 'spconv': + from .conv_spconv import * diff --git a/third_party/TRELLIS/trellis/modules/sparse/conv/conv_spconv.py b/third_party/TRELLIS/trellis/modules/sparse/conv/conv_spconv.py new file mode 100755 index 0000000000000000000000000000000000000000..524bcd4a845b2d6bd090a5f74bc8859978727528 --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/conv/conv_spconv.py @@ -0,0 +1,80 @@ +import torch +import torch.nn as nn +from .. import SparseTensor +from .. import DEBUG +from . import SPCONV_ALGO + +class SparseConv3d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, padding=None, bias=True, indice_key=None): + super(SparseConv3d, self).__init__() + if 'spconv' not in globals(): + import spconv.pytorch as spconv + algo = None + if SPCONV_ALGO == 'native': + algo = spconv.ConvAlgo.Native + elif SPCONV_ALGO == 'implicit_gemm': + algo = spconv.ConvAlgo.MaskImplicitGemm + if stride == 1 and (padding is None): + self.conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, dilation=dilation, bias=bias, indice_key=indice_key, algo=algo) + else: + self.conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, padding=padding, bias=bias, indice_key=indice_key, algo=algo) + self.stride = tuple(stride) if isinstance(stride, (list, tuple)) else (stride, stride, stride) + self.padding = padding + + def forward(self, x: SparseTensor) -> SparseTensor: + spatial_changed = any(s != 1 for s in self.stride) or (self.padding is not None) + new_data = self.conv(x.data) + new_shape = [x.shape[0], self.conv.out_channels] + new_layout = None if spatial_changed else x.layout + + if spatial_changed and (x.shape[0] != 1): + # spconv was non-1 stride will break the contiguous of the output tensor, sort by the coords + fwd = new_data.indices[:, 0].argsort() + bwd = torch.zeros_like(fwd).scatter_(0, fwd, torch.arange(fwd.shape[0], device=fwd.device)) + sorted_feats = new_data.features[fwd] + sorted_coords = new_data.indices[fwd] + unsorted_data = new_data + new_data = spconv.SparseConvTensor(sorted_feats, sorted_coords, unsorted_data.spatial_shape, unsorted_data.batch_size) # type: ignore + + out = SparseTensor( + new_data, shape=torch.Size(new_shape), layout=new_layout, + scale=tuple([s * stride for s, stride in zip(x._scale, self.stride)]), + spatial_cache=x._spatial_cache, + ) + + if spatial_changed and (x.shape[0] != 1): + out.register_spatial_cache(f'conv_{self.stride}_unsorted_data', unsorted_data) + out.register_spatial_cache(f'conv_{self.stride}_sort_bwd', bwd) 
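+            # The cached unsorted tensor and inverse permutation let a matching
+            # SparseInverseConv3d (below) restore spconv's original output order.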
+ + return out + + +class SparseInverseConv3d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None): + super(SparseInverseConv3d, self).__init__() + if 'spconv' not in globals(): + import spconv.pytorch as spconv + self.conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, bias=bias, indice_key=indice_key) + self.stride = tuple(stride) if isinstance(stride, (list, tuple)) else (stride, stride, stride) + + def forward(self, x: SparseTensor) -> SparseTensor: + spatial_changed = any(s != 1 for s in self.stride) + if spatial_changed: + # recover the original spconv order + data = x.get_spatial_cache(f'conv_{self.stride}_unsorted_data') + bwd = x.get_spatial_cache(f'conv_{self.stride}_sort_bwd') + data = data.replace_feature(x.feats[bwd]) + if DEBUG: + assert torch.equal(data.indices, x.coords[bwd]), 'Recover the original order failed' + else: + data = x.data + + new_data = self.conv(data) + new_shape = [x.shape[0], self.conv.out_channels] + new_layout = None if spatial_changed else x.layout + out = SparseTensor( + new_data, shape=torch.Size(new_shape), layout=new_layout, + scale=tuple([s // stride for s, stride in zip(x._scale, self.stride)]), + spatial_cache=x._spatial_cache, + ) + return out diff --git a/third_party/TRELLIS/trellis/modules/sparse/conv/conv_torchsparse.py b/third_party/TRELLIS/trellis/modules/sparse/conv/conv_torchsparse.py new file mode 100755 index 0000000000000000000000000000000000000000..1d612582d4b31f90aca3c00b693bbbc2550dc62c --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/conv/conv_torchsparse.py @@ -0,0 +1,38 @@ +import torch +import torch.nn as nn +from .. import SparseTensor + + +class SparseConv3d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None): + super(SparseConv3d, self).__init__() + if 'torchsparse' not in globals(): + import torchsparse + self.conv = torchsparse.nn.Conv3d(in_channels, out_channels, kernel_size, stride, 0, dilation, bias) + + def forward(self, x: SparseTensor) -> SparseTensor: + out = self.conv(x.data) + new_shape = [x.shape[0], self.conv.out_channels] + out = SparseTensor(out, shape=torch.Size(new_shape), layout=x.layout if all(s == 1 for s in self.conv.stride) else None) + out._spatial_cache = x._spatial_cache + out._scale = tuple([s * stride for s, stride in zip(x._scale, self.conv.stride)]) + return out + + +class SparseInverseConv3d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None): + super(SparseInverseConv3d, self).__init__() + if 'torchsparse' not in globals(): + import torchsparse + self.conv = torchsparse.nn.Conv3d(in_channels, out_channels, kernel_size, stride, 0, dilation, bias, transposed=True) + + def forward(self, x: SparseTensor) -> SparseTensor: + out = self.conv(x.data) + new_shape = [x.shape[0], self.conv.out_channels] + out = SparseTensor(out, shape=torch.Size(new_shape), layout=x.layout if all(s == 1 for s in self.conv.stride) else None) + out._spatial_cache = x._spatial_cache + out._scale = tuple([s // stride for s, stride in zip(x._scale, self.conv.stride)]) + return out + + + diff --git a/third_party/TRELLIS/trellis/modules/sparse/linear.py b/third_party/TRELLIS/trellis/modules/sparse/linear.py new file mode 100755 index 0000000000000000000000000000000000000000..a854e77ce87d1a190b9730d91f363a821ff250bd --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/linear.py 
@@ -0,0 +1,15 @@ +import torch +import torch.nn as nn +from . import SparseTensor + +__all__ = [ + 'SparseLinear' +] + + +class SparseLinear(nn.Linear): + def __init__(self, in_features, out_features, bias=True): + super(SparseLinear, self).__init__(in_features, out_features, bias) + + def forward(self, input: SparseTensor) -> SparseTensor: + return input.replace(super().forward(input.feats)) diff --git a/third_party/TRELLIS/trellis/modules/sparse/nonlinearity.py b/third_party/TRELLIS/trellis/modules/sparse/nonlinearity.py new file mode 100755 index 0000000000000000000000000000000000000000..f200098dd82011a3aeee1688b9eb17018fa78295 --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/nonlinearity.py @@ -0,0 +1,35 @@ +import torch +import torch.nn as nn +from . import SparseTensor + +__all__ = [ + 'SparseReLU', + 'SparseSiLU', + 'SparseGELU', + 'SparseActivation' +] + + +class SparseReLU(nn.ReLU): + def forward(self, input: SparseTensor) -> SparseTensor: + return input.replace(super().forward(input.feats)) + + +class SparseSiLU(nn.SiLU): + def forward(self, input: SparseTensor) -> SparseTensor: + return input.replace(super().forward(input.feats)) + + +class SparseGELU(nn.GELU): + def forward(self, input: SparseTensor) -> SparseTensor: + return input.replace(super().forward(input.feats)) + + +class SparseActivation(nn.Module): + def __init__(self, activation: nn.Module): + super().__init__() + self.activation = activation + + def forward(self, input: SparseTensor) -> SparseTensor: + return input.replace(self.activation(input.feats)) + diff --git a/third_party/TRELLIS/trellis/modules/sparse/norm.py b/third_party/TRELLIS/trellis/modules/sparse/norm.py new file mode 100755 index 0000000000000000000000000000000000000000..6b38a36682c098210000dc31d68ddc31ccd2929d --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/norm.py @@ -0,0 +1,58 @@ +import torch +import torch.nn as nn +from . import SparseTensor +from . import DEBUG + +__all__ = [ + 'SparseGroupNorm', + 'SparseLayerNorm', + 'SparseGroupNorm32', + 'SparseLayerNorm32', +] + + +class SparseGroupNorm(nn.GroupNorm): + def __init__(self, num_groups, num_channels, eps=1e-5, affine=True): + super(SparseGroupNorm, self).__init__(num_groups, num_channels, eps, affine) + + def forward(self, input: SparseTensor) -> SparseTensor: + nfeats = torch.zeros_like(input.feats) + for k in range(input.shape[0]): + if DEBUG: + assert (input.coords[input.layout[k], 0] == k).all(), f"SparseGroupNorm: batch index mismatch" + bfeats = input.feats[input.layout[k]] + bfeats = bfeats.permute(1, 0).reshape(1, input.shape[1], -1) + bfeats = super().forward(bfeats) + bfeats = bfeats.reshape(input.shape[1], -1).permute(1, 0) + nfeats[input.layout[k]] = bfeats + return input.replace(nfeats) + + +class SparseLayerNorm(nn.LayerNorm): + def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True): + super(SparseLayerNorm, self).__init__(normalized_shape, eps, elementwise_affine) + + def forward(self, input: SparseTensor) -> SparseTensor: + nfeats = torch.zeros_like(input.feats) + for k in range(input.shape[0]): + bfeats = input.feats[input.layout[k]] + bfeats = bfeats.permute(1, 0).reshape(1, input.shape[1], -1) + bfeats = super().forward(bfeats) + bfeats = bfeats.reshape(input.shape[1], -1).permute(1, 0) + nfeats[input.layout[k]] = bfeats + return input.replace(nfeats) + + +class SparseGroupNorm32(SparseGroupNorm): + """ + A GroupNorm layer that converts to float32 before the forward pass. 
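+    Casting to float32 keeps the mean/variance reductions numerically stable when
+    the surrounding model runs in float16/bfloat16; the result is cast back to the
+    input dtype afterwards.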
+ """ + def forward(self, x: SparseTensor) -> SparseTensor: + return super().forward(x.float()).type(x.dtype) + +class SparseLayerNorm32(SparseLayerNorm): + """ + A LayerNorm layer that converts to float32 before the forward pass. + """ + def forward(self, x: SparseTensor) -> SparseTensor: + return super().forward(x.float()).type(x.dtype) diff --git a/third_party/TRELLIS/trellis/modules/sparse/spatial.py b/third_party/TRELLIS/trellis/modules/sparse/spatial.py new file mode 100755 index 0000000000000000000000000000000000000000..ad7121473f335b307e2f7ea5f05c964d3aec0440 --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/spatial.py @@ -0,0 +1,110 @@ +from typing import * +import torch +import torch.nn as nn +from . import SparseTensor + +__all__ = [ + 'SparseDownsample', + 'SparseUpsample', + 'SparseSubdivide' +] + + +class SparseDownsample(nn.Module): + """ + Downsample a sparse tensor by a factor of `factor`. + Implemented as average pooling. + """ + def __init__(self, factor: Union[int, Tuple[int, ...], List[int]]): + super(SparseDownsample, self).__init__() + self.factor = tuple(factor) if isinstance(factor, (list, tuple)) else factor + + def forward(self, input: SparseTensor) -> SparseTensor: + DIM = input.coords.shape[-1] - 1 + factor = self.factor if isinstance(self.factor, tuple) else (self.factor,) * DIM + assert DIM == len(factor), 'Input coordinates must have the same dimension as the downsample factor.' + + coord = list(input.coords.unbind(dim=-1)) + for i, f in enumerate(factor): + coord[i+1] = coord[i+1] // f + + MAX = [coord[i+1].max().item() + 1 for i in range(DIM)] + OFFSET = torch.cumprod(torch.tensor(MAX[::-1]), 0).tolist()[::-1] + [1] + code = sum([c * o for c, o in zip(coord, OFFSET)]) + code, idx = code.unique(return_inverse=True) + + new_feats = torch.scatter_reduce( + torch.zeros(code.shape[0], input.feats.shape[1], device=input.feats.device, dtype=input.feats.dtype), + dim=0, + index=idx.unsqueeze(1).expand(-1, input.feats.shape[1]), + src=input.feats, + reduce='mean' + ) + new_coords = torch.stack( + [code // OFFSET[0]] + + [(code // OFFSET[i+1]) % MAX[i] for i in range(DIM)], + dim=-1 + ) + out = SparseTensor(new_feats, new_coords, input.shape,) + out._scale = tuple([s // f for s, f in zip(input._scale, factor)]) + out._spatial_cache = input._spatial_cache + + out.register_spatial_cache(f'upsample_{factor}_coords', input.coords) + out.register_spatial_cache(f'upsample_{factor}_layout', input.layout) + out.register_spatial_cache(f'upsample_{factor}_idx', idx) + + return out + + +class SparseUpsample(nn.Module): + """ + Upsample a sparse tensor by a factor of `factor`. + Implemented as nearest neighbor interpolation. + """ + def __init__(self, factor: Union[int, Tuple[int, int, int], List[int]]): + super(SparseUpsample, self).__init__() + self.factor = tuple(factor) if isinstance(factor, (list, tuple)) else factor + + def forward(self, input: SparseTensor) -> SparseTensor: + DIM = input.coords.shape[-1] - 1 + factor = self.factor if isinstance(self.factor, tuple) else (self.factor,) * DIM + assert DIM == len(factor), 'Input coordinates must have the same dimension as the upsample factor.' + + new_coords = input.get_spatial_cache(f'upsample_{factor}_coords') + new_layout = input.get_spatial_cache(f'upsample_{factor}_layout') + idx = input.get_spatial_cache(f'upsample_{factor}_idx') + if any([x is None for x in [new_coords, new_layout, idx]]): + raise ValueError('Upsample cache not found. 
SparseUpsample must be paired with SparseDownsample.')
+        new_feats = input.feats[idx]
+        out = SparseTensor(new_feats, new_coords, input.shape, new_layout)
+        out._scale = tuple([s * f for s, f in zip(input._scale, factor)])
+        out._spatial_cache = input._spatial_cache
+        return out
+
+class SparseSubdivide(nn.Module):
+    """
+    Subdivide a sparse tensor: each active voxel is split into its 2**DIM
+    children at twice the spatial resolution, with features copied to all children.
+    """
+    def __init__(self):
+        super(SparseSubdivide, self).__init__()
+
+    def forward(self, input: SparseTensor) -> SparseTensor:
+        DIM = input.coords.shape[-1] - 1
+        # enumerate the 2^DIM corner offsets of a subdivided cell
+        n_cube = torch.ones([2] * DIM, device=input.device, dtype=torch.int)
+        n_coords = torch.nonzero(n_cube)
+        n_coords = torch.cat([torch.zeros_like(n_coords[:, :1]), n_coords], dim=-1)
+        factor = n_coords.shape[0]
+        assert factor == 2 ** DIM
+        new_coords = input.coords.clone()
+        new_coords[:, 1:] *= 2
+        new_coords = new_coords.unsqueeze(1) + n_coords.unsqueeze(0).to(new_coords.dtype)
+
+        new_feats = input.feats.unsqueeze(1).expand(input.feats.shape[0], factor, *input.feats.shape[1:])
+        out = SparseTensor(new_feats.flatten(0, 1), new_coords.flatten(0, 1), input.shape)
+        out._scale = tuple([s * 2 for s in input._scale])
+        out._spatial_cache = input._spatial_cache
+        return out
+
diff --git a/third_party/TRELLIS/trellis/modules/sparse/transformer/__init__.py b/third_party/TRELLIS/trellis/modules/sparse/transformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b08b0d4e5bc24060a2cdc8df75d06dce122972bd
--- /dev/null
+++ b/third_party/TRELLIS/trellis/modules/sparse/transformer/__init__.py
@@ -0,0 +1,2 @@
+from .blocks import *
+from .modulated import *
\ No newline at end of file
diff --git a/third_party/TRELLIS/trellis/modules/sparse/transformer/blocks.py b/third_party/TRELLIS/trellis/modules/sparse/transformer/blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d037a49bf83e1c2dfb2f8c4b23d2e9d6c51e9f0
--- /dev/null
+++ b/third_party/TRELLIS/trellis/modules/sparse/transformer/blocks.py
@@ -0,0 +1,151 @@
+from typing import *
+import torch
+import torch.nn as nn
+from ..basic import SparseTensor
+from ..linear import SparseLinear
+from ..nonlinearity import SparseGELU
+from ..attention import SparseMultiHeadAttention, SerializeMode
+from ...norm import LayerNorm32
+
+
+class SparseFeedForwardNet(nn.Module):
+    def __init__(self, channels: int, mlp_ratio: float = 4.0):
+        super().__init__()
+        self.mlp = nn.Sequential(
+            SparseLinear(channels, int(channels * mlp_ratio)),
+            SparseGELU(approximate="tanh"),
+            SparseLinear(int(channels * mlp_ratio), channels),
+        )
+
+    def forward(self, x: SparseTensor) -> SparseTensor:
+        return self.mlp(x)
+
+
+class SparseTransformerBlock(nn.Module):
+    """
+    Sparse Transformer block (MSA + FFN).
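+
+    Example:
+        A minimal usage sketch (illustrative; assumes `x` is a SparseTensor whose
+        feats have 1024 channels):
+
+            block = SparseTransformerBlock(channels=1024, num_heads=16, attn_mode="full")
+            y = block(x)  # same coordinates, transformed features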
+ """ + def __init__( + self, + channels: int, + num_heads: int, + mlp_ratio: float = 4.0, + attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full", + window_size: Optional[int] = None, + shift_sequence: Optional[int] = None, + shift_window: Optional[Tuple[int, int, int]] = None, + serialize_mode: Optional[SerializeMode] = None, + use_checkpoint: bool = False, + use_rope: bool = False, + qk_rms_norm: bool = False, + qkv_bias: bool = True, + ln_affine: bool = False, + ): + super().__init__() + self.use_checkpoint = use_checkpoint + self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6) + self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6) + self.attn = SparseMultiHeadAttention( + channels, + num_heads=num_heads, + attn_mode=attn_mode, + window_size=window_size, + shift_sequence=shift_sequence, + shift_window=shift_window, + serialize_mode=serialize_mode, + qkv_bias=qkv_bias, + use_rope=use_rope, + qk_rms_norm=qk_rms_norm, + ) + self.mlp = SparseFeedForwardNet( + channels, + mlp_ratio=mlp_ratio, + ) + + def _forward(self, x: SparseTensor) -> SparseTensor: + h = x.replace(self.norm1(x.feats)) + h = self.attn(h) + x = x + h + h = x.replace(self.norm2(x.feats)) + h = self.mlp(h) + x = x + h + return x + + def forward(self, x: SparseTensor) -> SparseTensor: + if self.use_checkpoint: + return torch.utils.checkpoint.checkpoint(self._forward, x, use_reentrant=False) + else: + return self._forward(x) + + +class SparseTransformerCrossBlock(nn.Module): + """ + Sparse Transformer cross-attention block (MSA + MCA + FFN). + """ + def __init__( + self, + channels: int, + ctx_channels: int, + num_heads: int, + mlp_ratio: float = 4.0, + attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full", + window_size: Optional[int] = None, + shift_sequence: Optional[int] = None, + shift_window: Optional[Tuple[int, int, int]] = None, + serialize_mode: Optional[SerializeMode] = None, + use_checkpoint: bool = False, + use_rope: bool = False, + qk_rms_norm: bool = False, + qk_rms_norm_cross: bool = False, + qkv_bias: bool = True, + ln_affine: bool = False, + ): + super().__init__() + self.use_checkpoint = use_checkpoint + self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6) + self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6) + self.norm3 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6) + self.self_attn = SparseMultiHeadAttention( + channels, + num_heads=num_heads, + type="self", + attn_mode=attn_mode, + window_size=window_size, + shift_sequence=shift_sequence, + shift_window=shift_window, + serialize_mode=serialize_mode, + qkv_bias=qkv_bias, + use_rope=use_rope, + qk_rms_norm=qk_rms_norm, + ) + self.cross_attn = SparseMultiHeadAttention( + channels, + ctx_channels=ctx_channels, + num_heads=num_heads, + type="cross", + attn_mode="full", + qkv_bias=qkv_bias, + qk_rms_norm=qk_rms_norm_cross, + ) + self.mlp = SparseFeedForwardNet( + channels, + mlp_ratio=mlp_ratio, + ) + + def _forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor): + h = x.replace(self.norm1(x.feats)) + h = self.self_attn(h) + x = x + h + h = x.replace(self.norm2(x.feats)) + h = self.cross_attn(h, context) + x = x + h + h = x.replace(self.norm3(x.feats)) + h = self.mlp(h) + x = x + h + return x + + def forward(self, x: SparseTensor, context: torch.Tensor): + if self.use_checkpoint: + return torch.utils.checkpoint.checkpoint(self._forward, x, context, 
use_reentrant=False) + else: + return self._forward(x, context) diff --git a/third_party/TRELLIS/trellis/modules/sparse/transformer/modulated.py b/third_party/TRELLIS/trellis/modules/sparse/transformer/modulated.py new file mode 100644 index 0000000000000000000000000000000000000000..4a8416559f39acbed9e5996e9891c97f95c80c8f --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/sparse/transformer/modulated.py @@ -0,0 +1,166 @@ +from typing import * +import torch +import torch.nn as nn +from ..basic import SparseTensor +from ..attention import SparseMultiHeadAttention, SerializeMode +from ...norm import LayerNorm32 +from .blocks import SparseFeedForwardNet + + +class ModulatedSparseTransformerBlock(nn.Module): + """ + Sparse Transformer block (MSA + FFN) with adaptive layer norm conditioning. + """ + def __init__( + self, + channels: int, + num_heads: int, + mlp_ratio: float = 4.0, + attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full", + window_size: Optional[int] = None, + shift_sequence: Optional[int] = None, + shift_window: Optional[Tuple[int, int, int]] = None, + serialize_mode: Optional[SerializeMode] = None, + use_checkpoint: bool = False, + use_rope: bool = False, + qk_rms_norm: bool = False, + qkv_bias: bool = True, + share_mod: bool = False, + ): + super().__init__() + self.use_checkpoint = use_checkpoint + self.share_mod = share_mod + self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6) + self.norm2 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6) + self.attn = SparseMultiHeadAttention( + channels, + num_heads=num_heads, + attn_mode=attn_mode, + window_size=window_size, + shift_sequence=shift_sequence, + shift_window=shift_window, + serialize_mode=serialize_mode, + qkv_bias=qkv_bias, + use_rope=use_rope, + qk_rms_norm=qk_rms_norm, + ) + self.mlp = SparseFeedForwardNet( + channels, + mlp_ratio=mlp_ratio, + ) + if not share_mod: + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + nn.Linear(channels, 6 * channels, bias=True) + ) + + def _forward(self, x: SparseTensor, mod: torch.Tensor) -> SparseTensor: + if self.share_mod: + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1) + else: + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1) + h = x.replace(self.norm1(x.feats)) + h = h * (1 + scale_msa) + shift_msa + h = self.attn(h) + h = h * gate_msa + x = x + h + h = x.replace(self.norm2(x.feats)) + h = h * (1 + scale_mlp) + shift_mlp + h = self.mlp(h) + h = h * gate_mlp + x = x + h + return x + + def forward(self, x: SparseTensor, mod: torch.Tensor) -> SparseTensor: + if self.use_checkpoint: + return torch.utils.checkpoint.checkpoint(self._forward, x, mod, use_reentrant=False) + else: + return self._forward(x, mod) + + +class ModulatedSparseTransformerCrossBlock(nn.Module): + """ + Sparse Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning. 
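+    The conditioning vector is projected (or, when share_mod=True, supplied already
+    chunked) into shift/scale/gate triples that modulate the self-attention and MLP
+    branches; the cross-attention branch is left unmodulated.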
+ """ + def __init__( + self, + channels: int, + ctx_channels: int, + num_heads: int, + mlp_ratio: float = 4.0, + attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full", + window_size: Optional[int] = None, + shift_sequence: Optional[int] = None, + shift_window: Optional[Tuple[int, int, int]] = None, + serialize_mode: Optional[SerializeMode] = None, + use_checkpoint: bool = False, + use_rope: bool = False, + qk_rms_norm: bool = False, + qk_rms_norm_cross: bool = False, + qkv_bias: bool = True, + share_mod: bool = False, + + ): + super().__init__() + self.use_checkpoint = use_checkpoint + self.share_mod = share_mod + self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6) + self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6) + self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6) + self.self_attn = SparseMultiHeadAttention( + channels, + num_heads=num_heads, + type="self", + attn_mode=attn_mode, + window_size=window_size, + shift_sequence=shift_sequence, + shift_window=shift_window, + serialize_mode=serialize_mode, + qkv_bias=qkv_bias, + use_rope=use_rope, + qk_rms_norm=qk_rms_norm, + ) + self.cross_attn = SparseMultiHeadAttention( + channels, + ctx_channels=ctx_channels, + num_heads=num_heads, + type="cross", + attn_mode="full", + qkv_bias=qkv_bias, + qk_rms_norm=qk_rms_norm_cross, + ) + self.mlp = SparseFeedForwardNet( + channels, + mlp_ratio=mlp_ratio, + ) + if not share_mod: + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + nn.Linear(channels, 6 * channels, bias=True) + ) + + def _forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor) -> SparseTensor: + if self.share_mod: + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1) + else: + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1) + h = x.replace(self.norm1(x.feats)) + h = h * (1 + scale_msa) + shift_msa + h = self.self_attn(h) + h = h * gate_msa + x = x + h + h = x.replace(self.norm2(x.feats)) + h = self.cross_attn(h, context) + x = x + h + h = x.replace(self.norm3(x.feats)) + h = h * (1 + scale_mlp) + shift_mlp + h = self.mlp(h) + h = h * gate_mlp + x = x + h + return x + + def forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor) -> SparseTensor: + if self.use_checkpoint: + return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False) + else: + return self._forward(x, mod, context) diff --git a/third_party/TRELLIS/trellis/modules/spatial.py b/third_party/TRELLIS/trellis/modules/spatial.py new file mode 100644 index 0000000000000000000000000000000000000000..79e268d36c2ba49b0275744022a1a1e19983dae3 --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/spatial.py @@ -0,0 +1,48 @@ +import torch + + +def pixel_shuffle_3d(x: torch.Tensor, scale_factor: int) -> torch.Tensor: + """ + 3D pixel shuffle. + """ + B, C, H, W, D = x.shape + C_ = C // scale_factor**3 + x = x.reshape(B, C_, scale_factor, scale_factor, scale_factor, H, W, D) + x = x.permute(0, 1, 5, 2, 6, 3, 7, 4) + x = x.reshape(B, C_, H*scale_factor, W*scale_factor, D*scale_factor) + return x + + +def patchify(x: torch.Tensor, patch_size: int): + """ + Patchify a tensor. 
+
+    Args:
+        x (torch.Tensor): (N, C, *spatial) tensor
+        patch_size (int): Patch size
+    """
+    DIM = x.dim() - 2
+    for d in range(2, DIM + 2):
+        assert x.shape[d] % patch_size == 0, f"Dimension {d} of input tensor must be divisible by patch size, got {x.shape[d]} and {patch_size}"
+
+    x = x.reshape(*x.shape[:2], *sum([[x.shape[d] // patch_size, patch_size] for d in range(2, DIM + 2)], []))
+    x = x.permute(0, 1, *([2 * i + 3 for i in range(DIM)] + [2 * i + 2 for i in range(DIM)]))
+    x = x.reshape(x.shape[0], x.shape[1] * (patch_size ** DIM), *(x.shape[-DIM:]))
+    return x
+
+
+def unpatchify(x: torch.Tensor, patch_size: int):
+    """
+    Unpatchify a tensor.
+
+    Args:
+        x (torch.Tensor): (N, C, *spatial) tensor
+        patch_size (int): Patch size
+    """
+    DIM = x.dim() - 2
+    assert x.shape[1] % (patch_size ** DIM) == 0, f"Second dimension of input tensor must be divisible by patch size to unpatchify, got {x.shape[1]} and {patch_size ** DIM}"
+
+    x = x.reshape(x.shape[0], x.shape[1] // (patch_size ** DIM), *([patch_size] * DIM), *(x.shape[-DIM:]))
+    x = x.permute(0, 1, *(sum([[2 + DIM + i, 2 + i] for i in range(DIM)], [])))
+    x = x.reshape(x.shape[0], x.shape[1], *[x.shape[2 + 2 * i] * patch_size for i in range(DIM)])
+    return x
diff --git a/third_party/TRELLIS/trellis/modules/transformer/__init__.py b/third_party/TRELLIS/trellis/modules/transformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b08b0d4e5bc24060a2cdc8df75d06dce122972bd
--- /dev/null
+++ b/third_party/TRELLIS/trellis/modules/transformer/__init__.py
@@ -0,0 +1,2 @@
+from .blocks import *
+from .modulated import *
\ No newline at end of file
diff --git 
a/third_party/TRELLIS/trellis/modules/transformer/blocks.py b/third_party/TRELLIS/trellis/modules/transformer/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..c37eb7ed92f4aacfc9e974a63b247589d95977da --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/transformer/blocks.py @@ -0,0 +1,182 @@ +from typing import * +import torch +import torch.nn as nn +from ..attention import MultiHeadAttention +from ..norm import LayerNorm32 + + +class AbsolutePositionEmbedder(nn.Module): + """ + Embeds spatial positions into vector representations. + """ + def __init__(self, channels: int, in_channels: int = 3): + super().__init__() + self.channels = channels + self.in_channels = in_channels + self.freq_dim = channels // in_channels // 2 + self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim + self.freqs = 1.0 / (10000 ** self.freqs) + + def _sin_cos_embedding(self, x: torch.Tensor) -> torch.Tensor: + """ + Create sinusoidal position embeddings. + + Args: + x: a 1-D Tensor of N indices + + Returns: + an (N, D) Tensor of positional embeddings. + """ + self.freqs = self.freqs.to(x.device) + out = torch.outer(x, self.freqs) + out = torch.cat([torch.sin(out), torch.cos(out)], dim=-1) + return out + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x (torch.Tensor): (N, D) tensor of spatial positions + """ + N, D = x.shape + assert D == self.in_channels, "Input dimension must match number of input channels" + embed = self._sin_cos_embedding(x.reshape(-1)) + embed = embed.reshape(N, -1) + if embed.shape[1] < self.channels: + embed = torch.cat([embed, torch.zeros(N, self.channels - embed.shape[1], device=embed.device)], dim=-1) + return embed + + +class FeedForwardNet(nn.Module): + def __init__(self, channels: int, mlp_ratio: float = 4.0): + super().__init__() + self.mlp = nn.Sequential( + nn.Linear(channels, int(channels * mlp_ratio)), + nn.GELU(approximate="tanh"), + nn.Linear(int(channels * mlp_ratio), channels), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.mlp(x) + + +class TransformerBlock(nn.Module): + """ + Transformer block (MSA + FFN). + """ + def __init__( + self, + channels: int, + num_heads: int, + mlp_ratio: float = 4.0, + attn_mode: Literal["full", "windowed"] = "full", + window_size: Optional[int] = None, + shift_window: Optional[int] = None, + use_checkpoint: bool = False, + use_rope: bool = False, + qk_rms_norm: bool = False, + qkv_bias: bool = True, + ln_affine: bool = False, + ): + super().__init__() + self.use_checkpoint = use_checkpoint + self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6) + self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6) + self.attn = MultiHeadAttention( + channels, + num_heads=num_heads, + attn_mode=attn_mode, + window_size=window_size, + shift_window=shift_window, + qkv_bias=qkv_bias, + use_rope=use_rope, + qk_rms_norm=qk_rms_norm, + ) + self.mlp = FeedForwardNet( + channels, + mlp_ratio=mlp_ratio, + ) + + def _forward(self, x: torch.Tensor) -> torch.Tensor: + h = self.norm1(x) + h = self.attn(h) + x = x + h + h = self.norm2(x) + h = self.mlp(h) + x = x + h + return x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self.use_checkpoint: + return torch.utils.checkpoint.checkpoint(self._forward, x, use_reentrant=False) + else: + return self._forward(x) + + +class TransformerCrossBlock(nn.Module): + """ + Transformer cross-attention block (MSA + MCA + FFN). 
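+
+    Illustrative usage (a sketch; dense tensors, self-attention over x followed by
+    cross-attention into context):
+
+        >>> block = TransformerCrossBlock(channels=512, ctx_channels=1024, num_heads=8)
+        >>> out = block(x, context)  # x: (B, L, 512), context: (B, L_ctx, 1024)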
+ """ + def __init__( + self, + channels: int, + ctx_channels: int, + num_heads: int, + mlp_ratio: float = 4.0, + attn_mode: Literal["full", "windowed"] = "full", + window_size: Optional[int] = None, + shift_window: Optional[Tuple[int, int, int]] = None, + use_checkpoint: bool = False, + use_rope: bool = False, + qk_rms_norm: bool = False, + qk_rms_norm_cross: bool = False, + qkv_bias: bool = True, + ln_affine: bool = False, + ): + super().__init__() + self.use_checkpoint = use_checkpoint + self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6) + self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6) + self.norm3 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6) + self.self_attn = MultiHeadAttention( + channels, + num_heads=num_heads, + type="self", + attn_mode=attn_mode, + window_size=window_size, + shift_window=shift_window, + qkv_bias=qkv_bias, + use_rope=use_rope, + qk_rms_norm=qk_rms_norm, + ) + self.cross_attn = MultiHeadAttention( + channels, + ctx_channels=ctx_channels, + num_heads=num_heads, + type="cross", + attn_mode="full", + qkv_bias=qkv_bias, + qk_rms_norm=qk_rms_norm_cross, + ) + self.mlp = FeedForwardNet( + channels, + mlp_ratio=mlp_ratio, + ) + + def _forward(self, x: torch.Tensor, context: torch.Tensor): + h = self.norm1(x) + h = self.self_attn(h) + x = x + h + h = self.norm2(x) + h = self.cross_attn(h, context) + x = x + h + h = self.norm3(x) + h = self.mlp(h) + x = x + h + return x + + def forward(self, x: torch.Tensor, context: torch.Tensor): + if self.use_checkpoint: + return torch.utils.checkpoint.checkpoint(self._forward, x, context, use_reentrant=False) + else: + return self._forward(x, context) + \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/modules/transformer/modulated.py b/third_party/TRELLIS/trellis/modules/transformer/modulated.py new file mode 100644 index 0000000000000000000000000000000000000000..d4aeca0689e68f656b08f7aa822b7be839aa727d --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/transformer/modulated.py @@ -0,0 +1,157 @@ +from typing import * +import torch +import torch.nn as nn +from ..attention import MultiHeadAttention +from ..norm import LayerNorm32 +from .blocks import FeedForwardNet + + +class ModulatedTransformerBlock(nn.Module): + """ + Transformer block (MSA + FFN) with adaptive layer norm conditioning. 
+ """ + def __init__( + self, + channels: int, + num_heads: int, + mlp_ratio: float = 4.0, + attn_mode: Literal["full", "windowed"] = "full", + window_size: Optional[int] = None, + shift_window: Optional[Tuple[int, int, int]] = None, + use_checkpoint: bool = False, + use_rope: bool = False, + qk_rms_norm: bool = False, + qkv_bias: bool = True, + share_mod: bool = False, + ): + super().__init__() + self.use_checkpoint = use_checkpoint + self.share_mod = share_mod + self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6) + self.norm2 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6) + self.attn = MultiHeadAttention( + channels, + num_heads=num_heads, + attn_mode=attn_mode, + window_size=window_size, + shift_window=shift_window, + qkv_bias=qkv_bias, + use_rope=use_rope, + qk_rms_norm=qk_rms_norm, + ) + self.mlp = FeedForwardNet( + channels, + mlp_ratio=mlp_ratio, + ) + if not share_mod: + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + nn.Linear(channels, 6 * channels, bias=True) + ) + + def _forward(self, x: torch.Tensor, mod: torch.Tensor) -> torch.Tensor: + if self.share_mod: + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1) + else: + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1) + h = self.norm1(x) + h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1) + h = self.attn(h) + h = h * gate_msa.unsqueeze(1) + x = x + h + h = self.norm2(x) + h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1) + h = self.mlp(h) + h = h * gate_mlp.unsqueeze(1) + x = x + h + return x + + def forward(self, x: torch.Tensor, mod: torch.Tensor) -> torch.Tensor: + if self.use_checkpoint: + return torch.utils.checkpoint.checkpoint(self._forward, x, mod, use_reentrant=False) + else: + return self._forward(x, mod) + + +class ModulatedTransformerCrossBlock(nn.Module): + """ + Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning. 
+ """ + def __init__( + self, + channels: int, + ctx_channels: int, + num_heads: int, + mlp_ratio: float = 4.0, + attn_mode: Literal["full", "windowed"] = "full", + window_size: Optional[int] = None, + shift_window: Optional[Tuple[int, int, int]] = None, + use_checkpoint: bool = False, + use_rope: bool = False, + qk_rms_norm: bool = False, + qk_rms_norm_cross: bool = False, + qkv_bias: bool = True, + share_mod: bool = False, + ): + super().__init__() + self.use_checkpoint = use_checkpoint + self.share_mod = share_mod + self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6) + self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6) + self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6) + self.self_attn = MultiHeadAttention( + channels, + num_heads=num_heads, + type="self", + attn_mode=attn_mode, + window_size=window_size, + shift_window=shift_window, + qkv_bias=qkv_bias, + use_rope=use_rope, + qk_rms_norm=qk_rms_norm, + ) + self.cross_attn = MultiHeadAttention( + channels, + ctx_channels=ctx_channels, + num_heads=num_heads, + type="cross", + attn_mode="full", + qkv_bias=qkv_bias, + qk_rms_norm=qk_rms_norm_cross, + ) + self.mlp = FeedForwardNet( + channels, + mlp_ratio=mlp_ratio, + ) + if not share_mod: + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + nn.Linear(channels, 6 * channels, bias=True) + ) + + def _forward(self, x: torch.Tensor, mod: torch.Tensor, context: torch.Tensor): + if self.share_mod: + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1) + else: + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1) + h = self.norm1(x) + h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1) + h = self.self_attn(h) + h = h * gate_msa.unsqueeze(1) + x = x + h + h = self.norm2(x) + h = self.cross_attn(h, context) + x = x + h + h = self.norm3(x) + h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1) + h = self.mlp(h) + h = h * gate_mlp.unsqueeze(1) + x = x + h + return x + + def forward(self, x: torch.Tensor, mod: torch.Tensor, context: torch.Tensor): + if self.use_checkpoint: + return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False) + else: + return self._forward(x, mod, context) + \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/modules/utils.py b/third_party/TRELLIS/trellis/modules/utils.py new file mode 100755 index 0000000000000000000000000000000000000000..f0afb1b6c767aa2ad00bad96649fb30315e696ea --- /dev/null +++ b/third_party/TRELLIS/trellis/modules/utils.py @@ -0,0 +1,54 @@ +import torch.nn as nn +from ..modules import sparse as sp + +FP16_MODULES = ( + nn.Conv1d, + nn.Conv2d, + nn.Conv3d, + nn.ConvTranspose1d, + nn.ConvTranspose2d, + nn.ConvTranspose3d, + nn.Linear, + sp.SparseConv3d, + sp.SparseInverseConv3d, + sp.SparseLinear, +) + +def convert_module_to_f16(l): + """ + Convert primitive modules to float16. + """ + if isinstance(l, FP16_MODULES): + for p in l.parameters(): + p.data = p.data.half() + + +def convert_module_to_f32(l): + """ + Convert primitive modules to float32, undoing convert_module_to_f16(). + """ + if isinstance(l, FP16_MODULES): + for p in l.parameters(): + p.data = p.data.float() + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def scale_module(module, scale): + """ + Scale the parameters of a module and return it. 
+ """ + for p in module.parameters(): + p.detach().mul_(scale) + return module + + +def modulate(x, shift, scale): + return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1) diff --git a/third_party/TRELLIS/trellis/pipelines/__init__.py b/third_party/TRELLIS/trellis/pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..92b063773c5da1b1b34b68958fd650844b5ad55e --- /dev/null +++ b/third_party/TRELLIS/trellis/pipelines/__init__.py @@ -0,0 +1,47 @@ +from . import samplers +from .trellis_image_to_3d import TrellisImageTo3DPipeline +from .trellis_text_to_3d import TrellisTextTo3DPipeline + + +def from_pretrained(path: str): + """ + Load a pipeline from a model folder or a Hugging Face model hub. + + Args: + path: The path to the model. Can be either local path or a Hugging Face model name. + """ + import os + import json + is_local = os.path.exists(f"{path}/pipeline.json") + + if is_local: + config_file = f"{path}/pipeline.json" + else: + from huggingface_hub import hf_hub_download + config_file = hf_hub_download(path, "pipeline.json") + + with open(config_file, 'r') as f: + config = json.load(f) + return globals()[config['name']].from_pretrained(path) + + +def from_pretrained(path: str): + """ + Load a pipeline from a model folder or a Hugging Face model hub. + + Args: + path: The path to the model. Can be either local path or a Hugging Face model name. + """ + import os + import json + is_local = os.path.exists(f"{path}/pipeline.json") + + if is_local: + config_file = f"{path}/pipeline.json" + else: + from huggingface_hub import hf_hub_download + config_file = hf_hub_download(path, "pipeline.json") + + with open(config_file, 'r') as f: + config = json.load(f) + return globals()[config['name']].from_pretrained(path) diff --git a/third_party/TRELLIS/trellis/pipelines/base.py b/third_party/TRELLIS/trellis/pipelines/base.py new file mode 100644 index 0000000000000000000000000000000000000000..fd89a585762e31a42187cadf4e73b98258ed5864 --- /dev/null +++ b/third_party/TRELLIS/trellis/pipelines/base.py @@ -0,0 +1,68 @@ +from typing import * +import torch +import torch.nn as nn +from .. import models + + +class Pipeline: + """ + A base class for pipelines. + """ + def __init__( + self, + models: dict[str, nn.Module] = None, + ): + if models is None: + return + self.models = models + for model in self.models.values(): + model.eval() + + @staticmethod + def from_pretrained(path: str) -> "Pipeline": + """ + Load a pretrained model. 
+ """ + import os + import json + is_local = os.path.exists(f"{path}/pipeline.json") + + if is_local: + config_file = f"{path}/pipeline.json" + else: + from huggingface_hub import hf_hub_download + config_file = hf_hub_download(path, "pipeline.json") + + with open(config_file, 'r') as f: + args = json.load(f)['args'] + + _models = {} + for k, v in args['models'].items(): + try: + _models[k] = models.from_pretrained(f"{path}/{v}") + except: + _models[k] = models.from_pretrained(v) + + new_pipeline = Pipeline(_models) + new_pipeline._pretrained_args = args + return new_pipeline + + @property + def device(self) -> torch.device: + for model in self.models.values(): + if hasattr(model, 'device'): + return model.device + for model in self.models.values(): + if hasattr(model, 'parameters'): + return next(model.parameters()).device + raise RuntimeError("No device found.") + + def to(self, device: torch.device) -> None: + for model in self.models.values(): + model.to(device) + + def cuda(self) -> None: + self.to(torch.device("cuda")) + + def cpu(self) -> None: + self.to(torch.device("cpu")) \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/pipelines/samplers/__init__.py b/third_party/TRELLIS/trellis/pipelines/samplers/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..54d412fc5d8eb662081a92a56ad078243988c2f9 --- /dev/null +++ b/third_party/TRELLIS/trellis/pipelines/samplers/__init__.py @@ -0,0 +1,2 @@ +from .base import Sampler +from .flow_euler import FlowEulerSampler, FlowEulerCfgSampler, FlowEulerGuidanceIntervalSampler \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/pipelines/samplers/base.py b/third_party/TRELLIS/trellis/pipelines/samplers/base.py new file mode 100644 index 0000000000000000000000000000000000000000..1966ce787009a5ee0c1ed06dce491525ff1dbcbf --- /dev/null +++ b/third_party/TRELLIS/trellis/pipelines/samplers/base.py @@ -0,0 +1,20 @@ +from typing import * +from abc import ABC, abstractmethod + + +class Sampler(ABC): + """ + A base class for samplers. + """ + + @abstractmethod + def sample( + self, + model, + **kwargs + ): + """ + Sample from a model. + """ + pass + \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/pipelines/samplers/classifier_free_guidance_mixin.py b/third_party/TRELLIS/trellis/pipelines/samplers/classifier_free_guidance_mixin.py new file mode 100644 index 0000000000000000000000000000000000000000..5701b25f5d7a2197612eb256f8ee13e8c489da1f --- /dev/null +++ b/third_party/TRELLIS/trellis/pipelines/samplers/classifier_free_guidance_mixin.py @@ -0,0 +1,12 @@ +from typing import * + + +class ClassifierFreeGuidanceSamplerMixin: + """ + A mixin class for samplers that apply classifier-free guidance. 
+ """ + + def _inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, **kwargs): + pred = super()._inference_model(model, x_t, t, cond, **kwargs) + neg_pred = super()._inference_model(model, x_t, t, neg_cond, **kwargs) + return (1 + cfg_strength) * pred - cfg_strength * neg_pred diff --git a/third_party/TRELLIS/trellis/pipelines/samplers/flow_euler.py b/third_party/TRELLIS/trellis/pipelines/samplers/flow_euler.py new file mode 100644 index 0000000000000000000000000000000000000000..7801279601538801e3c4731106a96861ecb5e37a --- /dev/null +++ b/third_party/TRELLIS/trellis/pipelines/samplers/flow_euler.py @@ -0,0 +1,205 @@ +from typing import * +import torch +import numpy as np +from tqdm import tqdm +from easydict import EasyDict as edict +from .base import Sampler +from .classifier_free_guidance_mixin import ClassifierFreeGuidanceSamplerMixin +from .guidance_interval_mixin import GuidanceIntervalSamplerMixin + + +class FlowEulerSampler(Sampler): + """ + Generate samples from a flow-matching model using Euler sampling. + + Args: + sigma_min: The minimum scale of noise in flow. + """ + def __init__( + self, + sigma_min: float, + ): + self.sigma_min = sigma_min + + def _eps_to_xstart(self, x_t, t, eps): + assert x_t.shape == eps.shape + return (x_t - (self.sigma_min + (1 - self.sigma_min) * t) * eps) / (1 - t) + + def _xstart_to_eps(self, x_t, t, x_0): + assert x_t.shape == x_0.shape + return (x_t - (1 - t) * x_0) / (self.sigma_min + (1 - self.sigma_min) * t) + + def _v_to_xstart_eps(self, x_t, t, v): + assert x_t.shape == v.shape + eps = (1 - t) * v + x_t + x_0 = (1 - self.sigma_min) * x_t - (self.sigma_min + (1 - self.sigma_min) * t) * v + return x_0, eps + + def _inference_model(self, model, x_t, t, cond=None, **kwargs): + t = torch.tensor([1000 * t] * x_t.shape[0], device=x_t.device, dtype=torch.float32) + + if cond is not None and cond.shape[0] == 1 and x_t.shape[0] > 1: + cond = cond.repeat(x_t.shape[0], *([1] * (len(cond.shape) - 1))) + + return model(x_t, t, cond, **kwargs) + + def _get_model_prediction(self, model, x_t, t, cond=None, **kwargs): + pred_v = self._inference_model(model, x_t, t, cond, **kwargs) + pred_x_0, pred_eps = self._v_to_xstart_eps(x_t=x_t, t=t, v=pred_v) + return pred_x_0, pred_eps, pred_v + + @torch.no_grad() + def sample_once( + self, + model, + x_t, + t: float, + t_prev: float, + cond: Optional[Any] = None, + **kwargs + ): + """ + Sample x_{t-1} from the model using Euler method. + + Args: + model: The model to sample from. + x_t: The [N x C x ...] tensor of noisy inputs at time t. + t: The current timestep. + t_prev: The previous timestep. + cond: conditional information. + **kwargs: Additional arguments for model inference. + + Returns: + a dict containing the following + - 'pred_x_prev': x_{t-1}. + - 'pred_x_0': a prediction of x_0. + """ + pred_x_0, pred_eps, pred_v = self._get_model_prediction(model, x_t, t, cond, **kwargs) + pred_x_prev = x_t - (t - t_prev) * pred_v + return edict({"pred_x_prev": pred_x_prev, "pred_x_0": pred_x_0}) + + @torch.no_grad() + def sample( + self, + model, + noise, + cond: Optional[Any] = None, + steps: int = 50, + rescale_t: float = 1.0, + verbose: bool = True, + **kwargs + ): + """ + Generate samples from the model using Euler method. + + Args: + model: The model to sample from. + noise: The initial noise tensor. + cond: conditional information. + steps: The number of steps to sample. + rescale_t: The rescale factor for t. + verbose: If True, show a progress bar. + **kwargs: Additional arguments for model_inference. 
+
+        Returns:
+            a dict containing the following
+            - 'samples': the model samples.
+            - 'pred_x_t': a list of prediction of x_t.
+            - 'pred_x_0': a list of prediction of x_0.
+        """
+
+        sample = noise
+        t_seq = np.linspace(1, 0, steps + 1)
+        t_seq = rescale_t * t_seq / (1 + (rescale_t - 1) * t_seq)
+        t_pairs = list((t_seq[i], t_seq[i + 1]) for i in range(steps))
+
+        ret = edict({"samples": None, "pred_x_t": [], "pred_x_0": []})
+        for t, t_prev in tqdm(t_pairs, desc="Sampling", disable=not verbose):
+            out = self.sample_once(model, sample, t, t_prev, cond, **kwargs)
+            sample = out.pred_x_prev
+            ret.pred_x_t.append(out.pred_x_prev)
+            ret.pred_x_0.append(out.pred_x_0)
+        ret.samples = sample
+        return ret
+
+
+class FlowEulerCfgSampler(ClassifierFreeGuidanceSamplerMixin, FlowEulerSampler):
+    """
+    Generate samples from a flow-matching model using Euler sampling with classifier-free guidance.
+    """
+    @torch.no_grad()
+    def sample(
+        self,
+        model,
+        noise,
+        cond,
+        neg_cond,
+        steps: int = 50,
+        rescale_t: float = 1.0,
+        cfg_strength: float = 3.0,
+        verbose: bool = True,
+        **kwargs
+    ):
+        """
+        Generate samples from the model using Euler method.
+
+        Args:
+            model: The model to sample from.
+            noise: The initial noise tensor.
+            cond: conditional information.
+            neg_cond: negative conditional information.
+            steps: The number of steps to sample.
+            rescale_t: The rescale factor for t.
+            cfg_strength: The strength of classifier-free guidance.
+            verbose: If True, show a progress bar.
+            **kwargs: Additional arguments for model_inference.
+
+        Returns:
+            a dict containing the following
+            - 'samples': the model samples.
+            - 'pred_x_t': a list of prediction of x_t.
+            - 'pred_x_0': a list of prediction of x_0.
+        """
+        return super().sample(model, noise, cond, steps, rescale_t, verbose, neg_cond=neg_cond, cfg_strength=cfg_strength, **kwargs)
+
+
+class FlowEulerGuidanceIntervalSampler(GuidanceIntervalSamplerMixin, FlowEulerSampler):
+    """
+    Generate samples from a flow-matching model using Euler sampling with classifier-free guidance and interval.
+    """
+    @torch.no_grad()
+    def sample(
+        self,
+        model,
+        noise,
+        cond,
+        neg_cond,
+        steps: int = 50,
+        rescale_t: float = 1.0,
+        cfg_strength: float = 3.0,
+        cfg_interval: Tuple[float, float] = (0.0, 1.0),
+        verbose: bool = True,
+        **kwargs
+    ):
+        """
+        Generate samples from the model using Euler method.
+
+        Args:
+            model: The model to sample from.
+            noise: The initial noise tensor.
+            cond: conditional information.
+            neg_cond: negative conditional information.
+            steps: The number of steps to sample.
+            rescale_t: The rescale factor for t.
+            cfg_strength: The strength of classifier-free guidance.
+            cfg_interval: The interval for classifier-free guidance.
+            verbose: If True, show a progress bar.
+            **kwargs: Additional arguments for model_inference.
+
+        Returns:
+            a dict containing the following
+            - 'samples': the model samples.
+            - 'pred_x_t': a list of prediction of x_t.
+            - 'pred_x_0': a list of prediction of x_0.
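+
+        Note:
+            Guidance is only applied at timesteps t with cfg_interval[0] <= t <= cfg_interval[1];
+            outside the interval the plain conditional prediction is used.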
+ """ + return super().sample(model, noise, cond, steps, rescale_t, verbose, neg_cond=neg_cond, cfg_strength=cfg_strength, cfg_interval=cfg_interval, **kwargs) diff --git a/third_party/TRELLIS/trellis/pipelines/samplers/guidance_interval_mixin.py b/third_party/TRELLIS/trellis/pipelines/samplers/guidance_interval_mixin.py new file mode 100644 index 0000000000000000000000000000000000000000..7074a4d5fea20a8f799416aa6571faca4f9eea06 --- /dev/null +++ b/third_party/TRELLIS/trellis/pipelines/samplers/guidance_interval_mixin.py @@ -0,0 +1,15 @@ +from typing import * + + +class GuidanceIntervalSamplerMixin: + """ + A mixin class for samplers that apply classifier-free guidance with interval. + """ + + def _inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, cfg_interval, **kwargs): + if cfg_interval[0] <= t <= cfg_interval[1]: + pred = super()._inference_model(model, x_t, t, cond, **kwargs) + neg_pred = super()._inference_model(model, x_t, t, neg_cond, **kwargs) + return (1 + cfg_strength) * pred - cfg_strength * neg_pred + else: + return super()._inference_model(model, x_t, t, cond, **kwargs) diff --git a/third_party/TRELLIS/trellis/pipelines/trellis_image_to_3d.py b/third_party/TRELLIS/trellis/pipelines/trellis_image_to_3d.py new file mode 100644 index 0000000000000000000000000000000000000000..e40e75bc4d6e0637a735d6e1367116e56cfd730b --- /dev/null +++ b/third_party/TRELLIS/trellis/pipelines/trellis_image_to_3d.py @@ -0,0 +1,525 @@ +from typing import * +import open3d_pycg as o3d +from contextlib import contextmanager +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from torchvision import transforms +from PIL import Image +import rembg +from .base import Pipeline +from . import samplers +from ..modules import sparse as sp + + +class TrellisImageTo3DPipeline(Pipeline): + """ + Pipeline for inferring Trellis image-to-3D models. + + Args: + models (dict[str, nn.Module]): The models to use in the pipeline. + sparse_structure_sampler (samplers.Sampler): The sampler for the sparse structure. + slat_sampler (samplers.Sampler): The sampler for the structured latent. + slat_normalization (dict): The normalization parameters for the structured latent. + image_cond_model (str): The name of the image conditioning model. + """ + def __init__( + self, + models: dict[str, nn.Module] = None, + sparse_structure_sampler: samplers.Sampler = None, + slat_sampler: samplers.Sampler = None, + slat_normalization: dict = None, + image_cond_model: str = None, + ): + if models is None: + return + super().__init__(models) + self.sparse_structure_sampler = sparse_structure_sampler + self.slat_sampler = slat_sampler + self.sparse_structure_sampler_params = {} + self.slat_sampler_params = {} + self.slat_normalization = slat_normalization + self.rembg_session = None + self._init_image_cond_model(image_cond_model) + + @staticmethod + def from_pretrained(path: str) -> "TrellisImageTo3DPipeline": + """ + Load a pretrained model. + + Args: + path (str): The path to the model. Can be either local path or a Hugging Face repository. 
+ """ + pipeline = super(TrellisImageTo3DPipeline, TrellisImageTo3DPipeline).from_pretrained(path) + new_pipeline = TrellisImageTo3DPipeline() + new_pipeline.__dict__ = pipeline.__dict__ + args = pipeline._pretrained_args + + + new_pipeline.sparse_structure_sampler = getattr(samplers, args['sparse_structure_sampler']['name'])(**args['sparse_structure_sampler']['args']) + new_pipeline.sparse_structure_sampler_params = args['sparse_structure_sampler']['params'] + + new_pipeline.slat_sampler = getattr(samplers, args['slat_sampler']['name'])(**args['slat_sampler']['args']) + new_pipeline.slat_sampler_params = args['slat_sampler']['params'] + + new_pipeline.slat_normalization = args['slat_normalization'] + + new_pipeline._init_image_cond_model(args['image_cond_model']) + + return new_pipeline + + def _init_image_cond_model(self, name: str): + """ + Initialize the image conditioning model. + """ + dinov2_model = torch.hub.load('facebookresearch/dinov2', name, pretrained=True) + dinov2_model.eval() + self.models['image_cond_model'] = dinov2_model + transform = transforms.Compose([ + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + self.image_cond_model_transform = transform + + def voxelize(self, mesh: o3d.geometry.TriangleMesh) -> torch.Tensor: + """ + Voxelize a mesh. + + Args: + mesh (o3d.geometry.TriangleMesh): The mesh to voxelize. + sha256 (str): The SHA256 hash of the mesh. + output_dir (str): The output directory. + """ + vertices = np.asarray(mesh.vertices) + aabb = np.stack([vertices.min(0), vertices.max(0)]) + center = (aabb[0] + aabb[1]) / 2 + scale = (aabb[1] - aabb[0]).max() + vertices = (vertices - center) / scale + vertices = np.clip(vertices, -0.5 + 1e-6, 0.5 - 1e-6) + mesh.vertices = o3d.utility.Vector3dVector(vertices) + voxel_grid = o3d.geometry.VoxelGrid.create_from_triangle_mesh_within_bounds(mesh, voxel_size=1/64, min_bound=(-0.5, -0.5, -0.5), max_bound=(0.5, 0.5, 0.5)) + vertices = np.array([voxel.grid_index for voxel in voxel_grid.get_voxels()]) + return torch.tensor(vertices).int().cuda() + + def preprocess_image(self, input: Image.Image) -> Image.Image: + """ + Preprocess the input image. 
+ """ + # if has alpha channel, use it directly; otherwise, remove background + has_alpha = False + if input.mode == 'RGBA': + alpha = np.array(input)[:, :, 3] + if not np.all(alpha == 255): + has_alpha = True + if has_alpha: + output = input + else: + input = input.convert('RGB') + max_size = max(input.size) + scale = min(1, 1024 / max_size) + if scale < 1: + input = input.resize((int(input.width * scale), int(input.height * scale)), Image.Resampling.LANCZOS) + if getattr(self, 'rembg_session', None) is None: + self.rembg_session = rembg.new_session('u2net') + output = rembg.remove(input, session=self.rembg_session) + output_np = np.array(output) + alpha = output_np[:, :, 3] + bbox = np.argwhere(alpha > 0.8 * 255) + bbox = np.min(bbox[:, 1]), np.min(bbox[:, 0]), np.max(bbox[:, 1]), np.max(bbox[:, 0]) + center = (bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2 + size = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) + size = int(size * 1.2) + bbox = center[0] - size // 2, center[1] - size // 2, center[0] + size // 2, center[1] + size // 2 + output = output.crop(bbox) # type: ignore + output = output.resize((518, 518), Image.Resampling.LANCZOS) + output = np.array(output).astype(np.float32) / 255 + output = output[:, :, :3] * output[:, :, 3:4] + output = Image.fromarray((output * 255).astype(np.uint8)) + return output + + @torch.no_grad() + def encode_image(self, image: Union[torch.Tensor, list[Image.Image]]) -> torch.Tensor: + """ + Encode the image. + + Args: + image (Union[torch.Tensor, list[Image.Image]]): The image to encode + + Returns: + torch.Tensor: The encoded features. + """ + if isinstance(image, torch.Tensor): + assert image.ndim == 4, "Image tensor should be batched (B, C, H, W)" + elif isinstance(image, list): + assert all(isinstance(i, Image.Image) for i in image), "Image list should be list of PIL images" + image = [i.resize((518, 518), Image.LANCZOS) for i in image] + image = [np.array(i.convert('RGB')).astype(np.float32) / 255 for i in image] + image = [torch.from_numpy(i).permute(2, 0, 1).float() for i in image] + image = torch.stack(image).to(self.device) + else: + raise ValueError(f"Unsupported type of image: {type(image)}") + + image = self.image_cond_model_transform(image).to(self.device) + features = self.models['image_cond_model'](image, is_training=True)['x_prenorm'] + patchtokens = F.layer_norm(features, features.shape[-1:]) + return patchtokens + + def get_cond(self, image: Union[torch.Tensor, list[Image.Image]]) -> dict: + """ + Get the conditioning information for the model. + + Args: + image (Union[torch.Tensor, list[Image.Image]]): The image prompts. + + Returns: + dict: The conditioning information + """ + cond = self.encode_image(image) + neg_cond = torch.zeros_like(cond) + return { + 'cond': cond, + 'neg_cond': neg_cond, + } + # torch.Size([1, 1374, 1024]) + + + def sample_sparse_structure( + self, + cond: dict, + num_samples: int = 1, + sampler_params: dict = {}, + ) -> torch.Tensor: + """ + Sample sparse structures with the given conditioning. + + Args: + cond (dict): The conditioning information. + num_samples (int): The number of samples to generate. + sampler_params (dict): Additional parameters for the sampler. 
+ """ + # Sample occupancy latent + flow_model = self.models['sparse_structure_flow_model'] + reso = flow_model.resolution + noise = torch.randn(num_samples, flow_model.in_channels, reso, reso, reso).to(self.device) + sampler_params = {**self.sparse_structure_sampler_params, **sampler_params} + z_s = self.sparse_structure_sampler.sample( + flow_model, + noise, + **cond, + **sampler_params, + verbose=True + ).samples + + # Decode occupancy latent + decoder = self.models['sparse_structure_decoder'] + coords = torch.argwhere(decoder(z_s)>0)[:, [0, 2, 3, 4]].int() + + return coords + + def decode_slat( + self, + slat: sp.SparseTensor, + formats: List[str] = ['mesh', 'gaussian', 'radiance_field'], + ) -> dict: + """ + Decode the structured latent. + + Args: + slat (sp.SparseTensor): The structured latent. + formats (List[str]): The formats to decode the structured latent to. + + Returns: + dict: The decoded structured latent. + """ + ret = {} + if 'mesh' in formats: + ret['mesh'] = self.models['slat_decoder_mesh'](slat) + if 'gaussian' in formats: + ret['gaussian'] = self.models['slat_decoder_gs'](slat) + if 'radiance_field' in formats: + ret['radiance_field'] = self.models['slat_decoder_rf'](slat) + return ret + + def sample_slat( + self, + cond: dict, + coords: torch.Tensor, + sampler_params: dict = {}, + ) -> sp.SparseTensor: + """ + Sample structured latent with the given conditioning. + + Args: + cond (dict): The conditioning information. + coords (torch.Tensor): The coordinates of the sparse structure. + sampler_params (dict): Additional parameters for the sampler. + """ + # Sample structured latent + flow_model = self.models['slat_flow_model'] + noise = sp.SparseTensor( + feats=torch.randn(coords.shape[0], flow_model.in_channels).to(self.device), + coords=coords, + ) + sampler_params = {**self.slat_sampler_params, **sampler_params} + slat = self.slat_sampler.sample( + flow_model, + noise, + **cond, + **sampler_params, + verbose=True + ).samples + + std = torch.tensor(self.slat_normalization['std'])[None].to(slat.device) + mean = torch.tensor(self.slat_normalization['mean'])[None].to(slat.device) + slat = slat * std + mean + + return slat + + @torch.no_grad() + def run_diffusion_step( + self, + seed: int = 42, + noise: sp.SparseTensor = None, + cond: dict = None, + sampler_params: dict = {}, + **kwargs, + ) -> dict: + """ + Run the texture generation(2nd stage) pipeline. + + Args: + binary_voxel (np.ndarray): The input binary voxel. + image (Image.Image or a list of Image.Image): The image prompt(s). + num_samples (int): The number of samples to generate. + sparse_structure_sampler_params (dict): Additional parameters for the sparse structure sampler. + slat_sampler_params (dict): Additional parameters for the structured latent sampler. + preprocess_image (bool): Whether to preprocess the image. 
+ """ + + torch.manual_seed(seed) + + # Sample structured latent + flow_model = self.models['slat_flow_model'] + sampler_params = {**self.slat_sampler_params, **sampler_params} + slat = self.slat_sampler.sample( + flow_model, + noise, + **cond, + **sampler_params, + verbose=True + ).samples + + std = torch.tensor(self.slat_normalization['std'])[None].to(slat.device) + mean = torch.tensor(self.slat_normalization['mean'])[None].to(slat.device) + slat = slat * std + mean + return slat + + @torch.no_grad() + def run_partial_denoising( + self, + seed: int = 42, + noise: sp.SparseTensor = None, + image: Image.Image = None, + sampler_params: dict = {}, + formats: List[str] = ["mesh", "gaussian", "radiance_field"], + **kwargs, + ) -> dict: + """ + Run the texture generation(2nd stage) pipeline. + + Args: + binary_voxel (np.ndarray): The input binary voxel. + image (Image.Image or a list of Image.Image): The image prompt(s). + num_samples (int): The number of samples to generate. + sparse_structure_sampler_params (dict): Additional parameters for the sparse structure sampler. + slat_sampler_params (dict): Additional parameters for the structured latent sampler. + preprocess_image (bool): Whether to preprocess the image. + """ + + torch.manual_seed(seed) + + cond = torch.zeros((1, 1374, 1024)).cuda() + neg_cond = torch.zeros_like(cond) + cond = { + 'cond': cond, + 'neg_cond': neg_cond, + } + + image = self.preprocess_image(image) + cond = self.get_cond([image]) + + # Sample structured latent + flow_model = self.models['slat_flow_model'] + sampler_params = {**self.slat_sampler_params, **sampler_params} + slat = self.slat_sampler.sample( + flow_model, + noise, + **cond, + **sampler_params, + verbose=True + ).samples + + std = torch.tensor(self.slat_normalization['std'])[None].to(slat.device) + mean = torch.tensor(self.slat_normalization['mean'])[None].to(slat.device) + slat = slat * std + mean + return self.decode_slat(slat, formats) + + @torch.no_grad() + def run_detail_variation( + self, + mesh: o3d.geometry.TriangleMesh, + image: Image.Image, + num_samples: int = 1, + seed: int = 42, + slat_sampler_params: dict = {}, + formats: List[str] = ["mesh", "gaussian", "radiance_field"], + preprocess_image: bool = True, + **kwargs, + ) -> dict: + """ + Run the texture generation(2nd stage) pipeline. + + Args: + binary_voxel (np.ndarray): The input binary voxel. + image (Image.Image or a list of Image.Image): The image prompt(s). + num_samples (int): The number of samples to generate. + sparse_structure_sampler_params (dict): Additional parameters for the sparse structure sampler. + slat_sampler_params (dict): Additional parameters for the structured latent sampler. + preprocess_image (bool): Whether to preprocess the image. + """ + + if preprocess_image: + image = self.preprocess_image(image) + cond = self.get_cond([image]) + + coords = self.voxelize(mesh) + coords = torch.cat([ + torch.arange(num_samples).repeat_interleave(coords.shape[0], 0)[:, None].int().cuda(), + coords.repeat(num_samples, 1) + ], 1) + torch.manual_seed(seed) + slat = self.sample_slat(cond, coords, slat_sampler_params) + return self.decode_slat(slat, formats) + + @torch.no_grad() + def run( + self, + image: Image.Image, + num_samples: int = 1, + seed: int = 42, + sparse_structure_sampler_params: dict = {}, + slat_sampler_params: dict = {}, + formats: List[str] = ['mesh', 'gaussian', 'radiance_field'], + preprocess_image: bool = True, + ) -> dict: + """ + Run the pipeline. + + Args: + image (Image.Image): The image prompt. 
+ num_samples (int): The number of samples to generate. + seed (int): The random seed. + sparse_structure_sampler_params (dict): Additional parameters for the sparse structure sampler. + slat_sampler_params (dict): Additional parameters for the structured latent sampler. + formats (List[str]): The formats to decode the structured latent to. + preprocess_image (bool): Whether to preprocess the image. + """ + if preprocess_image: + image = self.preprocess_image(image) + cond = self.get_cond([image]) + torch.manual_seed(seed) + coords = self.sample_sparse_structure(cond, num_samples, sparse_structure_sampler_params) + slat = self.sample_slat(cond, coords, slat_sampler_params) + return self.decode_slat(slat, formats) + + @contextmanager + def inject_sampler_multi_image( + self, + sampler_name: str, + num_images: int, + num_steps: int, + mode: Literal['stochastic', 'multidiffusion'] = 'stochastic', + ): + """ + Inject a sampler with multiple images as condition. + + Args: + sampler_name (str): The name of the sampler to inject. + num_images (int): The number of images to condition on. + num_steps (int): The number of steps to run the sampler for. + """ + sampler = getattr(self, sampler_name) + setattr(sampler, f'_old_inference_model', sampler._inference_model) + + if mode == 'stochastic': + if num_images > num_steps: + print(f"\033[93mWarning: number of conditioning images is greater than number of steps for {sampler_name}. " + "This may lead to performance degradation.\033[0m") + + cond_indices = (np.arange(num_steps) % num_images).tolist() + def _new_inference_model(self, model, x_t, t, cond, **kwargs): + cond_idx = cond_indices.pop(0) + cond_i = cond[cond_idx:cond_idx+1] + return self._old_inference_model(model, x_t, t, cond=cond_i, **kwargs) + + elif mode =='multidiffusion': + from .samplers import FlowEulerSampler + def _new_inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, cfg_interval, **kwargs): + if cfg_interval[0] <= t <= cfg_interval[1]: + preds = [] + for i in range(len(cond)): + preds.append(FlowEulerSampler._inference_model(self, model, x_t, t, cond[i:i+1], **kwargs)) + pred = sum(preds) / len(preds) + neg_pred = FlowEulerSampler._inference_model(self, model, x_t, t, neg_cond, **kwargs) + return (1 + cfg_strength) * pred - cfg_strength * neg_pred + else: + preds = [] + for i in range(len(cond)): + preds.append(FlowEulerSampler._inference_model(self, model, x_t, t, cond[i:i+1], **kwargs)) + pred = sum(preds) / len(preds) + return pred + + else: + raise ValueError(f"Unsupported mode: {mode}") + + sampler._inference_model = _new_inference_model.__get__(sampler, type(sampler)) + + yield + + sampler._inference_model = sampler._old_inference_model + delattr(sampler, f'_old_inference_model') + + @torch.no_grad() + def run_multi_image( + self, + images: List[Image.Image], + num_samples: int = 1, + seed: int = 42, + sparse_structure_sampler_params: dict = {}, + slat_sampler_params: dict = {}, + formats: List[str] = ['mesh', 'gaussian', 'radiance_field'], + preprocess_image: bool = True, + mode: Literal['stochastic', 'multidiffusion'] = 'stochastic', + ) -> dict: + """ + Run the pipeline with multiple images as condition + + Args: + images (List[Image.Image]): The multi-view images of the assets + num_samples (int): The number of samples to generate. + sparse_structure_sampler_params (dict): Additional parameters for the sparse structure sampler. + slat_sampler_params (dict): Additional parameters for the structured latent sampler. 
+ preprocess_image (bool): Whether to preprocess the image. + """ + if preprocess_image: + images = [self.preprocess_image(image) for image in images] + cond = self.get_cond(images) + cond['neg_cond'] = cond['neg_cond'][:1] + torch.manual_seed(seed) + ss_steps = {**self.sparse_structure_sampler_params, **sparse_structure_sampler_params}.get('steps') + with self.inject_sampler_multi_image('sparse_structure_sampler', len(images), ss_steps, mode=mode): + coords = self.sample_sparse_structure(cond, num_samples, sparse_structure_sampler_params) + slat_steps = {**self.slat_sampler_params, **slat_sampler_params}.get('steps') + with self.inject_sampler_multi_image('slat_sampler', len(images), slat_steps, mode=mode): + slat = self.sample_slat(cond, coords, slat_sampler_params) + return self.decode_slat(slat, formats) \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/pipelines/trellis_text_to_3d.py b/third_party/TRELLIS/trellis/pipelines/trellis_text_to_3d.py new file mode 100644 index 0000000000000000000000000000000000000000..19941267c7ead83a89fb477ba1225a341a6aaa59 --- /dev/null +++ b/third_party/TRELLIS/trellis/pipelines/trellis_text_to_3d.py @@ -0,0 +1,278 @@ +from typing import * +import torch +import torch.nn as nn +import numpy as np +from transformers import CLIPTextModel, AutoTokenizer +import open3d_pycg as o3d +from .base import Pipeline +from . import samplers +from ..modules import sparse as sp + + +class TrellisTextTo3DPipeline(Pipeline): + """ + Pipeline for inferring Trellis text-to-3D models. + + Args: + models (dict[str, nn.Module]): The models to use in the pipeline. + sparse_structure_sampler (samplers.Sampler): The sampler for the sparse structure. + slat_sampler (samplers.Sampler): The sampler for the structured latent. + slat_normalization (dict): The normalization parameters for the structured latent. + text_cond_model (str): The name of the text conditioning model. + """ + def __init__( + self, + models: dict[str, nn.Module] = None, + sparse_structure_sampler: samplers.Sampler = None, + slat_sampler: samplers.Sampler = None, + slat_normalization: dict = None, + text_cond_model: str = None, + ): + if models is None: + return + super().__init__(models) + self.sparse_structure_sampler = sparse_structure_sampler + self.slat_sampler = slat_sampler + self.sparse_structure_sampler_params = {} + self.slat_sampler_params = {} + self.slat_normalization = slat_normalization + self._init_text_cond_model(text_cond_model) + + @staticmethod + def from_pretrained(path: str) -> "TrellisTextTo3DPipeline": + """ + Load a pretrained model. + + Args: + path (str): The path to the model. Can be either local path or a Hugging Face repository. 
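+
+        Example (illustrative; substitute a real local folder or repo id):
+            >>> pipeline = TrellisTextTo3DPipeline.from_pretrained("microsoft/TRELLIS-text-xlarge")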
+ """ + pipeline = super(TrellisTextTo3DPipeline, TrellisTextTo3DPipeline).from_pretrained(path) + new_pipeline = TrellisTextTo3DPipeline() + new_pipeline.__dict__ = pipeline.__dict__ + args = pipeline._pretrained_args + + new_pipeline.sparse_structure_sampler = getattr(samplers, args['sparse_structure_sampler']['name'])(**args['sparse_structure_sampler']['args']) + new_pipeline.sparse_structure_sampler_params = args['sparse_structure_sampler']['params'] + + new_pipeline.slat_sampler = getattr(samplers, args['slat_sampler']['name'])(**args['slat_sampler']['args']) + new_pipeline.slat_sampler_params = args['slat_sampler']['params'] + + new_pipeline.slat_normalization = args['slat_normalization'] + + new_pipeline._init_text_cond_model(args['text_cond_model']) + + return new_pipeline + + def _init_text_cond_model(self, name: str): + """ + Initialize the text conditioning model. + """ + # load model + model = CLIPTextModel.from_pretrained(name) + tokenizer = AutoTokenizer.from_pretrained(name) + model.eval() + model = model.cuda() + self.text_cond_model = { + 'model': model, + 'tokenizer': tokenizer, + } + self.text_cond_model['null_cond'] = self.encode_text(['']) + + @torch.no_grad() + def encode_text(self, text: List[str]) -> torch.Tensor: + """ + Encode the text. + """ + assert isinstance(text, list) and all(isinstance(t, str) for t in text), "text must be a list of strings" + encoding = self.text_cond_model['tokenizer'](text, max_length=77, padding='max_length', truncation=True, return_tensors='pt') + tokens = encoding['input_ids'].cuda() + embeddings = self.text_cond_model['model'](input_ids=tokens).last_hidden_state + + return embeddings + + def get_cond(self, prompt: List[str]) -> dict: + """ + Get the conditioning information for the model. + + Args: + prompt (List[str]): The text prompt. + + Returns: + dict: The conditioning information + """ + cond = self.encode_text(prompt) + neg_cond = self.text_cond_model['null_cond'] + return { + 'cond': cond, + 'neg_cond': neg_cond, + } + + def sample_sparse_structure( + self, + cond: dict, + num_samples: int = 1, + sampler_params: dict = {}, + ) -> torch.Tensor: + """ + Sample sparse structures with the given conditioning. + + Args: + cond (dict): The conditioning information. + num_samples (int): The number of samples to generate. + sampler_params (dict): Additional parameters for the sampler. + """ + # Sample occupancy latent + flow_model = self.models['sparse_structure_flow_model'] + reso = flow_model.resolution + noise = torch.randn(num_samples, flow_model.in_channels, reso, reso, reso).to(self.device) + sampler_params = {**self.sparse_structure_sampler_params, **sampler_params} + z_s = self.sparse_structure_sampler.sample( + flow_model, + noise, + **cond, + **sampler_params, + verbose=True + ).samples + + # Decode occupancy latent + decoder = self.models['sparse_structure_decoder'] + coords = torch.argwhere(decoder(z_s)>0)[:, [0, 2, 3, 4]].int() + + return coords + + def decode_slat( + self, + slat: sp.SparseTensor, + formats: List[str] = ['mesh', 'gaussian', 'radiance_field'], + ) -> dict: + """ + Decode the structured latent. + + Args: + slat (sp.SparseTensor): The structured latent. + formats (List[str]): The formats to decode the structured latent to. + + Returns: + dict: The decoded structured latent. 
+        """
+        ret = {}
+        if 'mesh' in formats:
+            ret['mesh'] = self.models['slat_decoder_mesh'](slat)
+        if 'gaussian' in formats:
+            ret['gaussian'] = self.models['slat_decoder_gs'](slat)
+        if 'radiance_field' in formats:
+            ret['radiance_field'] = self.models['slat_decoder_rf'](slat)
+        return ret
+
+    def sample_slat(
+        self,
+        cond: dict,
+        coords: torch.Tensor,
+        sampler_params: dict = {},
+    ) -> sp.SparseTensor:
+        """
+        Sample structured latent with the given conditioning.
+
+        Args:
+            cond (dict): The conditioning information.
+            coords (torch.Tensor): The coordinates of the sparse structure.
+            sampler_params (dict): Additional parameters for the sampler.
+        """
+        # Sample structured latent
+        flow_model = self.models['slat_flow_model']
+        noise = sp.SparseTensor(
+            feats=torch.randn(coords.shape[0], flow_model.in_channels).to(self.device),
+            coords=coords,
+        )
+        sampler_params = {**self.slat_sampler_params, **sampler_params}
+        slat = self.slat_sampler.sample(
+            flow_model,
+            noise,
+            **cond,
+            **sampler_params,
+            verbose=True
+        ).samples
+
+        std = torch.tensor(self.slat_normalization['std'])[None].to(slat.device)
+        mean = torch.tensor(self.slat_normalization['mean'])[None].to(slat.device)
+        slat = slat * std + mean
+
+        return slat
+
+    @torch.no_grad()
+    def run(
+        self,
+        prompt: str,
+        num_samples: int = 1,
+        seed: int = 42,
+        sparse_structure_sampler_params: dict = {},
+        slat_sampler_params: dict = {},
+        formats: List[str] = ['mesh', 'gaussian', 'radiance_field'],
+    ) -> dict:
+        """
+        Run the pipeline.
+
+        Args:
+            prompt (str): The text prompt.
+            num_samples (int): The number of samples to generate.
+            seed (int): The random seed.
+            sparse_structure_sampler_params (dict): Additional parameters for the sparse structure sampler.
+            slat_sampler_params (dict): Additional parameters for the structured latent sampler.
+            formats (List[str]): The formats to decode the structured latent to.
+        """
+        cond = self.get_cond([prompt])
+        torch.manual_seed(seed)
+        coords = self.sample_sparse_structure(cond, num_samples, sparse_structure_sampler_params)
+        slat = self.sample_slat(cond, coords, slat_sampler_params)
+        return self.decode_slat(slat, formats)
+
+    def voxelize(self, mesh: o3d.geometry.TriangleMesh) -> torch.Tensor:
+        """
+        Voxelize a mesh.
+
+        Args:
+            mesh (o3d.geometry.TriangleMesh): The mesh to voxelize.
+
+        Returns:
+            torch.Tensor: (N, 3) grid indices of the occupied voxels.
+        """
+        vertices = np.asarray(mesh.vertices)
+        aabb = np.stack([vertices.min(0), vertices.max(0)])
+        center = (aabb[0] + aabb[1]) / 2
+        scale = (aabb[1] - aabb[0]).max()
+        vertices = (vertices - center) / scale
+        vertices = np.clip(vertices, -0.5 + 1e-6, 0.5 - 1e-6)
+        mesh.vertices = o3d.utility.Vector3dVector(vertices)
+        voxel_grid = o3d.geometry.VoxelGrid.create_from_triangle_mesh_within_bounds(mesh, voxel_size=1/64, min_bound=(-0.5, -0.5, -0.5), max_bound=(0.5, 0.5, 0.5))
+        vertices = np.array([voxel.grid_index for voxel in voxel_grid.get_voxels()])
+        return torch.tensor(vertices).int().cuda()
+
+    @torch.no_grad()
+    def run_variant(
+        self,
+        mesh: o3d.geometry.TriangleMesh,
+        prompt: str,
+        num_samples: int = 1,
+        seed: int = 42,
+        slat_sampler_params: dict = {},
+        formats: List[str] = ['mesh', 'gaussian', 'radiance_field'],
+    ) -> dict:
+        """
+        Run the pipeline for making variants of an asset.
+
+        Args:
+            mesh (o3d.geometry.TriangleMesh): The base mesh.
+            prompt (str): The text prompt.
+            num_samples (int): The number of samples to generate. 
+ seed (int): The random seed + slat_sampler_params (dict): Additional parameters for the structured latent sampler. + formats (List[str]): The formats to decode the structured latent to. + """ + cond = self.get_cond([prompt]) + coords = self.voxelize(mesh) + coords = torch.cat([ + torch.arange(num_samples).repeat_interleave(coords.shape[0], 0)[:, None].int().cuda(), + coords.repeat(num_samples, 1) + ], 1) + torch.manual_seed(seed) + slat = self.sample_slat(cond, coords, slat_sampler_params) + return self.decode_slat(slat, formats) \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/renderers/__init__.py b/third_party/TRELLIS/trellis/renderers/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..0339355c56b8d17f72e926650d140a658452fbe9 --- /dev/null +++ b/third_party/TRELLIS/trellis/renderers/__init__.py @@ -0,0 +1,31 @@ +import importlib + +__attributes = { + 'OctreeRenderer': 'octree_renderer', + 'GaussianRenderer': 'gaussian_render', + 'MeshRenderer': 'mesh_renderer', +} + +__submodules = [] + +__all__ = list(__attributes.keys()) + __submodules + +def __getattr__(name): + if name not in globals(): + if name in __attributes: + module_name = __attributes[name] + module = importlib.import_module(f".{module_name}", __name__) + globals()[name] = getattr(module, name) + elif name in __submodules: + module = importlib.import_module(f".{name}", __name__) + globals()[name] = module + else: + raise AttributeError(f"module {__name__} has no attribute {name}") + return globals()[name] + + +# For Pylance +if __name__ == '__main__': + from .octree_renderer import OctreeRenderer + from .gaussian_render import GaussianRenderer + from .mesh_renderer import MeshRenderer \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/renderers/gaussian_render.py b/third_party/TRELLIS/trellis/renderers/gaussian_render.py new file mode 100755 index 0000000000000000000000000000000000000000..57108e3cccf6aab8e3059431557c461de46aff1a --- /dev/null +++ b/third_party/TRELLIS/trellis/renderers/gaussian_render.py @@ -0,0 +1,231 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. +# +# For inquiries contact george.drettakis@inria.fr +# + +import torch +import math +from easydict import EasyDict as edict +import numpy as np +from ..representations.gaussian import Gaussian +from .sh_utils import eval_sh +import torch.nn.functional as F +from easydict import EasyDict as edict + + +def intrinsics_to_projection( + intrinsics: torch.Tensor, + near: float, + far: float, + ) -> torch.Tensor: + """ + OpenCV intrinsics to OpenGL perspective matrix + + Args: + intrinsics (torch.Tensor): [3, 3] OpenCV intrinsics matrix + near (float): near plane to clip + far (float): far plane to clip + Returns: + (torch.Tensor): [4, 4] OpenGL perspective matrix + """ + fx, fy = intrinsics[0, 0], intrinsics[1, 1] + cx, cy = intrinsics[0, 2], intrinsics[1, 2] + ret = torch.zeros((4, 4), dtype=intrinsics.dtype, device=intrinsics.device) + ret[0, 0] = 2 * fx + ret[1, 1] = 2 * fy + ret[0, 2] = 2 * cx - 1 + ret[1, 2] = - 2 * cy + 1 + ret[2, 2] = far / (far - near) + ret[2, 3] = near * far / (near - far) + ret[3, 2] = 1. + return ret + + +def render(viewpoint_camera, pc : Gaussian, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None): + """ + Render the scene. 
+
+    Background tensor (bg_color) must be on GPU!
+    """
+    # lazy import
+    if 'GaussianRasterizer' not in globals():
+        from diff_gaussian_rasterization import GaussianRasterizer, GaussianRasterizationSettings
+
+    # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
+    screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0
+    try:
+        screenspace_points.retain_grad()
+    except Exception:
+        pass
+    # Set up rasterization configuration
+    tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
+    tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
+
+    kernel_size = pipe.kernel_size
+    subpixel_offset = torch.zeros((int(viewpoint_camera.image_height), int(viewpoint_camera.image_width), 2), dtype=torch.float32, device="cuda")
+
+    raster_settings = GaussianRasterizationSettings(
+        image_height=int(viewpoint_camera.image_height),
+        image_width=int(viewpoint_camera.image_width),
+        tanfovx=tanfovx,
+        tanfovy=tanfovy,
+        kernel_size=kernel_size,
+        subpixel_offset=subpixel_offset,
+        bg=bg_color,
+        scale_modifier=scaling_modifier,
+        viewmatrix=viewpoint_camera.world_view_transform,
+        projmatrix=viewpoint_camera.full_proj_transform,
+        sh_degree=pc.active_sh_degree,
+        campos=viewpoint_camera.camera_center,
+        prefiltered=False,
+        debug=pipe.debug
+    )
+
+    rasterizer = GaussianRasterizer(raster_settings=raster_settings)
+
+    means3D = pc.get_xyz
+    means2D = screenspace_points
+    opacity = pc.get_opacity
+
+    # If precomputed 3d covariance is provided, use it. If not, then it will be computed from
+    # scaling / rotation by the rasterizer.
+    scales = None
+    rotations = None
+    cov3D_precomp = None
+    if pipe.compute_cov3D_python:
+        cov3D_precomp = pc.get_covariance(scaling_modifier)
+    else:
+        scales = pc.get_scaling
+        rotations = pc.get_rotation
+
+    # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
+    # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
+    shs = None
+    colors_precomp = None
+    if override_color is None:
+        if pipe.convert_SHs_python:
+            shs_view = pc.get_features.transpose(1, 2).view(-1, 3, (pc.sh_degree+1)**2)
+            dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features.shape[0], 1))
+            dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True)
+            sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized)
+            colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)
+        else:
+            shs = pc.get_features
+    else:
+        colors_precomp = override_color
+
+    # Rasterize visible Gaussians to image, obtain their radii (on screen).
+    rendered_image, radii = rasterizer(
+        means3D = means3D,
+        means2D = means2D,
+        shs = shs,
+        colors_precomp = colors_precomp,
+        opacities = opacity,
+        scales = scales,
+        rotations = rotations,
+        cov3D_precomp = cov3D_precomp
+    )
+
+    # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
+    # They will be excluded from value updates used in the splitting criteria.
+    return edict({"render": rendered_image,
+                  "viewspace_points": screenspace_points,
+                  "visibility_filter" : radii > 0,
+                  "radii": radii})
+
+
+class GaussianRenderer:
+    """
+    Renderer for the Gaussian representation.
+
+    Args:
+        rendering_options (dict): Rendering options. 
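+
+    Example options (a sketch; the values are placeholders, the keys mirror
+    the defaults set in __init__):
+        {'resolution': 512, 'near': 0.8, 'far': 1.6, 'ssaa': 2, 'bg_color': (1, 1, 1)}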
+    """
+
+    def __init__(self, rendering_options={}) -> None:
+        self.pipe = edict({
+            "kernel_size": 0.1,
+            "convert_SHs_python": False,
+            "compute_cov3D_python": False,
+            "scale_modifier": 1.0,
+            "debug": False
+        })
+        self.rendering_options = edict({
+            "resolution": None,
+            "near": None,
+            "far": None,
+            "ssaa": 1,
+            "bg_color": 'random',
+        })
+        self.rendering_options.update(rendering_options)
+        self.bg_color = None
+
+    def render(
+        self,
+        gaussian: Gaussian,
+        extrinsics: torch.Tensor,
+        intrinsics: torch.Tensor,
+        colors_overwrite: torch.Tensor = None
+    ) -> edict:
+        """
+        Render the Gaussian model.
+
+        Args:
+            gaussian (Gaussian): The Gaussian model to render.
+            extrinsics (torch.Tensor): (4, 4) camera extrinsics
+            intrinsics (torch.Tensor): (3, 3) camera intrinsics
+            colors_overwrite (torch.Tensor): (N, 3) override color
+
+        Returns:
+            edict containing:
+                color (torch.Tensor): (3, H, W) rendered color image
+        """
+        resolution = self.rendering_options["resolution"]
+        near = self.rendering_options["near"]
+        far = self.rendering_options["far"]
+        ssaa = self.rendering_options["ssaa"]
+
+        if self.rendering_options["bg_color"] == 'random':
+            self.bg_color = torch.zeros(3, dtype=torch.float32, device="cuda")
+            if np.random.rand() < 0.5:
+                self.bg_color += 1
+        else:
+            self.bg_color = torch.tensor(self.rendering_options["bg_color"], dtype=torch.float32, device="cuda")
+
+        view = extrinsics
+        perspective = intrinsics_to_projection(intrinsics, near, far)
+        camera = torch.inverse(view)[:3, 3]
+        focalx = intrinsics[0, 0]
+        focaly = intrinsics[1, 1]
+        fovx = 2 * torch.atan(0.5 / focalx)
+        fovy = 2 * torch.atan(0.5 / focaly)
+
+        camera_dict = edict({
+            "image_height": resolution * ssaa,
+            "image_width": resolution * ssaa,
+            "FoVx": fovx,
+            "FoVy": fovy,
+            "znear": near,
+            "zfar": far,
+            "world_view_transform": view.T.contiguous(),
+            "projection_matrix": perspective.T.contiguous(),
+            "full_proj_transform": (perspective @ view).T.contiguous(),
+            "camera_center": camera
+        })
+
+        # Render
+        render_ret = render(camera_dict, gaussian, self.pipe, self.bg_color, override_color=colors_overwrite, scaling_modifier=self.pipe.scale_modifier)
+
+        if ssaa > 1:
+            render_ret.render = F.interpolate(render_ret.render[None], size=(resolution, resolution), mode='bilinear', align_corners=False, antialias=True).squeeze()
+
+        ret = edict({
+            'color': render_ret['render']
+        })
+        return ret
diff --git a/third_party/TRELLIS/trellis/renderers/mesh_renderer.py b/third_party/TRELLIS/trellis/renderers/mesh_renderer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b504fa4d140c68ef3c611669ea075000d9723a04
--- /dev/null
+++ b/third_party/TRELLIS/trellis/renderers/mesh_renderer.py
@@ -0,0 +1,133 @@
+import torch
+import nvdiffrast.torch as dr
+from easydict import EasyDict as edict
+from ..representations.mesh import MeshExtractResult
+import torch.nn.functional as F
+
+
+def intrinsics_to_projection(
+        intrinsics: torch.Tensor,
+        near: float,
+        far: float,
+    ) -> torch.Tensor:
+    """
+    OpenCV intrinsics to OpenGL perspective matrix
+
+    Args:
+        intrinsics (torch.Tensor): [3, 3] OpenCV intrinsics matrix
+        near (float): near plane to clip
+        far (float): far plane to clip
+    Returns:
+        (torch.Tensor): [4, 4] OpenGL perspective matrix
+    """
+    fx, fy = intrinsics[0, 0], intrinsics[1, 1]
+    cx, cy = intrinsics[0, 2], intrinsics[1, 2]
+    ret = torch.zeros((4, 4), dtype=intrinsics.dtype, device=intrinsics.device)
+    ret[0, 0] = 2 * fx
+    ret[1, 1] = 2 * fy
+    ret[0, 2] = 2 * cx - 1
+    ret[1, 2] = - 2 * cy + 1
+    ret[2, 2] = far / (far - near)
+    ret[2, 3] 
= near * far / (near - far)
+    ret[3, 2] = 1.
+    return ret
+
+
+class MeshRenderer:
+    """
+    Renderer for the Mesh representation.
+
+    Args:
+        rendering_options (dict): Rendering options.
+        device (str): Device to create the nvdiffrast CUDA rasterization context on.
+    """
+    def __init__(self, rendering_options={}, device='cuda'):
+        self.rendering_options = edict({
+            "resolution": None,
+            "near": None,
+            "far": None,
+            "ssaa": 1
+        })
+        self.rendering_options.update(rendering_options)
+        self.glctx = dr.RasterizeCudaContext(device=device)
+        self.device = device
+
+    def render(
+            self,
+            mesh : MeshExtractResult,
+            extrinsics: torch.Tensor,
+            intrinsics: torch.Tensor,
+            return_types = ["mask", "normal", "depth"]
+        ) -> edict:
+        """
+        Render the mesh.
+
+        Args:
+            mesh (MeshExtractResult): The mesh to render.
+            extrinsics (torch.Tensor): (4, 4) camera extrinsics
+            intrinsics (torch.Tensor): (3, 3) camera intrinsics
+            return_types (list): list of return types, can be "mask", "depth", "normal_map", "normal", "color"
+
+        Returns:
+            edict based on return_types containing:
+                color (torch.Tensor): [3, H, W] rendered color image
+                depth (torch.Tensor): [H, W] rendered depth image
+                normal (torch.Tensor): [3, H, W] rendered normal image
+                normal_map (torch.Tensor): [3, H, W] rendered normal map image
+                mask (torch.Tensor): [H, W] rendered mask image
+        """
+        resolution = self.rendering_options["resolution"]
+        near = self.rendering_options["near"]
+        far = self.rendering_options["far"]
+        ssaa = self.rendering_options["ssaa"]
+
+        if mesh.vertices.shape[0] == 0 or mesh.faces.shape[0] == 0:
+            default_img = torch.zeros((1, resolution, resolution, 3), dtype=torch.float32, device=self.device)
+            ret_dict = {k : default_img if k in ['normal', 'normal_map', 'color'] else default_img[..., :1] for k in return_types}
+            return ret_dict
+
+        perspective = intrinsics_to_projection(intrinsics, near, far)
+
+        RT = extrinsics.unsqueeze(0)
+        full_proj = (perspective @ extrinsics).unsqueeze(0)
+
+        vertices = mesh.vertices.unsqueeze(0)
+
+        vertices_homo = torch.cat([vertices, torch.ones_like(vertices[..., :1])], dim=-1)
+        vertices_camera = torch.bmm(vertices_homo, RT.transpose(-1, -2))
+        vertices_clip = torch.bmm(vertices_homo, full_proj.transpose(-1, -2))
+        faces_int = mesh.faces.int()
+        rast, _ = dr.rasterize(
+            self.glctx, vertices_clip, faces_int, (resolution * ssaa, resolution * ssaa))
+
+        out_dict = edict()
+        for type in return_types:
+            img = None
+            if type == "mask" :
+                img = dr.antialias((rast[..., -1:] > 0).float(), rast, vertices_clip, faces_int)
+            elif type == "depth":
+                img = dr.interpolate(vertices_camera[..., 2:3].contiguous(), rast, faces_int)[0]
+                img = dr.antialias(img, rast, vertices_clip, faces_int)
+            elif type == "normal" :
+                img = dr.interpolate(
+                    mesh.face_normal.reshape(1, -1, 3), rast,
+                    torch.arange(mesh.faces.shape[0] * 3, device=self.device, dtype=torch.int).reshape(-1, 3)
+                )[0]
+                img = dr.antialias(img, rast, vertices_clip, faces_int)
+                # remap normals from [-1, 1] to [0, 1] for visualization
+                img = (img + 1) / 2
+            elif type == "normal_map" :
+                img = dr.interpolate(mesh.vertex_attrs[:, 3:].contiguous(), rast, faces_int)[0]
+                img = dr.antialias(img, rast, vertices_clip, faces_int)
+            elif type == "color" :
+                img = dr.interpolate(mesh.vertex_attrs[:, :3].contiguous(), rast, faces_int)[0]
+                img = dr.antialias(img, rast, vertices_clip, faces_int)
+
+            if ssaa > 1:
+                img = F.interpolate(img.permute(0, 3, 1, 2), (resolution, resolution), mode='bilinear', align_corners=False, antialias=True)
+                img = img.squeeze()
+            else:
+                img = img.permute(0, 3, 1, 
2).squeeze() + out_dict[type] = img + + return out_dict diff --git a/third_party/TRELLIS/trellis/renderers/octree_renderer.py b/third_party/TRELLIS/trellis/renderers/octree_renderer.py new file mode 100755 index 0000000000000000000000000000000000000000..136069cdb0645b5759d5d17f7815612a1dfc7bea --- /dev/null +++ b/third_party/TRELLIS/trellis/renderers/octree_renderer.py @@ -0,0 +1,300 @@ +import numpy as np +import torch +import torch.nn.functional as F +import math +import cv2 +from scipy.stats import qmc +from easydict import EasyDict as edict +from ..representations.octree import DfsOctree + + +def intrinsics_to_projection( + intrinsics: torch.Tensor, + near: float, + far: float, + ) -> torch.Tensor: + """ + OpenCV intrinsics to OpenGL perspective matrix + + Args: + intrinsics (torch.Tensor): [3, 3] OpenCV intrinsics matrix + near (float): near plane to clip + far (float): far plane to clip + Returns: + (torch.Tensor): [4, 4] OpenGL perspective matrix + """ + fx, fy = intrinsics[0, 0], intrinsics[1, 1] + cx, cy = intrinsics[0, 2], intrinsics[1, 2] + ret = torch.zeros((4, 4), dtype=intrinsics.dtype, device=intrinsics.device) + ret[0, 0] = 2 * fx + ret[1, 1] = 2 * fy + ret[0, 2] = 2 * cx - 1 + ret[1, 2] = - 2 * cy + 1 + ret[2, 2] = far / (far - near) + ret[2, 3] = near * far / (near - far) + ret[3, 2] = 1. + return ret + + +def render(viewpoint_camera, octree : DfsOctree, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, used_rank = None, colors_overwrite = None, aux=None, halton_sampler=None): + """ + Render the scene. + + Background tensor (bg_color) must be on GPU! + """ + # lazy import + if 'OctreeTrivecRasterizer' not in globals(): + from diffoctreerast import OctreeVoxelRasterizer, OctreeGaussianRasterizer, OctreeTrivecRasterizer, OctreeDecoupolyRasterizer + + # Set up rasterization configuration + tanfovx = math.tan(viewpoint_camera.FoVx * 0.5) + tanfovy = math.tan(viewpoint_camera.FoVy * 0.5) + + raster_settings = edict( + image_height=int(viewpoint_camera.image_height), + image_width=int(viewpoint_camera.image_width), + tanfovx=tanfovx, + tanfovy=tanfovy, + bg=bg_color, + scale_modifier=scaling_modifier, + viewmatrix=viewpoint_camera.world_view_transform, + projmatrix=viewpoint_camera.full_proj_transform, + sh_degree=octree.active_sh_degree, + campos=viewpoint_camera.camera_center, + with_distloss=pipe.with_distloss, + jitter=pipe.jitter, + debug=pipe.debug, + ) + + positions = octree.get_xyz + if octree.primitive == "voxel": + densities = octree.get_density + elif octree.primitive == "gaussian": + opacities = octree.get_opacity + elif octree.primitive == "trivec": + trivecs = octree.get_trivec + densities = octree.get_density + raster_settings.density_shift = octree.density_shift + elif octree.primitive == "decoupoly": + decoupolys_V, decoupolys_g = octree.get_decoupoly + densities = octree.get_density + raster_settings.density_shift = octree.density_shift + else: + raise ValueError(f"Unknown primitive {octree.primitive}") + depths = octree.get_depth + + # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors + # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer. 
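+    # (For the voxel/gaussian primitives, colors_overwrite below replaces the SH
+    # features entirely; the trivec rasterizer instead receives it as its own
+    # argument alongside the SH features.)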
+ colors_precomp = None + shs = octree.get_features + if octree.primitive in ["voxel", "gaussian"] and colors_overwrite is not None: + colors_precomp = colors_overwrite + shs = None + + ret = edict() + + if octree.primitive == "voxel": + renderer = OctreeVoxelRasterizer(raster_settings=raster_settings) + rgb, depth, alpha, distloss = renderer( + positions = positions, + densities = densities, + shs = shs, + colors_precomp = colors_precomp, + depths = depths, + aabb = octree.aabb, + aux = aux, + ) + ret['rgb'] = rgb + ret['depth'] = depth + ret['alpha'] = alpha + ret['distloss'] = distloss + elif octree.primitive == "gaussian": + renderer = OctreeGaussianRasterizer(raster_settings=raster_settings) + rgb, depth, alpha = renderer( + positions = positions, + opacities = opacities, + shs = shs, + colors_precomp = colors_precomp, + depths = depths, + aabb = octree.aabb, + aux = aux, + ) + ret['rgb'] = rgb + ret['depth'] = depth + ret['alpha'] = alpha + elif octree.primitive == "trivec": + raster_settings.used_rank = used_rank if used_rank is not None else trivecs.shape[1] + renderer = OctreeTrivecRasterizer(raster_settings=raster_settings) + rgb, depth, alpha, percent_depth = renderer( + positions = positions, + trivecs = trivecs, + densities = densities, + shs = shs, + colors_precomp = colors_precomp, + colors_overwrite = colors_overwrite, + depths = depths, + aabb = octree.aabb, + aux = aux, + halton_sampler = halton_sampler, + ) + ret['percent_depth'] = percent_depth + ret['rgb'] = rgb + ret['depth'] = depth + ret['alpha'] = alpha + elif octree.primitive == "decoupoly": + raster_settings.used_rank = used_rank if used_rank is not None else decoupolys_V.shape[1] + renderer = OctreeDecoupolyRasterizer(raster_settings=raster_settings) + rgb, depth, alpha = renderer( + positions = positions, + decoupolys_V = decoupolys_V, + decoupolys_g = decoupolys_g, + densities = densities, + shs = shs, + colors_precomp = colors_precomp, + depths = depths, + aabb = octree.aabb, + aux = aux, + ) + ret['rgb'] = rgb + ret['depth'] = depth + ret['alpha'] = alpha + + return ret + + +class OctreeRenderer: + """ + Renderer for the Voxel representation. + + Args: + rendering_options (dict): Rendering options. + """ + + def __init__(self, rendering_options={}) -> None: + try: + import diffoctreerast + except ImportError: + print("\033[93m[WARNING] diffoctreerast is not installed. The renderer will be disabled.\033[0m") + self.unsupported = True + else: + self.unsupported = False + + self.pipe = edict({ + "with_distloss": False, + "with_aux": False, + "scale_modifier": 1.0, + "used_rank": None, + "jitter": False, + "debug": False, + }) + self.rendering_options = edict({ + "resolution": None, + "near": None, + "far": None, + "ssaa": 1, + "bg_color": 'random', + }) + self.halton_sampler = qmc.Halton(2, scramble=False) + self.rendering_options.update(rendering_options) + self.bg_color = None + + def render( + self, + octree: DfsOctree, + extrinsics: torch.Tensor, + intrinsics: torch.Tensor, + colors_overwrite: torch.Tensor = None, + ) -> edict: + """ + Render the octree. 
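+
+        Example (a sketch; assumes a populated DfsOctree and camera tensors,
+        with placeholder near/far values):
+            >>> renderer = OctreeRenderer({'resolution': 512, 'near': 0.8, 'far': 1.6})
+            >>> out = renderer.render(octree, extrinsics, intrinsics)
+            >>> out.color.shape
+            torch.Size([3, 512, 512])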
+ + Args: + octree (Octree): octree + extrinsics (torch.Tensor): (4, 4) camera extrinsics + intrinsics (torch.Tensor): (3, 3) camera intrinsics + colors_overwrite (torch.Tensor): (N, 3) override color + + Returns: + edict containing: + color (torch.Tensor): (3, H, W) rendered color + depth (torch.Tensor): (H, W) rendered depth + alpha (torch.Tensor): (H, W) rendered alpha + distloss (Optional[torch.Tensor]): (H, W) rendered distance loss + percent_depth (Optional[torch.Tensor]): (H, W) rendered percent depth + aux (Optional[edict]): auxiliary tensors + """ + resolution = self.rendering_options["resolution"] + near = self.rendering_options["near"] + far = self.rendering_options["far"] + ssaa = self.rendering_options["ssaa"] + + if self.unsupported: + image = np.zeros((512, 512, 3), dtype=np.uint8) + text_bbox = cv2.getTextSize("Unsupported", cv2.FONT_HERSHEY_SIMPLEX, 2, 3)[0] + origin = (512 - text_bbox[0]) // 2, (512 - text_bbox[1]) // 2 + image = cv2.putText(image, "Unsupported", origin, cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3, cv2.LINE_AA) + return { + 'color': torch.tensor(image, dtype=torch.float32).permute(2, 0, 1) / 255, + } + + if self.rendering_options["bg_color"] == 'random': + self.bg_color = torch.zeros(3, dtype=torch.float32, device="cuda") + if np.random.rand() < 0.5: + self.bg_color += 1 + else: + self.bg_color = torch.tensor(self.rendering_options["bg_color"], dtype=torch.float32, device="cuda") + + if self.pipe["with_aux"]: + aux = { + 'grad_color2': torch.zeros((octree.num_leaf_nodes, 3), dtype=torch.float32, requires_grad=True, device="cuda") + 0, + 'contributions': torch.zeros((octree.num_leaf_nodes, 1), dtype=torch.float32, requires_grad=True, device="cuda") + 0, + } + for k in aux.keys(): + aux[k].requires_grad_() + aux[k].retain_grad() + else: + aux = None + + view = extrinsics + perspective = intrinsics_to_projection(intrinsics, near, far) + camera = torch.inverse(view)[:3, 3] + focalx = intrinsics[0, 0] + focaly = intrinsics[1, 1] + fovx = 2 * torch.atan(0.5 / focalx) + fovy = 2 * torch.atan(0.5 / focaly) + + camera_dict = edict({ + "image_height": resolution * ssaa, + "image_width": resolution * ssaa, + "FoVx": fovx, + "FoVy": fovy, + "znear": near, + "zfar": far, + "world_view_transform": view.T.contiguous(), + "projection_matrix": perspective.T.contiguous(), + "full_proj_transform": (perspective @ view).T.contiguous(), + "camera_center": camera + }) + + # Render + render_ret = render(camera_dict, octree, self.pipe, self.bg_color, aux=aux, colors_overwrite=colors_overwrite, scaling_modifier=self.pipe.scale_modifier, used_rank=self.pipe.used_rank, halton_sampler=self.halton_sampler) + + if ssaa > 1: + render_ret.rgb = F.interpolate(render_ret.rgb[None], size=(resolution, resolution), mode='bilinear', align_corners=False, antialias=True).squeeze() + render_ret.depth = F.interpolate(render_ret.depth[None, None], size=(resolution, resolution), mode='bilinear', align_corners=False, antialias=True).squeeze() + render_ret.alpha = F.interpolate(render_ret.alpha[None, None], size=(resolution, resolution), mode='bilinear', align_corners=False, antialias=True).squeeze() + if hasattr(render_ret, 'percent_depth'): + render_ret.percent_depth = F.interpolate(render_ret.percent_depth[None, None], size=(resolution, resolution), mode='bilinear', align_corners=False, antialias=True).squeeze() + + ret = edict({ + 'color': render_ret.rgb, + 'depth': render_ret.depth, + 'alpha': render_ret.alpha, + }) + if self.pipe["with_distloss"] and 'distloss' in render_ret: + 
ret['distloss'] = render_ret.distloss + if self.pipe["with_aux"]: + ret['aux'] = aux + if hasattr(render_ret, 'percent_depth'): + ret['percent_depth'] = render_ret.percent_depth + return ret diff --git a/third_party/TRELLIS/trellis/renderers/sh_utils.py b/third_party/TRELLIS/trellis/renderers/sh_utils.py new file mode 100755 index 0000000000000000000000000000000000000000..bbca7d192aa3a7edf8c5b2d24dee535eac765785 --- /dev/null +++ b/third_party/TRELLIS/trellis/renderers/sh_utils.py @@ -0,0 +1,118 @@ +# Copyright 2021 The PlenOctree Authors. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import torch + +C0 = 0.28209479177387814 +C1 = 0.4886025119029199 +C2 = [ + 1.0925484305920792, + -1.0925484305920792, + 0.31539156525252005, + -1.0925484305920792, + 0.5462742152960396 +] +C3 = [ + -0.5900435899266435, + 2.890611442640554, + -0.4570457994644658, + 0.3731763325901154, + -0.4570457994644658, + 1.445305721320277, + -0.5900435899266435 +] +C4 = [ + 2.5033429417967046, + -1.7701307697799304, + 0.9461746957575601, + -0.6690465435572892, + 0.10578554691520431, + -0.6690465435572892, + 0.47308734787878004, + -1.7701307697799304, + 0.6258357354491761, +] + + +def eval_sh(deg, sh, dirs): + """ + Evaluate spherical harmonics at unit directions + using hardcoded SH polynomials. + Works with torch/np/jnp. + ... Can be 0 or more batch dimensions. + Args: + deg: int SH deg. 
Currently, 0-3 supported + sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2] + dirs: jnp.ndarray unit directions [..., 3] + Returns: + [..., C] + """ + assert deg <= 4 and deg >= 0 + coeff = (deg + 1) ** 2 + assert sh.shape[-1] >= coeff + + result = C0 * sh[..., 0] + if deg > 0: + x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3] + result = (result - + C1 * y * sh[..., 1] + + C1 * z * sh[..., 2] - + C1 * x * sh[..., 3]) + + if deg > 1: + xx, yy, zz = x * x, y * y, z * z + xy, yz, xz = x * y, y * z, x * z + result = (result + + C2[0] * xy * sh[..., 4] + + C2[1] * yz * sh[..., 5] + + C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] + + C2[3] * xz * sh[..., 7] + + C2[4] * (xx - yy) * sh[..., 8]) + + if deg > 2: + result = (result + + C3[0] * y * (3 * xx - yy) * sh[..., 9] + + C3[1] * xy * z * sh[..., 10] + + C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] + + C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] + + C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] + + C3[5] * z * (xx - yy) * sh[..., 14] + + C3[6] * x * (xx - 3 * yy) * sh[..., 15]) + + if deg > 3: + result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] + + C4[1] * yz * (3 * xx - yy) * sh[..., 17] + + C4[2] * xy * (7 * zz - 1) * sh[..., 18] + + C4[3] * yz * (7 * zz - 3) * sh[..., 19] + + C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] + + C4[5] * xz * (7 * zz - 3) * sh[..., 21] + + C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] + + C4[7] * xz * (xx - 3 * yy) * sh[..., 23] + + C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24]) + return result + +def RGB2SH(rgb): + return (rgb - 0.5) / C0 + +def SH2RGB(sh): + return sh * C0 + 0.5 \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/representations/__init__.py b/third_party/TRELLIS/trellis/representations/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..549ffdb97e87181552e9b3e086766f873e4bfb5e --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/__init__.py @@ -0,0 +1,4 @@ +from .radiance_field import Strivec +from .octree import DfsOctree as Octree +from .gaussian import Gaussian +from .mesh import MeshExtractResult diff --git a/third_party/TRELLIS/trellis/representations/gaussian/__init__.py b/third_party/TRELLIS/trellis/representations/gaussian/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..e3de6e180bd732836af876d748255595be2d4d74 --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/gaussian/__init__.py @@ -0,0 +1 @@ +from .gaussian_model import Gaussian \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/representations/gaussian/gaussian_model.py b/third_party/TRELLIS/trellis/representations/gaussian/gaussian_model.py new file mode 100755 index 0000000000000000000000000000000000000000..54ba16f1550e8edb1728605202cc31b6dd805d90 --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/gaussian/gaussian_model.py @@ -0,0 +1,209 @@ +import torch +import numpy as np +from plyfile import PlyData, PlyElement +from .general_utils import inverse_sigmoid, strip_symmetric, build_scaling_rotation +import utils3d + + +class Gaussian: + def __init__( + self, + aabb : list, + sh_degree : int = 0, + mininum_kernel_size : float = 0.0, + scaling_bias : float = 0.01, + opacity_bias : float = 0.1, + scaling_activation : str = "exp", + device='cuda' + ): + self.init_params = { + 'aabb': aabb, + 'sh_degree': sh_degree, + 'mininum_kernel_size': mininum_kernel_size, + 'scaling_bias': scaling_bias, + 'opacity_bias': opacity_bias, + 'scaling_activation': 
scaling_activation, + } + + self.sh_degree = sh_degree + self.active_sh_degree = sh_degree + self.mininum_kernel_size = mininum_kernel_size + self.scaling_bias = scaling_bias + self.opacity_bias = opacity_bias + self.scaling_activation_type = scaling_activation + self.device = device + self.aabb = torch.tensor(aabb, dtype=torch.float32, device=device) + self.setup_functions() + + self._xyz = None + self._features_dc = None + self._features_rest = None + self._scaling = None + self._rotation = None + self._opacity = None + + def setup_functions(self): + def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation): + L = build_scaling_rotation(scaling_modifier * scaling, rotation) + actual_covariance = L @ L.transpose(1, 2) + symm = strip_symmetric(actual_covariance) + return symm + + if self.scaling_activation_type == "exp": + self.scaling_activation = torch.exp + self.inverse_scaling_activation = torch.log + elif self.scaling_activation_type == "softplus": + self.scaling_activation = torch.nn.functional.softplus + self.inverse_scaling_activation = lambda x: x + torch.log(-torch.expm1(-x)) + + self.covariance_activation = build_covariance_from_scaling_rotation + + self.opacity_activation = torch.sigmoid + self.inverse_opacity_activation = inverse_sigmoid + + self.rotation_activation = torch.nn.functional.normalize + + self.scale_bias = self.inverse_scaling_activation(torch.tensor(self.scaling_bias)).cuda() + self.rots_bias = torch.zeros((4)).cuda() + self.rots_bias[0] = 1 + self.opacity_bias = self.inverse_opacity_activation(torch.tensor(self.opacity_bias)).cuda() + + @property + def get_scaling(self): + scales = self.scaling_activation(self._scaling + self.scale_bias) + scales = torch.square(scales) + self.mininum_kernel_size ** 2 + scales = torch.sqrt(scales) + return scales + + @property + def get_rotation(self): + return self.rotation_activation(self._rotation + self.rots_bias[None, :]) + + @property + def get_xyz(self): + return self._xyz * self.aabb[None, 3:] + self.aabb[None, :3] + + @property + def get_features(self): + return torch.cat((self._features_dc, self._features_rest), dim=2) if self._features_rest is not None else self._features_dc + + @property + def get_opacity(self): + return self.opacity_activation(self._opacity + self.opacity_bias) + + def get_covariance(self, scaling_modifier = 1): + return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation + self.rots_bias[None, :]) + + def from_scaling(self, scales): + scales = torch.sqrt(torch.square(scales) - self.mininum_kernel_size ** 2) + self._scaling = self.inverse_scaling_activation(scales) - self.scale_bias + + def from_rotation(self, rots): + self._rotation = rots - self.rots_bias[None, :] + + def from_xyz(self, xyz): + self._xyz = (xyz - self.aabb[None, :3]) / self.aabb[None, 3:] + + def from_features(self, features): + self._features_dc = features + + def from_opacity(self, opacities): + self._opacity = self.inverse_opacity_activation(opacities) - self.opacity_bias + + def construct_list_of_attributes(self): + l = ['x', 'y', 'z', 'nx', 'ny', 'nz'] + # All channels except the 3 DC + for i in range(self._features_dc.shape[1]*self._features_dc.shape[2]): + l.append('f_dc_{}'.format(i)) + l.append('opacity') + for i in range(self._scaling.shape[1]): + l.append('scale_{}'.format(i)) + for i in range(self._rotation.shape[1]): + l.append('rot_{}'.format(i)) + return l + + def save_ply(self, path, transform=[[1, 0, 0], [0, 0, -1], [0, 1, 0]]): + xyz = 
self.get_xyz.detach().cpu().numpy()
+        normals = np.zeros_like(xyz)
+        f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
+        opacities = inverse_sigmoid(self.get_opacity).detach().cpu().numpy()
+        scale = torch.log(self.get_scaling).detach().cpu().numpy()
+        rotation = (self._rotation + self.rots_bias[None, :]).detach().cpu().numpy()
+
+        if transform is not None:
+            transform = np.array(transform)
+            xyz = np.matmul(xyz, transform.T)
+            rotation = utils3d.numpy.quaternion_to_matrix(rotation)
+            rotation = np.matmul(transform, rotation)
+            rotation = utils3d.numpy.matrix_to_quaternion(rotation)
+
+        dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]
+
+        elements = np.empty(xyz.shape[0], dtype=dtype_full)
+        attributes = np.concatenate((xyz, normals, f_dc, opacities, scale, rotation), axis=1)
+        elements[:] = list(map(tuple, attributes))
+        el = PlyElement.describe(elements, 'vertex')
+        PlyData([el]).write(path)
+
+    def load_ply(self, path, transform=[[1, 0, 0], [0, 0, -1], [0, 1, 0]]):
+        plydata = PlyData.read(path)
+
+        xyz = np.stack((np.asarray(plydata.elements[0]["x"]),
+                        np.asarray(plydata.elements[0]["y"]),
+                        np.asarray(plydata.elements[0]["z"])), axis=1)
+        opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis]
+
+        features_dc = np.zeros((xyz.shape[0], 3, 1))
+        features_dc[:, 0, 0] = np.asarray(plydata.elements[0]["f_dc_0"])
+        features_dc[:, 1, 0] = np.asarray(plydata.elements[0]["f_dc_1"])
+        features_dc[:, 2, 0] = np.asarray(plydata.elements[0]["f_dc_2"])
+
+        if self.sh_degree > 0:
+            extra_f_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("f_rest_")]
+            extra_f_names = sorted(extra_f_names, key = lambda x: int(x.split('_')[-1]))
+            assert len(extra_f_names)==3*(self.sh_degree + 1) ** 2 - 3
+            features_extra = np.zeros((xyz.shape[0], len(extra_f_names)))
+            for idx, attr_name in enumerate(extra_f_names):
+                features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name])
+            # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC)
+            features_extra = features_extra.reshape((features_extra.shape[0], 3, (self.sh_degree + 1) ** 2 - 1))
+
+        scale_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("scale_")]
+        scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1]))
+        scales = np.zeros((xyz.shape[0], len(scale_names)))
+        for idx, attr_name in enumerate(scale_names):
+            scales[:, idx] = np.asarray(plydata.elements[0][attr_name])
+
+        rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")]
+        rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1]))
+        rots = np.zeros((xyz.shape[0], len(rot_names)))
+        for idx, attr_name in enumerate(rot_names):
+            rots[:, idx] = np.asarray(plydata.elements[0][attr_name])
+
+        if transform is not None:
+            transform = np.array(transform)
+            xyz = np.matmul(xyz, transform)
+            rots = utils3d.numpy.quaternion_to_matrix(rots)
+            rots = np.matmul(rots, transform)
+            rots = utils3d.numpy.matrix_to_quaternion(rots)
+
+        # convert to actual gaussian attributes
+        xyz = torch.tensor(xyz, dtype=torch.float, device=self.device)
+        features_dc = torch.tensor(features_dc, dtype=torch.float, device=self.device).transpose(1, 2).contiguous()
+        if self.sh_degree > 0:
+            features_extra = torch.tensor(features_extra, dtype=torch.float, device=self.device).transpose(1, 2).contiguous()
+        opacities = torch.sigmoid(torch.tensor(opacities, dtype=torch.float, device=self.device))
+        
scales = torch.exp(torch.tensor(scales, dtype=torch.float, device=self.device)) + rots = torch.tensor(rots, dtype=torch.float, device=self.device) + + # convert to _hidden attributes + self._xyz = (xyz - self.aabb[None, :3]) / self.aabb[None, 3:] + self._features_dc = features_dc + if self.sh_degree > 0: + self._features_rest = features_extra + else: + self._features_rest = None + self._opacity = self.inverse_opacity_activation(opacities) - self.opacity_bias + self._scaling = self.inverse_scaling_activation(torch.sqrt(torch.square(scales) - self.mininum_kernel_size ** 2)) - self.scale_bias + self._rotation = rots - self.rots_bias[None, :] + \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/representations/gaussian/general_utils.py b/third_party/TRELLIS/trellis/representations/gaussian/general_utils.py new file mode 100755 index 0000000000000000000000000000000000000000..541c0825229a2d86e84460b765879f86f724a59d --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/gaussian/general_utils.py @@ -0,0 +1,133 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. +# +# For inquiries contact george.drettakis@inria.fr +# + +import torch +import sys +from datetime import datetime +import numpy as np +import random + +def inverse_sigmoid(x): + return torch.log(x/(1-x)) + +def PILtoTorch(pil_image, resolution): + resized_image_PIL = pil_image.resize(resolution) + resized_image = torch.from_numpy(np.array(resized_image_PIL)) / 255.0 + if len(resized_image.shape) == 3: + return resized_image.permute(2, 0, 1) + else: + return resized_image.unsqueeze(dim=-1).permute(2, 0, 1) + +def get_expon_lr_func( + lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000 +): + """ + Copied from Plenoxels + + Continuous learning rate decay function. Adapted from JaxNeRF + The returned rate is lr_init when step=0 and lr_final when step=max_steps, and + is log-linearly interpolated elsewhere (equivalent to exponential decay). + If lr_delay_steps>0 then the learning rate will be scaled by some smooth + function of lr_delay_mult, such that the initial learning rate is + lr_init*lr_delay_mult at the beginning of optimization but will be eased back + to the normal learning rate when steps>lr_delay_steps. + :param conf: config subtree 'lr' or similar + :param max_steps: int, the number of steps during optimization. + :return HoF which takes step as input + """ + + def helper(step): + if step < 0 or (lr_init == 0.0 and lr_final == 0.0): + # Disable this parameter + return 0.0 + if lr_delay_steps > 0: + # A kind of reverse cosine decay. 
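+            # (a sine ease-in: delay_rate rises smoothly from lr_delay_mult at
+            # step 0 to 1 once step >= lr_delay_steps)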
+ delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin( + 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1) + ) + else: + delay_rate = 1.0 + t = np.clip(step / max_steps, 0, 1) + log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) + return delay_rate * log_lerp + + return helper + +def strip_lowerdiag(L): + uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device="cuda") + + uncertainty[:, 0] = L[:, 0, 0] + uncertainty[:, 1] = L[:, 0, 1] + uncertainty[:, 2] = L[:, 0, 2] + uncertainty[:, 3] = L[:, 1, 1] + uncertainty[:, 4] = L[:, 1, 2] + uncertainty[:, 5] = L[:, 2, 2] + return uncertainty + +def strip_symmetric(sym): + return strip_lowerdiag(sym) + +def build_rotation(r): + norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3]) + + q = r / norm[:, None] + + R = torch.zeros((q.size(0), 3, 3), device='cuda') + + r = q[:, 0] + x = q[:, 1] + y = q[:, 2] + z = q[:, 3] + + R[:, 0, 0] = 1 - 2 * (y*y + z*z) + R[:, 0, 1] = 2 * (x*y - r*z) + R[:, 0, 2] = 2 * (x*z + r*y) + R[:, 1, 0] = 2 * (x*y + r*z) + R[:, 1, 1] = 1 - 2 * (x*x + z*z) + R[:, 1, 2] = 2 * (y*z - r*x) + R[:, 2, 0] = 2 * (x*z - r*y) + R[:, 2, 1] = 2 * (y*z + r*x) + R[:, 2, 2] = 1 - 2 * (x*x + y*y) + return R + +def build_scaling_rotation(s, r): + L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device="cuda") + R = build_rotation(r) + + L[:,0,0] = s[:,0] + L[:,1,1] = s[:,1] + L[:,2,2] = s[:,2] + + L = R @ L + return L + +def safe_state(silent): + old_f = sys.stdout + class F: + def __init__(self, silent): + self.silent = silent + + def write(self, x): + if not self.silent: + if x.endswith("\n"): + old_f.write(x.replace("\n", " [{}]\n".format(str(datetime.now().strftime("%d/%m %H:%M:%S"))))) + else: + old_f.write(x) + + def flush(self): + old_f.flush() + + sys.stdout = F(silent) + + random.seed(0) + np.random.seed(0) + torch.manual_seed(0) + torch.cuda.set_device(torch.device("cuda:0")) diff --git a/third_party/TRELLIS/trellis/representations/mesh/__init__.py b/third_party/TRELLIS/trellis/representations/mesh/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..38cf35c0853d11cf09bdc228a87ee9d0b2f34b62 --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/mesh/__init__.py @@ -0,0 +1 @@ +from .cube2mesh import SparseFeatures2Mesh, MeshExtractResult diff --git a/third_party/TRELLIS/trellis/representations/mesh/cube2mesh.py b/third_party/TRELLIS/trellis/representations/mesh/cube2mesh.py new file mode 100644 index 0000000000000000000000000000000000000000..44e8776fafbc21d787e2ba855e4c99bd191a0762 --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/mesh/cube2mesh.py @@ -0,0 +1,143 @@ +import torch +from ...modules.sparse import SparseTensor +from easydict import EasyDict as edict +from .utils_cube import * +from .flexicubes.flexicubes import FlexiCubes + + +class MeshExtractResult: + def __init__(self, + vertices, + faces, + vertex_attrs=None, + res=64 + ): + self.vertices = vertices + self.faces = faces.long() + self.vertex_attrs = vertex_attrs + self.face_normal = self.comput_face_normals(vertices, faces) + self.res = res + self.success = (vertices.shape[0] != 0 and faces.shape[0] != 0) + + # training only + self.tsdf_v = None + self.tsdf_s = None + self.reg_loss = None + + def comput_face_normals(self, verts, faces): + i0 = faces[..., 0].long() + i1 = faces[..., 1].long() + i2 = faces[..., 2].long() + + v0 = verts[i0, :] + v1 = verts[i1, :] + v2 = verts[i2, :] + face_normals = torch.cross(v1 - v0, v2 - v0, dim=-1) + face_normals 
= torch.nn.functional.normalize(face_normals, dim=1)
+        # print(face_normals.min(), face_normals.max(), face_normals.shape)
+        return face_normals[:, None, :].repeat(1, 3, 1)
+
+    def comput_v_normals(self, verts, faces):
+        i0 = faces[..., 0].long()
+        i1 = faces[..., 1].long()
+        i2 = faces[..., 2].long()
+
+        v0 = verts[i0, :]
+        v1 = verts[i1, :]
+        v2 = verts[i2, :]
+        face_normals = torch.cross(v1 - v0, v2 - v0, dim=-1)
+        v_normals = torch.zeros_like(verts)
+        v_normals.scatter_add_(0, i0[..., None].repeat(1, 3), face_normals)
+        v_normals.scatter_add_(0, i1[..., None].repeat(1, 3), face_normals)
+        v_normals.scatter_add_(0, i2[..., None].repeat(1, 3), face_normals)
+
+        v_normals = torch.nn.functional.normalize(v_normals, dim=1)
+        return v_normals
+
+
+class SparseFeatures2Mesh:
+    def __init__(self, device="cuda", res=64, use_color=True):
+        '''
+        A model to generate a mesh from sparse feature structures using FlexiCubes.
+        '''
+        super().__init__()
+        self.device = device
+        self.res = res
+        self.mesh_extractor = FlexiCubes(device=device)
+        self.sdf_bias = -1.0 / res
+        verts, cube = construct_dense_grid(self.res, self.device)
+        self.reg_c = cube.to(self.device)
+        self.reg_v = verts.to(self.device)
+        self.use_color = use_color
+        self._calc_layout()
+
+    def _calc_layout(self):
+        LAYOUTS = {
+            'sdf': {'shape': (8, 1), 'size': 8},
+            'deform': {'shape': (8, 3), 'size': 8 * 3},
+            'weights': {'shape': (21,), 'size': 21}
+        }
+        if self.use_color:
+            '''
+            6-channel color, including the normal map
+            '''
+            LAYOUTS['color'] = {'shape': (8, 6,), 'size': 8 * 6}
+        self.layouts = edict(LAYOUTS)
+        start = 0
+        for k, v in self.layouts.items():
+            v['range'] = (start, start + v['size'])
+            start += v['size']
+        self.feats_channels = start
+
+    def get_layout(self, feats : torch.Tensor, name : str):
+        if name not in self.layouts:
+            return None
+        return feats[:, self.layouts[name]['range'][0]:self.layouts[name]['range'][1]].reshape(-1, *self.layouts[name]['shape'])
+
+    def __call__(self, cubefeats : SparseTensor, training=False):
+        """
+        Generates a mesh based on the specified sparse voxel structures.
+
+        Args:
+            cubefeats (SparseTensor): [N x feats_channels] per-cube features packing
+                SDF (8), deform (8x3), FlexiCubes weights (21), and optionally
+                color (8x6), laid out as computed in _calc_layout.
+            training (bool): whether to also produce training-only outputs
+                (regularization loss and TSDF vertices/values).
+        Returns:
+            MeshExtractResult: the extracted mesh, with a success flag and, in
+            training mode, the regularization loss.
+        """
+        # add sdf bias to verts_attrs
+        coords = cubefeats.coords[:, 1:]
+        feats = cubefeats.feats
+
+        sdf, deform, color, weights = [self.get_layout(feats, name) for name in ['sdf', 'deform', 'color', 'weights']]
+        sdf += self.sdf_bias
+        v_attrs = [sdf, deform, color] if self.use_color else [sdf, deform]
+        v_pos, v_attrs, reg_loss = sparse_cube2verts(coords, torch.cat(v_attrs, dim=-1), training=training)
+        v_attrs_d = get_dense_attrs(v_pos, v_attrs, res=self.res+1, sdf_init=True)
+        weights_d = get_dense_attrs(coords, weights, res=self.res, sdf_init=False)
+        if self.use_color:
+            sdf_d, deform_d, colors_d = v_attrs_d[..., 0], v_attrs_d[..., 1:4], v_attrs_d[..., 4:]
+        else:
+            sdf_d, deform_d = v_attrs_d[..., 0], v_attrs_d[..., 1:4]
+            colors_d = None
+
+        x_nx3 = get_defomed_verts(self.reg_v, deform_d, self.res)
+
+        vertices, faces, L_dev, colors = self.mesh_extractor(
+            voxelgrid_vertices=x_nx3,
+            scalar_field=sdf_d,
+            cube_idx=self.reg_c,
+            resolution=self.res,
+            beta=weights_d[:, :12],
+            alpha=weights_d[:, 12:20],
+            gamma_f=weights_d[:, 20],
+            voxelgrid_colors=colors_d,
+            training=training)
+
+        mesh = MeshExtractResult(vertices=vertices, faces=faces, vertex_attrs=colors, res=self.res)
+        if training:
+            if mesh.success:
+                reg_loss += L_dev.mean() * 0.5
+            reg_loss += (weights[:,:20]).abs().mean() * 0.2
+            mesh.reg_loss = reg_loss
+            mesh.tsdf_v = get_defomed_verts(v_pos, v_attrs[:, 1:4], self.res)
+            mesh.tsdf_s = v_attrs[:, 0]
+        return mesh
diff --git a/third_party/TRELLIS/trellis/representations/mesh/flexicubes/LICENSE.txt b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..40e8f765ee25d88128e7b5cd769389c633ba86bb
--- /dev/null
+++ b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/LICENSE.txt
@@ -0,0 +1,90 @@
+Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+
+NVIDIA Source Code License for FlexiCubes
+
+
+=======================================================================
+
+1. Definitions
+
+"Licensor" means any person or entity that distributes its Work.
+
+"Work" means (a) the original work of authorship made available under
+this license, which may include software, documentation, or other files,
+and (b) any additions to or derivative works thereof that are made
+available under this license.
+
+The terms "reproduce," "reproduction," "derivative works," and
+"distribution" have the meaning as provided under U.S. copyright law;
+provided, however, that for the purposes of this license, derivative works
+shall not include works that remain separable from, or merely link
+(or bind by name) to the interfaces of, the Work.
+
+Works are "made available" under this license by including in or with
+the Work either (a) a copyright notice referencing the applicability of
+this license to the Work, or (b) a copy of this license.
+
+2. License Grant
+
+  2.1 Copyright Grant. Subject to the terms and conditions of this license,
+  each Licensor grants to you a perpetual, worldwide, non-exclusive,
+  royalty-free, copyright license to use, reproduce, prepare derivative
+  works of, publicly display, publicly perform, sublicense and distribute
+  its Work and any resulting derivative works in any form.
+
+3. Limitations
+
+  3.1 Redistribution. 
You may reproduce or distribute the Work only if
+  (a) you do so under this license, (b) you include a complete copy of
+  this license with your distribution, and (c) you retain without
+  modification any copyright, patent, trademark, or attribution notices
+  that are present in the Work.
+
+  3.2 Derivative Works. You may specify that additional or different terms
+  apply to the use, reproduction, and distribution of your derivative
+  works of the Work ("Your Terms") only if (a) Your Terms provide that the
+  use limitation in Section 3.3 applies to your derivative works, and (b)
+  you identify the specific derivative works that are subject to Your Terms.
+  Notwithstanding Your Terms, this license (including the redistribution
+  requirements in Section 3.1) will continue to apply to the Work itself.
+
+  3.3 Use Limitation. The Work and any derivative works thereof only may be
+  used or intended for use non-commercially. Notwithstanding the foregoing,
+  NVIDIA Corporation and its affiliates may use the Work and any derivative
+  works commercially. As used herein, "non-commercially" means for research
+  or evaluation purposes only.
+
+  3.4 Patent Claims. If you bring or threaten to bring a patent claim against
+  any Licensor (including any claim, cross-claim or counterclaim in a lawsuit)
+  to enforce any patents that you allege are infringed by any Work, then your
+  rights under this license from such Licensor (including the grant in
+  Section 2.1) will terminate immediately.
+
+  3.5 Trademarks. This license does not grant any rights to use any Licensor's
+  or its affiliates' names, logos, or trademarks, except as necessary to
+  reproduce the notices described in this license.
+
+  3.6 Termination. If you violate any term of this license, then your rights
+  under this license (including the grant in Section 2.1) will terminate
+  immediately.
+
+4. Disclaimer of Warranty.
+
+THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT.
+YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER THIS LICENSE.
+
+5. Limitation of Liability.
+
+EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL THEORY,
+WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE SHALL ANY
+LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, INDIRECT, SPECIAL,
+INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR RELATED TO THIS LICENSE,
+THE USE OR INABILITY TO USE THE WORK (INCLUDING BUT NOT LIMITED TO LOSS OF
+GOODWILL, BUSINESS INTERRUPTION, LOST PROFITS OR DATA, COMPUTER FAILURE OR
+MALFUNCTION, OR ANY OTHER DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+=======================================================================
\ No newline at end of file
diff --git a/third_party/TRELLIS/trellis/representations/mesh/flexicubes/README.md b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8f8b460651edef71c9636d62868c239defaa73ce
--- /dev/null
+++ b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/README.md
@@ -0,0 +1,110 @@
+## Flexible Isosurface Extraction for Gradient-Based Mesh Optimization (FlexiCubes)
Official PyTorch implementation
+
+*(teaser figure omitted)*
+
+FlexiCubes is a high-quality isosurface representation specifically designed for gradient-based mesh optimization with respect to geometric, visual, or even physical objectives. For more details, please refer to our [paper](https://arxiv.org/abs/2308.05371) and [project page](https://research.nvidia.com/labs/toronto-ai/flexicubes/).
+
+## Highlights
+* [Getting started](https://github.com/nv-tlabs/FlexiCubes#getting-started)
+* [Basic workflow](https://github.com/nv-tlabs/FlexiCubes#example-usage)
+* [nvdiffrec: image-based reconstruction example](https://github.com/NVlabs/nvdiffrec#news)
+* [GET3D: generative AI example](https://github.com/nv-tlabs/GET3D#employing-flexicubes)
+* [Bibtex](https://github.com/nv-tlabs/FlexiCubes#citation)
+
+## Getting Started
+
+The core functions of FlexiCubes are now in [Kaolin](https://github.com/NVIDIAGameWorks/kaolin/) starting from v0.15.0. See installation instructions [here](https://kaolin.readthedocs.io/en/latest/notes/installation.html) and API documentation [here](https://kaolin.readthedocs.io/en/latest/modules/kaolin.non_commercial.html#kaolin.non_commercial.FlexiCubes)
+
+The original code of the paper is still visible in `flexicube.py`.
+
+## Example Usage
+
+### Gradient-Based Mesh Optimization
+We provide examples demonstrating how to use FlexiCubes for reconstructing unknown meshes through gradient-based optimization. Specifically, starting from a randomly initialized SDF, we optimize the shape towards the reference mesh by minimizing their geometric difference, measured by multiview mask and depth losses. This workflow is a simplified version of `nvdiffrec` with code largely borrowed from the [nvdiffrec GitHub](https://github.com/NVlabs/nvdiffrec). We use the same pipeline to conduct the analysis in Section 3 and the main experiments described in Section 5 of our paper. We provide a detailed tutorial in `examples/optimization.ipynb`, along with an optimization script in `examples/optimize.py` which accepts command-line arguments.
+
+
+To run the examples, it is suggested to install the Conda environment as detailed below:
+```sh
+conda create -n flexicubes python=3.9
+conda activate flexicubes
+conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cudatoolkit=11.3 -c pytorch
+pip install imageio trimesh tqdm matplotlib torch_scatter ninja
+pip install git+https://github.com/NVlabs/nvdiffrast/
+pip install kaolin==0.15.0 -f https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-1.12.0_cu113.html
+```
+
+Then download the dataset collected by [Myles et al.](https://vcg.isti.cnr.it/Publications/2014/MPZ14/) as follows. We include one shape in 'examples/data/inputmodels/block.obj' if you want to test without downloading the full dataset.
+
+```sh
+cd examples
+python download_data.py
+```
+
+After downloading the data, run shape optimization with the following example command:
+```sh
+python optimize.py --ref_mesh data/inputmodels/block.obj --out_dir out/block
+```
+You can find visualization and output meshes in `out/block`. Below, we show the initial and final shapes during optimization, with the reference shape on the right. 
+## Tips for using FlexiCubes
+### Regularization losses:
+We commonly use three regularizers in our mesh optimization pipelines, referenced in lines `L104-L106` in `examples/optimize.py`. The weights of these regularizers should be scaled according to your application objectives. Initially, we suggest starting with low weights, because strong regularization can hinder convergence. You can incrementally increase the weights if you notice artifacts appearing in the optimized meshes. Specifically:
+
+* The loss function at `L104` helps to remove floaters in areas of the shape that are not supervised by the application objective, such as internal faces when using image supervision only.
+* The L_dev loss at `L105` can be increased if you observe artifacts in flat areas, as illustrated in the image below.
+* Generally, the L1 regularizer on flexible weights at `L106` does not have a significant impact during the optimization of a single shape. However, we found it to be effective in stabilizing training in generative pipelines such as GET3D.
+
+![Ablating L_dev](images/ablate_L_dev.jpg)
+
+### Resolution of voxel grid vs. tetrahedral grid:
+If you are switching from our previous work, DMTet, it is important to note the difference in grid resolution compared to FlexiCubes. In both implementations, the resolution is defined by the edge length: a grid resolution of `n` means the grid edge length is 1/n for both the voxel and tetrahedral grids. However, a tetrahedral grid with a resolution of `n` contains only `(n/2+1)³` grid vertices, in contrast to the `(n+1)³` vertices in a voxel grid. Consequently, if you are switching from DMTet to FlexiCubes while maintaining the same resolution, you will notice not only a denser output mesh but also a substantial increase in computational cost. To align the triangle count in the output meshes more closely, we recommend adopting a 4:5 resolution ratio between the voxel grid and the tetrahedral grid. For instance, in our paper, `64³` FlexiCubes generates approximately the same number of triangles as `80³` DMTet.
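+
+The vertex-count bookkeeping is easy to check numerically (an illustrative helper, not part of the repository; it just evaluates the `(n+1)³` and `(n/2+1)³` formulas above):
+
+```python
+def grid_vertex_counts(n):
+    # Corner-vertex counts at grid resolution n (grid edge length 1/n).
+    voxel = (n + 1) ** 3       # FlexiCubes voxel grid
+    tet = (n // 2 + 1) ** 3    # DMTet tetrahedral grid
+    return voxel, tet
+
+print(grid_vertex_counts(64))  # (274625, 35937)
+print(grid_vertex_counts(80))  # (531441, 68921)
+# 64^3 FlexiCubes vs. 80^3 DMTet is the 4:5 pairing used in the paper.
+```
+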
+## Applications
+FlexiCubes is now integrated into NVIDIA applications as a drop-in replacement for DMTet. You can visit their GitHub pages to see how FlexiCubes is used in advanced photogrammetry and 3D generative pipelines.
+
+[Extracting Triangular 3D Models, Materials, and Lighting From Images (nvdiffrec)](https://github.com/NVlabs/nvdiffrec#news)
+
+[GET3D: A Generative Model of High Quality 3D Textured Shapes Learned from Images](https://github.com/nv-tlabs/GET3D#employing-flexicubes)
+
+## License
+Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+This work is made available under the [Nvidia Source Code License](LICENSE.txt).
+
+For business inquiries, please visit our website and submit the form: [NVIDIA Research Licensing](https://www.nvidia.com/en-us/research/inquiries/).
+
+## Citation
+```bibtex
+@article{shen2023flexicubes,
+author = {Shen, Tianchang and Munkberg, Jacob and Hasselgren, Jon and Yin, Kangxue and Wang, Zian
+   and Chen, Wenzheng and Gojcic, Zan and Fidler, Sanja and Sharp, Nicholas and Gao, Jun},
+title = {Flexible Isosurface Extraction for Gradient-Based Mesh Optimization},
+year = {2023},
+issue_date = {August 2023},
+publisher = {Association for Computing Machinery},
+address = {New York, NY, USA},
+volume = {42},
+number = {4},
+issn = {0730-0301},
+url = {https://doi.org/10.1145/3592430},
+doi = {10.1145/3592430},
+journal = {ACM Trans. Graph.},
+month = {jul},
+articleno = {37},
+numpages = {16}
+}
+```
diff --git a/third_party/TRELLIS/trellis/representations/mesh/flexicubes/flexicubes.py b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/flexicubes.py
new file mode 100644
index 0000000000000000000000000000000000000000..15a5960be0aa2a03454ee0dec235961de0cd4564
--- /dev/null
+++ b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/flexicubes.py
@@ -0,0 +1,384 @@
+# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
+
+import torch
+from .tables import *
+from kaolin.utils.testing import check_tensor
+
+__all__ = [
+    'FlexiCubes'
+]
+
+
+class FlexiCubes:
+    def __init__(self, device="cuda"):
+
+        self.device = device
+        self.dmc_table = torch.tensor(dmc_table, dtype=torch.long, device=device, requires_grad=False)
+        self.num_vd_table = torch.tensor(num_vd_table,
+                                         dtype=torch.long, device=device, requires_grad=False)
+        self.check_table = torch.tensor(
+            check_table,
+            dtype=torch.long, device=device, requires_grad=False)
+
+        self.tet_table = torch.tensor(tet_table, dtype=torch.long, device=device, requires_grad=False)
+        self.quad_split_1 = torch.tensor([0, 1, 2, 0, 2, 3], dtype=torch.long, device=device, requires_grad=False)
+        self.quad_split_2 = torch.tensor([0, 1, 3, 3, 1, 2], dtype=torch.long, device=device, requires_grad=False)
+        self.quad_split_train = torch.tensor(
+            [0, 1, 1, 2, 2, 3, 3, 0], dtype=torch.long, device=device, requires_grad=False)
+
+        self.cube_corners = torch.tensor([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [
+            1, 0, 1], [0, 1, 1], [1, 1, 1]], dtype=torch.float, device=device)
+        self.cube_corners_idx = torch.pow(2, torch.arange(8, requires_grad=False))
+        self.cube_edges = torch.tensor([0, 1, 1, 5, 4, 5, 0, 4, 2, 3, 3, 7, 6, 7, 2, 6,
+                                        2, 0, 3, 1, 7, 5, 6, 4], dtype=torch.long, device=device, requires_grad=False)
+
+        self.edge_dir_table = torch.tensor([0, 2, 0, 2, 0, 2, 0, 2, 1, 1, 1, 1],
+                                           dtype=torch.long, device=device)
+        self.dir_faces_table = torch.tensor([
+            [[5, 4], [3, 2], [4, 5], [2, 3]],
+            [[5, 4], [1, 0], [4, 5], [0, 1]],
+            [[3, 2], [1, 0], [2, 3], [0, 1]]
+        ], dtype=torch.long, device=device)
+        self.adj_pairs = torch.tensor([0, 1, 1, 3, 3, 2, 2, 0], dtype=torch.long, device=device)
+
+    def __call__(self, voxelgrid_vertices, scalar_field, cube_idx, resolution, qef_reg_scale=1e-3,
+                 weight_scale=0.99, beta=None, alpha=None, gamma_f=None, voxelgrid_colors=None, training=False):
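+        """
+        Extracts a triangle mesh from `scalar_field` sampled on the given voxel grid.
+
+        Shapes follow the assertions below: `voxelgrid_vertices` is (num_vertices, 3),
+        `scalar_field` is (num_vertices,) with negative values inside the surface,
+        `cube_idx` is (num_cubes, 8) with corners ordered as in `self.cube_corners`,
+        and `resolution` is an int or a list of three ints for the regular grid.
+        `beta` (num_cubes, 12), `alpha` (num_cubes, 8) and `gamma_f` (num_cubes,)
+        are the optional per-cube FlexiCubes weights; `voxelgrid_colors` is an
+        optional (num_vertices, C) attribute field interpolated onto the mesh.
+
+        Returns (vertices, faces, L_dev, vertices_color), where L_dev is the
+        regularizer of Equation 8 and vertices_color is None when no colors are given.
+        """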
+        assert torch.is_tensor(voxelgrid_vertices) and \
+            check_tensor(voxelgrid_vertices, (None, 3), throw=False), \
+            "'voxelgrid_vertices' should be a tensor of shape (num_vertices, 3)"
+        num_vertices = voxelgrid_vertices.shape[0]
+        assert torch.is_tensor(scalar_field) and \
+            check_tensor(scalar_field, (num_vertices,), throw=False), \
+            "'scalar_field' should be a tensor of shape (num_vertices,)"
+        assert torch.is_tensor(cube_idx) and \
+            check_tensor(cube_idx, (None, 8), throw=False), \
+            "'cube_idx' should be a tensor of shape (num_cubes, 8)"
+        num_cubes = cube_idx.shape[0]
+        assert beta is None or (
+            torch.is_tensor(beta) and
+            check_tensor(beta, (num_cubes, 12), throw=False)
+        ), "'beta' should be a tensor of shape (num_cubes, 12)"
+        assert alpha is None or (
+            torch.is_tensor(alpha) and
+            check_tensor(alpha, (num_cubes, 8), throw=False)
+        ), "'alpha' should be a tensor of shape (num_cubes, 8)"
+        assert gamma_f is None or (
+            torch.is_tensor(gamma_f) and
+            check_tensor(gamma_f, (num_cubes,), throw=False)
+        ), "'gamma_f' should be a tensor of shape (num_cubes,)"
+
+        surf_cubes, occ_fx8 = self._identify_surf_cubes(scalar_field, cube_idx)
+        if surf_cubes.sum() == 0:
+            return (
+                torch.zeros((0, 3), device=self.device),
+                torch.zeros((0, 3), dtype=torch.long, device=self.device),
+                torch.zeros((0), device=self.device),
+                torch.zeros((0, voxelgrid_colors.shape[-1]), device=self.device) if voxelgrid_colors is not None else None
+            )
+        beta, alpha, gamma_f = self._normalize_weights(
+            beta, alpha, gamma_f, surf_cubes, weight_scale)
+
+        if voxelgrid_colors is not None:
+            voxelgrid_colors = torch.sigmoid(voxelgrid_colors)
+
+        case_ids = self._get_case_id(occ_fx8, surf_cubes, resolution)
+
+        surf_edges, idx_map, edge_counts, surf_edges_mask = self._identify_surf_edges(
+            scalar_field, cube_idx, surf_cubes
+        )
+
+        vd, L_dev, vd_gamma, vd_idx_map, vd_color = self._compute_vd(
+            voxelgrid_vertices, cube_idx[surf_cubes], surf_edges, scalar_field,
+            case_ids, beta, alpha, gamma_f, idx_map, qef_reg_scale, voxelgrid_colors)
+        vertices, faces, s_edges, edge_indices, vertices_color = self._triangulate(
+            scalar_field, surf_edges, vd, vd_gamma, edge_counts, idx_map,
+            vd_idx_map, surf_edges_mask, training, vd_color)
+        return vertices, faces, L_dev, vertices_color
+
+    def _compute_reg_loss(self, vd, ue, edge_group_to_vd, vd_num_edges):
+        """
+        Regularizer L_dev as in Equation 8
+        """
+        dist = torch.norm(ue - torch.index_select(input=vd, index=edge_group_to_vd, dim=0), dim=-1)
+        mean_l2 = torch.zeros_like(vd[:, 0])
+        mean_l2 = (mean_l2).index_add_(0, edge_group_to_vd, dist) / vd_num_edges.squeeze(1).float()
+        mad = (dist - torch.index_select(input=mean_l2, index=edge_group_to_vd, dim=0)).abs()
+        return mad
+
+    def _normalize_weights(self, beta, alpha, gamma_f, surf_cubes, weight_scale):
+        """
+        Normalizes the given weights to be non-negative. If input weights are None, it creates and returns a set of weights of ones.
+ """ + n_cubes = surf_cubes.shape[0] + + if beta is not None: + beta = (torch.tanh(beta) * weight_scale + 1) + else: + beta = torch.ones((n_cubes, 12), dtype=torch.float, device=self.device) + + if alpha is not None: + alpha = (torch.tanh(alpha) * weight_scale + 1) + else: + alpha = torch.ones((n_cubes, 8), dtype=torch.float, device=self.device) + + if gamma_f is not None: + gamma_f = torch.sigmoid(gamma_f) * weight_scale + (1 - weight_scale) / 2 + else: + gamma_f = torch.ones((n_cubes), dtype=torch.float, device=self.device) + + return beta[surf_cubes], alpha[surf_cubes], gamma_f[surf_cubes] + + @torch.no_grad() + def _get_case_id(self, occ_fx8, surf_cubes, res): + """ + Obtains the ID of topology cases based on cell corner occupancy. This function resolves the + ambiguity in the Dual Marching Cubes (DMC) configurations as described in Section 1.3 of the + supplementary material. It should be noted that this function assumes a regular grid. + """ + case_ids = (occ_fx8[surf_cubes] * self.cube_corners_idx.to(self.device).unsqueeze(0)).sum(-1) + + problem_config = self.check_table.to(self.device)[case_ids] + to_check = problem_config[..., 0] == 1 + problem_config = problem_config[to_check] + if not isinstance(res, (list, tuple)): + res = [res, res, res] + + # The 'problematic_configs' only contain configurations for surface cubes. Next, we construct a 3D array, + # 'problem_config_full', to store configurations for all cubes (with default config for non-surface cubes). + # This allows efficient checking on adjacent cubes. + problem_config_full = torch.zeros(list(res) + [5], device=self.device, dtype=torch.long) + vol_idx = torch.nonzero(problem_config_full[..., 0] == 0) # N, 3 + vol_idx_problem = vol_idx[surf_cubes][to_check] + problem_config_full[vol_idx_problem[..., 0], vol_idx_problem[..., 1], vol_idx_problem[..., 2]] = problem_config + vol_idx_problem_adj = vol_idx_problem + problem_config[..., 1:4] + + within_range = ( + vol_idx_problem_adj[..., 0] >= 0) & ( + vol_idx_problem_adj[..., 0] < res[0]) & ( + vol_idx_problem_adj[..., 1] >= 0) & ( + vol_idx_problem_adj[..., 1] < res[1]) & ( + vol_idx_problem_adj[..., 2] >= 0) & ( + vol_idx_problem_adj[..., 2] < res[2]) + + vol_idx_problem = vol_idx_problem[within_range] + vol_idx_problem_adj = vol_idx_problem_adj[within_range] + problem_config = problem_config[within_range] + problem_config_adj = problem_config_full[vol_idx_problem_adj[..., 0], + vol_idx_problem_adj[..., 1], vol_idx_problem_adj[..., 2]] + # If two cubes with cases C16 and C19 share an ambiguous face, both cases are inverted. + to_invert = (problem_config_adj[..., 0] == 1) + idx = torch.arange(case_ids.shape[0], device=self.device)[to_check][within_range][to_invert] + case_ids.index_put_((idx,), problem_config[to_invert][..., -1]) + return case_ids + + @torch.no_grad() + def _identify_surf_edges(self, scalar_field, cube_idx, surf_cubes): + """ + Identifies grid edges that intersect with the underlying surface by checking for opposite signs. As each edge + can be shared by multiple cubes, this function also assigns a unique index to each surface-intersecting edge + and marks the cube edges with this index. 
+ """ + occ_n = scalar_field < 0 + all_edges = cube_idx[surf_cubes][:, self.cube_edges].reshape(-1, 2) + unique_edges, _idx_map, counts = torch.unique(all_edges, dim=0, return_inverse=True, return_counts=True) + + unique_edges = unique_edges.long() + mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1 + + surf_edges_mask = mask_edges[_idx_map] + counts = counts[_idx_map] + + mapping = torch.ones((unique_edges.shape[0]), dtype=torch.long, device=cube_idx.device) * -1 + mapping[mask_edges] = torch.arange(mask_edges.sum(), device=cube_idx.device) + # Shaped as [number of cubes x 12 edges per cube]. This is later used to map a cube edge to the unique index + # for a surface-intersecting edge. Non-surface-intersecting edges are marked with -1. + idx_map = mapping[_idx_map] + surf_edges = unique_edges[mask_edges] + return surf_edges, idx_map, counts, surf_edges_mask + + @torch.no_grad() + def _identify_surf_cubes(self, scalar_field, cube_idx): + """ + Identifies grid cubes that intersect with the underlying surface by checking if the signs at + all corners are not identical. + """ + occ_n = scalar_field < 0 + occ_fx8 = occ_n[cube_idx.reshape(-1)].reshape(-1, 8) + _occ_sum = torch.sum(occ_fx8, -1) + surf_cubes = (_occ_sum > 0) & (_occ_sum < 8) + return surf_cubes, occ_fx8 + + def _linear_interp(self, edges_weight, edges_x): + """ + Computes the location of zero-crossings on 'edges_x' using linear interpolation with 'edges_weight'. + """ + edge_dim = edges_weight.dim() - 2 + assert edges_weight.shape[edge_dim] == 2 + edges_weight = torch.cat([torch.index_select(input=edges_weight, index=torch.tensor(1, device=self.device), dim=edge_dim), - + torch.index_select(input=edges_weight, index=torch.tensor(0, device=self.device), dim=edge_dim)] + , edge_dim) + denominator = edges_weight.sum(edge_dim) + ue = (edges_x * edges_weight).sum(edge_dim) / denominator + return ue + + def _solve_vd_QEF(self, p_bxnx3, norm_bxnx3, c_bx3, qef_reg_scale): + p_bxnx3 = p_bxnx3.reshape(-1, 7, 3) + norm_bxnx3 = norm_bxnx3.reshape(-1, 7, 3) + c_bx3 = c_bx3.reshape(-1, 3) + A = norm_bxnx3 + B = ((p_bxnx3) * norm_bxnx3).sum(-1, keepdims=True) + + A_reg = (torch.eye(3, device=p_bxnx3.device) * qef_reg_scale).unsqueeze(0).repeat(p_bxnx3.shape[0], 1, 1) + B_reg = (qef_reg_scale * c_bx3).unsqueeze(-1) + A = torch.cat([A, A_reg], 1) + B = torch.cat([B, B_reg], 1) + dual_verts = torch.linalg.lstsq(A, B).solution.squeeze(-1) + return dual_verts + + def _compute_vd(self, voxelgrid_vertices, surf_cubes_fx8, surf_edges, scalar_field, + case_ids, beta, alpha, gamma_f, idx_map, qef_reg_scale, voxelgrid_colors): + """ + Computes the location of dual vertices as described in Section 4.2 + """ + alpha_nx12x2 = torch.index_select(input=alpha, index=self.cube_edges, dim=1).reshape(-1, 12, 2) + surf_edges_x = torch.index_select(input=voxelgrid_vertices, index=surf_edges.reshape(-1), dim=0).reshape(-1, 2, 3) + surf_edges_s = torch.index_select(input=scalar_field, index=surf_edges.reshape(-1), dim=0).reshape(-1, 2, 1) + zero_crossing = self._linear_interp(surf_edges_s, surf_edges_x) + + if voxelgrid_colors is not None: + C = voxelgrid_colors.shape[-1] + surf_edges_c = torch.index_select(input=voxelgrid_colors, index=surf_edges.reshape(-1), dim=0).reshape(-1, 2, C) + + idx_map = idx_map.reshape(-1, 12) + num_vd = torch.index_select(input=self.num_vd_table, index=case_ids, dim=0) + edge_group, edge_group_to_vd, edge_group_to_cube, vd_num_edges, vd_gamma = [], [], [], [], [] + + # if color is not None: + # vd_color = [] + + 
+        total_num_vd = 0
+        vd_idx_map = torch.zeros((case_ids.shape[0], 12), dtype=torch.long, device=self.device, requires_grad=False)
+
+        for num in torch.unique(num_vd):
+            cur_cubes = (num_vd == num)  # consider cubes with the same numbers of vd emitted (for batching)
+            curr_num_vd = cur_cubes.sum() * num
+            curr_edge_group = self.dmc_table[case_ids[cur_cubes], :num].reshape(-1, num * 7)
+            curr_edge_group_to_vd = torch.arange(
+                curr_num_vd, device=self.device).unsqueeze(-1).repeat(1, 7) + total_num_vd
+            total_num_vd += curr_num_vd
+            curr_edge_group_to_cube = torch.arange(idx_map.shape[0], device=self.device)[
+                cur_cubes].unsqueeze(-1).repeat(1, num * 7).reshape_as(curr_edge_group)
+
+            curr_mask = (curr_edge_group != -1)
+            edge_group.append(torch.masked_select(curr_edge_group, curr_mask))
+            edge_group_to_vd.append(torch.masked_select(curr_edge_group_to_vd.reshape_as(curr_edge_group), curr_mask))
+            edge_group_to_cube.append(torch.masked_select(curr_edge_group_to_cube, curr_mask))
+            vd_num_edges.append(curr_mask.reshape(-1, 7).sum(-1, keepdims=True))
+            vd_gamma.append(torch.masked_select(gamma_f, cur_cubes).unsqueeze(-1).repeat(1, num).reshape(-1))
+            # if color is not None:
+            #     vd_color.append(color[cur_cubes].unsqueeze(1).repeat(1, num, 1).reshape(-1, 3))
+
+        edge_group = torch.cat(edge_group)
+        edge_group_to_vd = torch.cat(edge_group_to_vd)
+        edge_group_to_cube = torch.cat(edge_group_to_cube)
+        vd_num_edges = torch.cat(vd_num_edges)
+        vd_gamma = torch.cat(vd_gamma)
+        # if color is not None:
+        #     vd_color = torch.cat(vd_color)
+        # else:
+        #     vd_color = None
+
+        vd = torch.zeros((total_num_vd, 3), device=self.device)
+        beta_sum = torch.zeros((total_num_vd, 1), device=self.device)
+
+        idx_group = torch.gather(input=idx_map.reshape(-1), dim=0, index=edge_group_to_cube * 12 + edge_group)
+
+        x_group = torch.index_select(input=surf_edges_x, index=idx_group.reshape(-1), dim=0).reshape(-1, 2, 3)
+        s_group = torch.index_select(input=surf_edges_s, index=idx_group.reshape(-1), dim=0).reshape(-1, 2, 1)
+
+        zero_crossing_group = torch.index_select(
+            input=zero_crossing, index=idx_group.reshape(-1), dim=0).reshape(-1, 3)
+
+        alpha_group = torch.index_select(input=alpha_nx12x2.reshape(-1, 2), dim=0,
+                                         index=edge_group_to_cube * 12 + edge_group).reshape(-1, 2, 1)
+        ue_group = self._linear_interp(s_group * alpha_group, x_group)
+
+        beta_group = torch.gather(input=beta.reshape(-1), dim=0,
+                                  index=edge_group_to_cube * 12 + edge_group).reshape(-1, 1)
+        beta_sum = beta_sum.index_add_(0, index=edge_group_to_vd, source=beta_group)
+        vd = vd.index_add_(0, index=edge_group_to_vd, source=ue_group * beta_group) / beta_sum
+
+        # Interpolate colors with the same method used for the dual vertices.
+        if voxelgrid_colors is not None:
+            vd_color = torch.zeros((total_num_vd, C), device=self.device)
+            c_group = torch.index_select(input=surf_edges_c, index=idx_group.reshape(-1), dim=0).reshape(-1, 2, C)
+            uc_group = self._linear_interp(s_group * alpha_group, c_group)
+            vd_color = vd_color.index_add_(0, index=edge_group_to_vd, source=uc_group * beta_group) / beta_sum
+        else:
+            vd_color = None
+
+        L_dev = self._compute_reg_loss(vd, zero_crossing_group, edge_group_to_vd, vd_num_edges)
+
+        v_idx = torch.arange(vd.shape[0], device=self.device)  # + total_num_vd
+
+        vd_idx_map = (vd_idx_map.reshape(-1)).scatter(dim=0, index=edge_group_to_cube *
+                                                      12 + edge_group, src=v_idx[edge_group_to_vd])
+
+        return vd, L_dev, vd_gamma, vd_idx_map, vd_color
+
+    def _triangulate(self, scalar_field, surf_edges, vd, vd_gamma, edge_counts, idx_map,
vd_idx_map, surf_edges_mask, training, vd_color): + """ + Connects four neighboring dual vertices to form a quadrilateral. The quadrilaterals are then split into + triangles based on the gamma parameter, as described in Section 4.3. + """ + with torch.no_grad(): + group_mask = (edge_counts == 4) & surf_edges_mask # surface edges shared by 4 cubes. + group = idx_map.reshape(-1)[group_mask] + vd_idx = vd_idx_map[group_mask] + edge_indices, indices = torch.sort(group, stable=True) + quad_vd_idx = vd_idx[indices].reshape(-1, 4) + + # Ensure all face directions point towards the positive SDF to maintain consistent winding. + s_edges = scalar_field[surf_edges[edge_indices.reshape(-1, 4)[:, 0]].reshape(-1)].reshape(-1, 2) + flip_mask = s_edges[:, 0] > 0 + quad_vd_idx = torch.cat((quad_vd_idx[flip_mask][:, [0, 1, 3, 2]], + quad_vd_idx[~flip_mask][:, [2, 3, 1, 0]])) + + quad_gamma = torch.index_select(input=vd_gamma, index=quad_vd_idx.reshape(-1), dim=0).reshape(-1, 4) + gamma_02 = quad_gamma[:, 0] * quad_gamma[:, 2] + gamma_13 = quad_gamma[:, 1] * quad_gamma[:, 3] + if not training: + mask = (gamma_02 > gamma_13) + faces = torch.zeros((quad_gamma.shape[0], 6), dtype=torch.long, device=quad_vd_idx.device) + faces[mask] = quad_vd_idx[mask][:, self.quad_split_1] + faces[~mask] = quad_vd_idx[~mask][:, self.quad_split_2] + faces = faces.reshape(-1, 3) + else: + vd_quad = torch.index_select(input=vd, index=quad_vd_idx.reshape(-1), dim=0).reshape(-1, 4, 3) + vd_02 = (vd_quad[:, 0] + vd_quad[:, 2]) / 2 + vd_13 = (vd_quad[:, 1] + vd_quad[:, 3]) / 2 + weight_sum = (gamma_02 + gamma_13) + 1e-8 + vd_center = (vd_02 * gamma_02.unsqueeze(-1) + vd_13 * gamma_13.unsqueeze(-1)) / weight_sum.unsqueeze(-1) + + if vd_color is not None: + color_quad = torch.index_select(input=vd_color, index=quad_vd_idx.reshape(-1), dim=0).reshape(-1, 4, vd_color.shape[-1]) + color_02 = (color_quad[:, 0] + color_quad[:, 2]) / 2 + color_13 = (color_quad[:, 1] + color_quad[:, 3]) / 2 + color_center = (color_02 * gamma_02.unsqueeze(-1) + color_13 * gamma_13.unsqueeze(-1)) / weight_sum.unsqueeze(-1) + vd_color = torch.cat([vd_color, color_center]) + + + vd_center_idx = torch.arange(vd_center.shape[0], device=self.device) + vd.shape[0] + vd = torch.cat([vd, vd_center]) + faces = quad_vd_idx[:, self.quad_split_train].reshape(-1, 4, 2) + faces = torch.cat([faces, vd_center_idx.reshape(-1, 1, 1).repeat(1, 4, 1)], -1).reshape(-1, 3) + return vd, faces, s_edges, edge_indices, vd_color \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/representations/mesh/flexicubes/images/ablate_L_dev.jpg b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/images/ablate_L_dev.jpg new file mode 100644 index 0000000000000000000000000000000000000000..461bd1ce2a73d6b6e0ee61648af7746c2254bc53 Binary files /dev/null and b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/images/ablate_L_dev.jpg differ diff --git a/third_party/TRELLIS/trellis/representations/mesh/flexicubes/images/block_final.png b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/images/block_final.png new file mode 100644 index 0000000000000000000000000000000000000000..07a18ad0708370f6b603943b37ee130d2f25d383 --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/images/block_final.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d030fee195d332f63ef80805486ed0b4074b1afc34efcba621e385aca9ae9135 +size 55999 diff --git a/third_party/TRELLIS/trellis/representations/mesh/flexicubes/images/block_init.png 
b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/images/block_init.png new file mode 100644 index 0000000000000000000000000000000000000000..aadc74a6da402df263d4b35396d033284e22a630 --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/images/block_init.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:699ba21d95cce9d1504d31fca3694ba339f21703ac0bc3240c87df6ac2d2db3e +size 198533 diff --git a/third_party/TRELLIS/trellis/representations/mesh/flexicubes/images/teaser_top.png b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/images/teaser_top.png new file mode 100644 index 0000000000000000000000000000000000000000..5ae12891d528010988e427e935e7a0620cd1b66a --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/images/teaser_top.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71c27efaeeb7fc3357440607b34805495fc34acf39be00bb70dd315b5b25a71d +size 3562986 diff --git a/third_party/TRELLIS/trellis/representations/mesh/flexicubes/tables.py b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/tables.py new file mode 100644 index 0000000000000000000000000000000000000000..5873e7727b5595a1e4fbc3bd10ae5be8f3d06cca --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/mesh/flexicubes/tables.py @@ -0,0 +1,791 @@ +# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited. 
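+# Lookup-table layout: `dmc_table` below has one row per corner-occupancy case
+# (256 in total); each row lists up to 4 dual vertices, and each dual vertex is
+# given by up to 7 incident cube-edge indices, padded with -1 (consumed in
+# FlexiCubes._compute_vd as dmc_table[case_ids, :num] with a width of 7).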
+dmc_table = [ +[[-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 8, 9, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 7, 8, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 7, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [4, 7, 8, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 4, 7, 9, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 9, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [4, 5, 9, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 4, 5, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 4, 5, 8, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[5, 7, 8, 9, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 5, 7, 9, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 5, 7, 8, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 5, 7, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 8, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 8, 9, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 7, 8, -1, -1, -1, -1], [2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 4, 7, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [4, 7, 8, -1, -1, -1, -1], [2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 4, 7, 9, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 9, -1, -1, -1, -1], [2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 8, 11, -1, -1, -1], [4, 5, 9, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 4, 5, -1, -1, -1], [2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 4, 5, 8, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[5, 7, 8, 9, -1, -1, -1], [2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 5, 7, 9, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, 
-1, -1, -1, -1]], +[[0, 1, 5, 7, 8, -1, -1], [2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 5, 7, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 9, 10, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 8, 9, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 7, 8, -1, -1, -1, -1], [1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 7, -1, -1, -1], [1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 9, 10, -1, -1, -1], [4, 7, 8, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 4, 7, 9, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 9, -1, -1, -1, -1], [1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [4, 5, 9, -1, -1, -1, -1], [1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 4, 5, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 4, 5, 8, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[5, 7, 8, 9, -1, -1, -1], [1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 5, 7, 9, -1, -1], [1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 5, 7, 8, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 5, 7, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 10, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 8, 10, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 9, 10, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[8, 9, 10, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 7, 8, -1, -1, -1, -1], [1, 3, 10, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 4, 7, 10, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 9, 10, 11, -1, -1], [4, 7, 8, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 7, 9, 10, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 9, -1, -1, -1, -1], [1, 3, 10, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 8, 10, 11, -1, -1], [4, 5, 9, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 5, 10, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 8, 10, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, 
-1, -1, -1, -1]], +[[5, 7, 8, 9, -1, -1, -1], [1, 3, 10, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 5, 7, 9, 10, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 5, 7, 8, 10, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[5, 7, 10, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 8, 9, -1, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 6, 8, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 6, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [4, 6, 8, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 4, 6, 9, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 9, -1, -1, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [4, 5, 9, -1, -1, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 4, 5, -1, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 4, 5, 8, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[5, 6, 8, 9, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 5, 6, 9, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 5, 6, 8, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 5, 6, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 6, 7, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 6, 7, 8, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [2, 3, 6, 7, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 6, 7, 8, 9, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 4, 6, 8, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 4, 6, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [2, 3, 4, 6, 8, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 4, 6, 9, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 9, -1, -1, -1, -1], [2, 3, 6, 7, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 6, 7, 8, -1, -1], [4, 5, 9, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], 
+[[0, 1, 4, 5, -1, -1, -1], [2, 3, 6, 7, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 4, 5, 6, 7, 8], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 5, 6, 8, 9, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 5, 6, 9, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 2, 3, 5, 6, 8], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 5, 6, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 10, -1, -1, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [1, 2, 10, -1, -1, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 9, 10, -1, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 8, 9, 10, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 6, 8, 11, -1, -1, -1], [1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 6, 11, -1, -1], [1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 9, 10, -1, -1, -1], [4, 6, 8, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 4, 6, 9, 10, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 9, -1, -1, -1, -1], [1, 2, 10, -1, -1, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [4, 5, 9, -1, -1, -1, -1], [1, 2, 10, -1, -1, -1, -1], [6, 7, 11, -1, -1, -1, -1]], +[[0, 2, 4, 5, 10, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 4, 5, 8, 10, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[5, 6, 8, 9, 11, -1, -1], [1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 5, 6, 9, 11, -1], [1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 5, 6, 8, 10, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 5, 6, 10, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 6, 7, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 6, 7, 8, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 6, 7, 9, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[6, 7, 8, 9, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 4, 6, 8, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 4, 6, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 6, 8, 9, 10], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 6, 9, 10, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 9, -1, -1, -1, -1], [1, 
3, 6, 7, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 6, 7, 8, 10, -1], [4, 5, 9, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 5, 6, 7, 10], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 6, 7, 8, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 5, 6, 8, 9, 10], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 5, 6, 9, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 8, 9, -1, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 7, 8, -1, -1, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 7, -1, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [4, 7, 8, -1, -1, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 4, 7, 9, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 6, 9, 10, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [4, 6, 9, 10, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 4, 6, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 4, 6, 8, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[6, 7, 8, 9, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 6, 7, 9, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 6, 7, 8, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 6, 7, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 11, -1, -1, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 8, 11, -1, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [2, 3, 11, -1, -1, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 8, 9, 11, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 7, 8, -1, -1, -1, -1], [2, 3, 11, -1, -1, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 4, 7, 11, -1, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [4, 7, 8, -1, -1, -1, -1], 
[2, 3, 11, -1, -1, -1, -1], [5, 6, 10, -1, -1, -1, -1]], +[[1, 2, 4, 7, 9, 11, -1], [5, 6, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 6, 9, 10, -1, -1, -1], [2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 8, 11, -1, -1, -1], [4, 6, 9, 10, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 4, 6, 10, -1, -1], [2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 4, 6, 8, 10, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[6, 7, 8, 9, 10, -1, -1], [2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 6, 7, 9, 10, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 6, 7, 8, 10, -1], [2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 6, 7, 10, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 5, 6, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [1, 2, 5, 6, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 5, 6, 9, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 5, 6, 8, 9, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 7, 8, -1, -1, -1, -1], [1, 2, 5, 6, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 7, -1, -1, -1], [1, 2, 5, 6, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 5, 6, 9, -1, -1], [4, 7, 8, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 4, 5, 6, 7, 9], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 4, 6, 9, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [1, 2, 4, 6, 9, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 4, 6, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 4, 6, 8, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 6, 7, 8, 9, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 2, 3, 6, 7, 9], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 6, 7, 8, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 6, 7, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 5, 6, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 5, 6, 8, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 5, 6, 9, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[5, 6, 8, 9, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 7, 8, -1, -1, -1, -1], [1, 3, 5, 6, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, 
-1, -1, -1, -1, -1]], +[[0, 1, 4, 5, 6, 7, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 5, 6, 9, 11, -1], [4, 7, 8, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 6, 7, 9, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 4, 6, 9, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 4, 6, 8, 9, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 6, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 6, 8, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 6, 7, 8, 9, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 6, 7, 8, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[6, 7, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[5, 7, 10, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [5, 7, 10, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [5, 7, 10, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 8, 9, -1, -1, -1], [5, 7, 10, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 8, 10, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 5, 10, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [4, 5, 8, 10, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 4, 5, 9, 10, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 7, 9, 10, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [4, 7, 9, 10, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 4, 7, 10, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 4, 7, 8, 10, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[8, 9, 10, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 9, 10, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 8, 10, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 10, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 5, 7, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 5, 7, 8, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [2, 3, 5, 7, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, 
-1, -1, -1, -1, -1]], +[[1, 2, 5, 7, 8, 9, 10], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 4, 5, 8, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 4, 5, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [2, 3, 4, 5, 8, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 4, 5, 9, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 4, 7, 9, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 4, 7, 8, 9, 10], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 2, 3, 4, 7, 10], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 7, 8, -1, -1, -1, -1], [1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 8, 9, 10, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 9, 10, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 2, 3, 8, 10, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 10, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 5, 7, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [1, 2, 5, 7, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 5, 7, 9, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 5, 7, 8, 9, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 4, 5, 8, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 2, 3, 4, 5, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 4, 5, 8, 9, 11], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 9, -1, -1, -1, -1], [2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 4, 7, 9, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [1, 2, 4, 7, 9, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 4, 7, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 4, 7, 8, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 2, 8, 9, 11, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 2, 3, 9, 11, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 2, 8, 11, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[2, 3, 11, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 5, 7, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, 
-1, -1]], +[[0, 1, 5, 7, 8, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 5, 7, 9, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[5, 7, 8, 9, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 4, 5, 8, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 4, 5, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 5, 8, 9, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 5, 9, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 4, 7, 9, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 4, 7, 8, 9, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 4, 7, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[4, 7, 8, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[1, 3, 8, 9, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 1, 9, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[0, 3, 8, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]], +[[-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1]] +] +num_vd_table = [0, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 2, 2, +2, 1, 2, 1, 2, 1, 1, 2, 1, 1, 2, 2, 2, 1, 2, 3, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2, +1, 2, 1, 2, 2, 1, 1, 2, 1, 1, 1, 1, 2, 2, 2, 1, 1, 2, 1, 2, 3, 2, 2, 1, 1, 1, 1, +1, 1, 2, 1, 1, 1, 2, 1, 2, 2, 2, 1, 1, 1, 1, 1, 2, 3, 2, 2, 2, 2, 2, 1, 3, 4, 2, +2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 1, 1, 2, 2, 2, 2, 2, +3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 2, 2, 3, 2, 3, 2, 4, 2, 2, 2, 2, 1, 2, 1, 2, 1, 1, +2, 1, 1, 2, 2, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, +1, 2, 1, 1, 1, 2, 2, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, +1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, +1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0] +check_table = [ +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 
0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 1, 0, 0, 194], +[1, -1, 0, 0, 193], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, 1, 0, 164], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, -1, 0, 161], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, 0, 1, 152], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, 0, 1, 145], +[1, 0, 0, 1, 144], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, 0, -1, 137], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, 1, 0, 133], +[1, 0, 1, 0, 132], +[1, 1, 0, 0, 131], +[1, 1, 0, 0, 130], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, 0, 1, 100], +[0, 0, 0, 0, 0], +[1, 0, 0, 1, 98], +[0, 0, 0, 0, 0], +[1, 0, 0, 1, 96], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, 1, 0, 88], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, -1, 0, 82], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, 1, 0, 74], +[0, 0, 0, 0, 0], +[1, 0, 1, 0, 72], +[0, 0, 0, 0, 0], +[1, 0, 0, -1, 70], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, -1, 0, 0, 67], +[0, 0, 0, 0, 0], +[1, -1, 0, 0, 65], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 1, 0, 0, 56], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, -1, 0, 0, 52], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 1, 0, 0, 44], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 1, 0, 0, 40], +[0, 0, 0, 0, 0], +[1, 0, 0, -1, 38], +[1, 0, -1, 0, 37], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, -1, 0, 33], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, -1, 0, 0, 28], +[0, 0, 0, 0, 0], +[1, 0, -1, 0, 26], +[1, 0, 0, -1, 25], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, -1, 0, 0, 20], +[0, 0, 0, 0, 0], +[1, 0, -1, 0, 18], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, 0, 
-1, 9], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[1, 0, 0, -1, 6], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0], +[0, 0, 0, 0, 0] +] +tet_table = [ +[-1, -1, -1, -1, -1, -1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[1, 1, 1, 1, 1, 1], +[4, 4, 4, 4, 4, 4], +[0, 0, 0, 0, 0, 0], +[4, 0, 0, 4, 4, -1], +[1, 1, 1, 1, 1, 1], +[4, 4, 4, 4, 4, 4], +[0, 4, 0, 4, 4, -1], +[0, 0, 0, 0, 0, 0], +[1, 1, 1, 1, 1, 1], +[5, 5, 5, 5, 5, 5], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[1, 1, 1, 1, 1, 1], +[2, 2, 2, 2, 2, 2], +[0, 0, 0, 0, 0, 0], +[2, 0, 2, -1, 0, 2], +[1, 1, 1, 1, 1, 1], +[2, -1, 2, 4, 4, 2], +[0, 0, 0, 0, 0, 0], +[2, 0, 2, 4, 4, 2], +[1, 1, 1, 1, 1, 1], +[2, 4, 2, 4, 4, 2], +[0, 4, 0, 4, 4, 0], +[2, 0, 2, 0, 0, 2], +[1, 1, 1, 1, 1, 1], +[2, 5, 2, 5, 5, 2], +[0, 0, 0, 0, 0, 0], +[2, 0, 2, 0, 0, 2], +[1, 1, 1, 1, 1, 1], +[1, 1, 1, 1, 1, 1], +[0, 1, 1, -1, 0, 1], +[0, 0, 0, 0, 0, 0], +[2, 2, 2, 2, 2, 2], +[4, 1, 1, 4, 4, 1], +[0, 1, 1, 0, 0, 1], +[4, 0, 0, 4, 4, 0], +[2, 2, 2, 2, 2, 2], +[-1, 1, 1, 4, 4, 1], +[0, 1, 1, 4, 4, 1], +[0, 0, 0, 0, 0, 0], +[2, 2, 2, 2, 2, 2], +[5, 1, 1, 5, 5, 1], +[0, 1, 1, 0, 0, 1], +[0, 0, 0, 0, 0, 0], +[2, 2, 2, 2, 2, 2], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[8, 8, 8, 8, 8, 8], +[1, 1, 1, 4, 4, 1], +[0, 0, 0, 0, 0, 0], +[4, 0, 0, 4, 4, 0], +[4, 4, 4, 4, 4, 4], +[1, 1, 1, 4, 4, 1], +[0, 4, 0, 4, 4, 0], +[0, 0, 0, 0, 0, 0], +[4, 4, 4, 4, 4, 4], +[1, 1, 1, 5, 5, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[5, 5, 5, 5, 5, 5], +[6, 6, 6, 6, 6, 6], +[6, -1, 0, 6, 0, 6], +[6, 0, 0, 6, 0, 6], +[6, 1, 1, 6, 1, 6], +[4, 4, 4, 4, 4, 4], +[0, 0, 0, 0, 0, 0], +[4, 0, 0, 4, 4, 4], +[1, 1, 1, 1, 1, 1], +[6, 4, -1, 6, 4, 6], +[6, 4, 0, 6, 4, 6], +[6, 0, 0, 6, 0, 6], +[6, 1, 1, 6, 1, 6], +[5, 5, 5, 5, 5, 5], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[1, 1, 1, 1, 1, 1], +[2, 2, 2, 2, 2, 2], +[0, 0, 0, 0, 0, 0], +[2, 0, 2, 2, 0, 2], +[1, 1, 1, 1, 1, 1], +[2, 2, 2, 2, 2, 2], +[0, 0, 0, 0, 0, 0], +[2, 0, 2, 2, 2, 2], +[1, 1, 1, 1, 1, 1], +[2, 4, 2, 2, 4, 2], +[0, 4, 0, 4, 4, 0], +[2, 0, 2, 2, 0, 2], +[1, 1, 1, 1, 1, 1], +[2, 2, 2, 2, 2, 2], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[1, 1, 1, 1, 1, 1], +[6, 1, 1, 6, -1, 6], +[6, 1, 1, 6, 0, 6], +[6, 0, 0, 6, 0, 6], +[6, 2, 2, 6, 2, 6], +[4, 1, 1, 4, 4, 1], +[0, 1, 1, 0, 0, 1], +[4, 0, 0, 4, 4, 4], +[2, 2, 2, 2, 2, 2], +[6, 1, 1, 6, 4, 6], +[6, 1, 1, 6, 4, 6], +[6, 0, 0, 6, 0, 6], +[6, 2, 2, 6, 2, 6], +[5, 1, 1, 5, 5, 1], +[0, 1, 1, 0, 0, 1], +[0, 0, 0, 0, 0, 0], +[2, 2, 2, 2, 2, 2], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[6, 6, 6, 6, 6, 6], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[4, 4, 4, 4, 4, 4], +[1, 1, 1, 1, 4, 1], +[0, 4, 0, 4, 4, 0], +[0, 0, 0, 0, 0, 0], +[4, 4, 4, 4, 4, 4], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 5, 0, 5, 0, 5], +[5, 5, 5, 5, 5, 5], +[5, 5, 5, 5, 5, 5], +[0, 5, 0, 5, 0, 5], +[-1, 5, 0, 5, 0, 5], +[1, 5, 1, 5, 1, 5], +[4, 5, -1, 5, 4, 5], +[0, 5, 0, 5, 0, 5], +[4, 5, 0, 5, 4, 5], +[1, 5, 1, 5, 1, 5], +[4, 4, 4, 4, 4, 4], +[0, 4, 0, 4, 4, 4], +[0, 0, 0, 0, 0, 0], +[1, 1, 1, 1, 1, 1], +[6, 6, 6, 6, 6, 6], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[1, 1, 1, 1, 1, 1], +[2, 5, 2, 5, -1, 5], +[0, 5, 0, 5, 0, 5], +[2, 5, 2, 5, 0, 5], +[1, 5, 1, 5, 1, 5], +[2, 5, 2, 5, 4, 5], +[0, 5, 0, 5, 0, 5], +[2, 5, 2, 5, 4, 5], +[1, 5, 1, 5, 1, 5], +[2, 4, 2, 4, 4, 2], +[0, 4, 0, 4, 4, 4], +[2, 0, 2, 0, 0, 2], +[1, 1, 1, 1, 1, 1], +[2, 6, 2, 6, 6, 2], +[0, 0, 0, 0, 0, 0], +[2, 0, 2, 0, 0, 2], +[1, 1, 
1, 1, 1, 1], +[1, 1, 1, 1, 1, 1], +[0, 1, 1, 1, 0, 1], +[0, 0, 0, 0, 0, 0], +[2, 2, 2, 2, 2, 2], +[4, 1, 1, 1, 4, 1], +[0, 1, 1, 1, 0, 1], +[4, 0, 0, 4, 4, 0], +[2, 2, 2, 2, 2, 2], +[1, 1, 1, 1, 1, 1], +[0, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[2, 2, 2, 2, 2, 2], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[2, 2, 2, 2, 2, 2], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[5, 5, 5, 5, 5, 5], +[1, 1, 1, 1, 4, 1], +[0, 0, 0, 0, 0, 0], +[4, 0, 0, 4, 4, 0], +[4, 4, 4, 4, 4, 4], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[4, 4, 4, 4, 4, 4], +[1, 1, 1, 1, 1, 1], +[6, 0, 0, 6, 0, 6], +[0, 0, 0, 0, 0, 0], +[6, 6, 6, 6, 6, 6], +[5, 5, 5, 5, 5, 5], +[5, 5, 0, 5, 0, 5], +[5, 5, 0, 5, 0, 5], +[5, 5, 1, 5, 1, 5], +[4, 4, 4, 4, 4, 4], +[0, 0, 0, 0, 0, 0], +[4, 4, 0, 4, 4, 4], +[1, 1, 1, 1, 1, 1], +[4, 4, 4, 4, 4, 4], +[4, 4, 0, 4, 4, 4], +[0, 0, 0, 0, 0, 0], +[1, 1, 1, 1, 1, 1], +[8, 8, 8, 8, 8, 8], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[1, 1, 1, 1, 1, 1], +[2, 2, 2, 2, 2, 2], +[0, 0, 0, 0, 0, 0], +[2, 2, 2, 2, 0, 2], +[1, 1, 1, 1, 1, 1], +[2, 2, 2, 2, 2, 2], +[0, 0, 0, 0, 0, 0], +[2, 2, 2, 2, 2, 2], +[1, 1, 1, 1, 1, 1], +[2, 2, 2, 2, 2, 2], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[4, 1, 1, 4, 4, 1], +[2, 2, 2, 2, 2, 2], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[1, 1, 1, 1, 1, 1], +[1, 1, 1, 1, 1, 1], +[1, 1, 1, 1, 0, 1], +[0, 0, 0, 0, 0, 0], +[2, 2, 2, 2, 2, 2], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[2, 4, 2, 4, 4, 2], +[1, 1, 1, 1, 1, 1], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[2, 2, 2, 2, 2, 2], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[2, 2, 2, 2, 2, 2], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[5, 5, 5, 5, 5, 5], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[4, 4, 4, 4, 4, 4], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[4, 4, 4, 4, 4, 4], +[1, 1, 1, 1, 1, 1], +[0, 0, 0, 0, 0, 0], +[0, 0, 0, 0, 0, 0], +[12, 12, 12, 12, 12, 12] +] diff --git a/third_party/TRELLIS/trellis/representations/mesh/utils_cube.py b/third_party/TRELLIS/trellis/representations/mesh/utils_cube.py new file mode 100644 index 0000000000000000000000000000000000000000..23913c97bb2d57dfa0384667c69f9860ea0a4155 --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/mesh/utils_cube.py @@ -0,0 +1,61 @@ +import torch +cube_corners = torch.tensor([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [ + 1, 0, 1], [0, 1, 1], [1, 1, 1]], dtype=torch.int) +cube_neighbor = torch.tensor([[1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0], [0, 0, 1], [0, 0, -1]]) +cube_edges = torch.tensor([0, 1, 1, 5, 4, 5, 0, 4, 2, 3, 3, 7, 6, 7, 2, 6, + 2, 0, 3, 1, 7, 5, 6, 4], dtype=torch.long, requires_grad=False) + +def construct_dense_grid(res, device='cuda'): + '''construct a dense grid based on resolution''' + res_v = res + 1 + vertsid = torch.arange(res_v ** 3, device=device) + coordsid = vertsid.reshape(res_v, res_v, res_v)[:res, :res, :res].flatten() + cube_corners_bias = (cube_corners[:, 0] * res_v + cube_corners[:, 1]) * res_v + cube_corners[:, 2] + cube_fx8 = (coordsid.unsqueeze(1) + cube_corners_bias.unsqueeze(0).to(device)) + verts = torch.stack([vertsid // (res_v ** 2), (vertsid // res_v) % res_v, vertsid % res_v], dim=1) + return verts, cube_fx8 + + +def construct_voxel_grid(coords): + verts = (cube_corners.unsqueeze(0).to(coords) + coords.unsqueeze(1)).reshape(-1, 3) + verts_unique, inverse_indices = torch.unique(verts, dim=0, return_inverse=True) + cubes 
= inverse_indices.reshape(-1, 8) + return verts_unique, cubes + + +def cubes_to_verts(num_verts, cubes, value, reduce='mean'): + """ + Args: + cubes [Vx8] verts index for each cube + value [Vx8xM] value to be scattered + Operation: + reduced[cubes[i][j]][k] += value[i][j][k] + """ + M = value.shape[2] # number of channels + reduced = torch.zeros(num_verts, M, device=cubes.device) + return torch.scatter_reduce(reduced, 0, + cubes.unsqueeze(-1).expand(-1, -1, M).flatten(0, 1), + value.flatten(0, 1), reduce=reduce, include_self=False) + +def sparse_cube2verts(coords, feats, training=True): + new_coords, cubes = construct_voxel_grid(coords) + new_feats = cubes_to_verts(new_coords.shape[0], cubes, feats) + if training: + con_loss = torch.mean((feats - new_feats[cubes]) ** 2) + else: + con_loss = 0.0 + return new_coords, new_feats, con_loss + + +def get_dense_attrs(coords : torch.Tensor, feats : torch.Tensor, res : int, sdf_init=True): + F = feats.shape[-1] + dense_attrs = torch.zeros([res] * 3 + [F], device=feats.device) + if sdf_init: + dense_attrs[..., 0] = 1 # initial outside sdf value + dense_attrs[coords[:, 0], coords[:, 1], coords[:, 2], :] = feats + return dense_attrs.reshape(-1, F) + + +def get_defomed_verts(v_pos : torch.Tensor, deform : torch.Tensor, res): + return v_pos / res - 0.5 + (1 - 1e-8) / (res * 2) * torch.tanh(deform) + \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/representations/octree/__init__.py b/third_party/TRELLIS/trellis/representations/octree/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..f66a39a5a7498e2e99fe9d94d663796b3bc157b5 --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/octree/__init__.py @@ -0,0 +1 @@ +from .octree_dfs import DfsOctree \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/representations/octree/octree_dfs.py b/third_party/TRELLIS/trellis/representations/octree/octree_dfs.py new file mode 100755 index 0000000000000000000000000000000000000000..9d1f7898f30414f304953cfb2d51d00511ec8325 --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/octree/octree_dfs.py @@ -0,0 +1,362 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +DEFAULT_TRIVEC_CONFIG = { + 'dim': 8, + 'rank': 8, +} + +DEFAULT_VOXEL_CONFIG = { + 'solid': False, +} + +DEFAULT_DECOPOLY_CONFIG = { + 'degree': 8, + 'rank': 16, +} + + +class DfsOctree: + """ + Sparse Voxel Octree (SVO) implementation for PyTorch. + Using Depth-First Search (DFS) order to store the octree. + DFS order suits rendering and ray tracing. + + The structure and data are stored separately. + The structure is stored as a contiguous array; each element is a 3*32-bit descriptor. + |-----------------------------------------| + | 0:3 bits | 4:31 bits | + | leaf num | unused | + |-----------------------------------------| + | 0:31 bits | + | child ptr | + |-----------------------------------------| + | 0:31 bits | + | data ptr | + |-----------------------------------------| + Each element represents a non-leaf node in the octree. + The valid mask is used to indicate whether the children are valid. + The leaf mask is used to indicate whether the children are leaf nodes. + The child ptr is used to point to the first non-leaf child. Non-leaf children descriptors are stored continuously from the child ptr. + The data ptr is used to point to the data of leaf children. Leaf children data are stored continuously from the data ptr.
+ + There are also auxiliary arrays to store the additional structural information to facilitate parallel processing. + - Position: the position of the octree nodes. + - Depth: the depth of the octree nodes. + + Args: + depth (int): the depth of the octree. + """ + + def __init__( + self, + depth, + aabb=[0,0,0,1,1,1], + sh_degree=2, + primitive='voxel', + primitive_config={}, + device='cuda', + ): + self.max_depth = depth + self.aabb = torch.tensor(aabb, dtype=torch.float32, device=device) + self.device = device + self.sh_degree = sh_degree + self.active_sh_degree = sh_degree + self.primitive = primitive + self.primitive_config = primitive_config + + self.structure = torch.tensor([[8, 1, 0]], dtype=torch.int32, device=self.device) + self.position = torch.zeros((8, 3), dtype=torch.float32, device=self.device) + self.depth = torch.zeros((8, 1), dtype=torch.uint8, device=self.device) + self.position[:, 0] = torch.tensor([0.25, 0.75, 0.25, 0.75, 0.25, 0.75, 0.25, 0.75], device=self.device) + self.position[:, 1] = torch.tensor([0.25, 0.25, 0.75, 0.75, 0.25, 0.25, 0.75, 0.75], device=self.device) + self.position[:, 2] = torch.tensor([0.25, 0.25, 0.25, 0.25, 0.75, 0.75, 0.75, 0.75], device=self.device) + self.depth[:, 0] = 1 + + self.data = ['position', 'depth'] + self.param_names = [] + + if primitive == 'voxel': + self.features_dc = torch.zeros((8, 1, 3), dtype=torch.float32, device=self.device) + self.features_ac = torch.zeros((8, (sh_degree+1)**2-1, 3), dtype=torch.float32, device=self.device) + self.data += ['features_dc', 'features_ac'] + self.param_names += ['features_dc', 'features_ac'] + if not primitive_config.get('solid', False): + self.density = torch.zeros((8, 1), dtype=torch.float32, device=self.device) + self.data.append('density') + self.param_names.append('density') + elif primitive == 'gaussian': + self.features_dc = torch.zeros((8, 1, 3), dtype=torch.float32, device=self.device) + self.features_ac = torch.zeros((8, (sh_degree+1)**2-1, 3), dtype=torch.float32, device=self.device) + self.opacity = torch.zeros((8, 1), dtype=torch.float32, device=self.device) + self.data += ['features_dc', 'features_ac', 'opacity'] + self.param_names += ['features_dc', 'features_ac', 'opacity'] + elif primitive == 'trivec': + self.trivec = torch.zeros((8, primitive_config['rank'], 3, primitive_config['dim']), dtype=torch.float32, device=self.device) + self.density = torch.zeros((8, primitive_config['rank']), dtype=torch.float32, device=self.device) + self.features_dc = torch.zeros((8, primitive_config['rank'], 1, 3), dtype=torch.float32, device=self.device) + self.features_ac = torch.zeros((8, primitive_config['rank'], (sh_degree+1)**2-1, 3), dtype=torch.float32, device=self.device) + self.density_shift = 0 + self.data += ['trivec', 'density', 'features_dc', 'features_ac'] + self.param_names += ['trivec', 'density', 'features_dc', 'features_ac'] + elif primitive == 'decoupoly': + self.decoupoly_V = torch.zeros((8, primitive_config['rank'], 3), dtype=torch.float32, device=self.device) + self.decoupoly_g = torch.zeros((8, primitive_config['rank'], primitive_config['degree']), dtype=torch.float32, device=self.device) + self.density = torch.zeros((8, primitive_config['rank']), dtype=torch.float32, device=self.device) + self.features_dc = torch.zeros((8, primitive_config['rank'], 1, 3), dtype=torch.float32, device=self.device) + self.features_ac = torch.zeros((8, primitive_config['rank'], (sh_degree+1)**2-1, 3), dtype=torch.float32, device=self.device) + self.density_shift = 0 + self.data += 
['decoupoly_V', 'decoupoly_g', 'density', 'features_dc', 'features_ac'] + self.param_names += ['decoupoly_V', 'decoupoly_g', 'density', 'features_dc', 'features_ac'] + + self.setup_functions() + + def setup_functions(self): + self.density_activation = (lambda x: torch.exp(x - 2)) if self.primitive != 'trivec' else (lambda x: x) + self.opacity_activation = lambda x: torch.sigmoid(x - 6) + self.inverse_opacity_activation = lambda x: torch.log(x / (1 - x)) + 6 + self.color_activation = lambda x: torch.sigmoid(x) + + @property + def num_non_leaf_nodes(self): + return self.structure.shape[0] + + @property + def num_leaf_nodes(self): + return self.depth.shape[0] + + @property + def cur_depth(self): + return self.depth.max().item() + + @property + def occupancy(self): + return self.num_leaf_nodes / 8 ** self.cur_depth + + @property + def get_xyz(self): + return self.position + + @property + def get_depth(self): + return self.depth + + @property + def get_density(self): + if self.primitive == 'voxel' and self.primitive_config.get('solid', False): + return torch.full((self.position.shape[0], 1), 1000, dtype=torch.float32, device=self.device) + return self.density_activation(self.density) + + @property + def get_opacity(self): + return self.opacity_activation(self.density) + + @property + def get_trivec(self): + return self.trivec + + @property + def get_decoupoly(self): + return F.normalize(self.decoupoly_V, dim=-1), self.decoupoly_g + + @property + def get_color(self): + return self.color_activation(self.colors) + + @property + def get_features(self): + if self.sh_degree == 0: + return self.features_dc + return torch.cat([self.features_dc, self.features_ac], dim=-2) + + def state_dict(self): + ret = {'structure': self.structure, 'position': self.position, 'depth': self.depth, 'sh_degree': self.sh_degree, 'active_sh_degree': self.active_sh_degree, 'primitive_config': self.primitive_config, 'primitive': self.primitive} + if hasattr(self, 'density_shift'): + ret['density_shift'] = self.density_shift + for data in set(self.data + self.param_names): + if not isinstance(getattr(self, data), nn.Module): + ret[data] = getattr(self, data) + else: + ret[data] = getattr(self, data).state_dict() + return ret + + def load_state_dict(self, state_dict): + keys = list(set(self.data + self.param_names + list(state_dict.keys()) + ['structure', 'position', 'depth'])) + for key in keys: + if key not in state_dict: + print(f"Warning: key {key} not found in the state_dict.") + continue + try: + if not isinstance(getattr(self, key), nn.Module): + setattr(self, key, state_dict[key]) + else: + getattr(self, key).load_state_dict(state_dict[key]) + except Exception as e: + print(e) + raise ValueError(f"Error loading key {key}.") + + def gather_from_leaf_children(self, data): + """ + Gather the data from the leaf children. + + Args: + data (torch.Tensor): the data to gather. The first dimension should be the number of leaf nodes. + """ + leaf_cnt = self.structure[:, 0] + leaf_cnt_masks = [leaf_cnt == i for i in range(1, 9)] + ret = torch.zeros((self.num_non_leaf_nodes,), dtype=data.dtype, device=self.device) + for i in range(8): + if leaf_cnt_masks[i].sum() == 0: + continue + start = self.structure[leaf_cnt_masks[i], 2] + for j in range(i+1): + ret[leaf_cnt_masks[i]] += data[start + j] + return ret + + def gather_from_non_leaf_children(self, data): + """ + Gather the data from the non-leaf children. + + Args: + data (torch.Tensor): the data to gather. The first dimension should be the number of non-leaf nodes.
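+
+        Example (an illustrative sketch; assumes the default CUDA device). On a
+        freshly constructed octree the root's children are all leaves, so the
+        sibling `gather_from_leaf_children` sums over all 8 of them, while this
+        method has nothing to gather:
+
+        >>> oct = DfsOctree(depth=3)
+        >>> oct.gather_from_leaf_children(torch.ones(oct.num_leaf_nodes, device=oct.device))
+        tensor([8.], device='cuda:0')
+        >>> oct.gather_from_non_leaf_children(torch.zeros(oct.num_non_leaf_nodes, device=oct.device))
+        tensor([0.], device='cuda:0')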
+ """ + non_leaf_cnt = 8 - self.structure[:, 0] + non_leaf_cnt_masks = [non_leaf_cnt == i for i in range(1, 9)] + ret = torch.zeros_like(data, device=self.device) + for i in range(8): + if non_leaf_cnt_masks[i].sum() == 0: + continue + start = self.structure[non_leaf_cnt_masks[i], 1] + for j in range(i+1): + ret[non_leaf_cnt_masks[i]] += data[start + j] + return ret + + def structure_control(self, mask): + """ + Control the structure of the octree. + + Args: + mask (torch.Tensor): the mask to control the structure. 1 for subdivide, -1 for merge, 0 for keep. + """ + # Dont subdivide when the depth is the maximum. + mask[self.depth.squeeze() == self.max_depth] = torch.clamp_max(mask[self.depth.squeeze() == self.max_depth], 0) + # Dont merge when the depth is the minimum. + mask[self.depth.squeeze() == 1] = torch.clamp_min(mask[self.depth.squeeze() == 1], 0) + + # Gather control mask + structre_ctrl = self.gather_from_leaf_children(mask) + structre_ctrl[structre_ctrl==-8] = -1 + + new_leaf_num = self.structure[:, 0].clone() + # Modify the leaf num. + structre_valid = structre_ctrl >= 0 + new_leaf_num[structre_valid] -= structre_ctrl[structre_valid] # Add the new nodes. + structre_delete = structre_ctrl < 0 + merged_nodes = self.gather_from_non_leaf_children(structre_delete.int()) + new_leaf_num += merged_nodes # Delete the merged nodes. + + # Update the structure array to allocate new nodes. + mem_offset = torch.zeros((self.num_non_leaf_nodes + 1,), dtype=torch.int32, device=self.device) + mem_offset.index_add_(0, self.structure[structre_valid, 1], structre_ctrl[structre_valid]) # Add the new nodes. + mem_offset[:-1] -= structre_delete.int() # Delete the merged nodes. + new_structre_idx = torch.arange(0, self.num_non_leaf_nodes + 1, dtype=torch.int32, device=self.device) + mem_offset.cumsum(0) + new_structure_length = new_structre_idx[-1].item() + new_structre_idx = new_structre_idx[:-1] + new_structure = torch.empty((new_structure_length, 3), dtype=torch.int32, device=self.device) + new_structure[new_structre_idx[structre_valid], 0] = new_leaf_num[structre_valid] + + # Initialize the new nodes. + new_node_mask = torch.ones((new_structure_length,), dtype=torch.bool, device=self.device) + new_node_mask[new_structre_idx[structre_valid]] = False + new_structure[new_node_mask, 0] = 8 # Initialize to all leaf nodes. + new_node_num = new_node_mask.sum().item() + + # Rebuild child ptr. + non_leaf_cnt = 8 - new_structure[:, 0] + new_child_ptr = torch.cat([torch.zeros((1,), dtype=torch.int32, device=self.device), non_leaf_cnt.cumsum(0)[:-1]]) + new_structure[:, 1] = new_child_ptr + 1 + + # Rebuild data ptr with old data. 
+ leaf_cnt = torch.zeros((new_structure_length,), dtype=torch.int32, device=self.device) + leaf_cnt.index_add_(0, new_structre_idx, self.structure[:, 0]) + old_data_ptr = torch.cat([torch.zeros((1,), dtype=torch.int32, device=self.device), leaf_cnt.cumsum(0)[:-1]]) + + # Update the data array + subdivide_mask = mask == 1 + merge_mask = mask == -1 + data_valid = ~(subdivide_mask | merge_mask) + mem_offset = torch.zeros((self.num_leaf_nodes + 1,), dtype=torch.int32, device=self.device) + mem_offset.index_add_(0, old_data_ptr[new_node_mask], torch.full((new_node_num,), 8, dtype=torch.int32, device=self.device)) # Add data array for new nodes + mem_offset[:-1] -= subdivide_mask.int() # Delete data elements for subdivide nodes + mem_offset[:-1] -= merge_mask.int() # Delete data elements for merge nodes + mem_offset.index_add_(0, self.structure[structre_valid, 2], merged_nodes[structre_valid]) # Add data elements for merge nodes + new_data_idx = torch.arange(0, self.num_leaf_nodes + 1, dtype=torch.int32, device=self.device) + mem_offset.cumsum(0) + new_data_length = new_data_idx[-1].item() + new_data_idx = new_data_idx[:-1] + new_data = {data: torch.empty((new_data_length,) + getattr(self, data).shape[1:], dtype=getattr(self, data).dtype, device=self.device) for data in self.data} + for data in self.data: + new_data[data][new_data_idx[data_valid]] = getattr(self, data)[data_valid] + + # Rebuild data ptr + leaf_cnt = new_structure[:, 0] + new_data_ptr = torch.cat([torch.zeros((1,), dtype=torch.int32, device=self.device), leaf_cnt.cumsum(0)[:-1]]) + new_structure[:, 2] = new_data_ptr + + # Initialize the new data array + ## For subdivide nodes + if subdivide_mask.sum() > 0: + subdivide_data_ptr = new_structure[new_node_mask, 2] + for data in self.data: + for i in range(8): + if data == 'position': + offset = torch.tensor([i // 4, (i // 2) % 2, i % 2], dtype=torch.float32, device=self.device) - 0.5 + scale = 2 ** (-1.0 - self.depth[subdivide_mask]) + new_data['position'][subdivide_data_ptr + i] = self.position[subdivide_mask] + offset * scale + elif data == 'depth': + new_data['depth'][subdivide_data_ptr + i] = self.depth[subdivide_mask] + 1 + elif data == 'opacity': + new_data['opacity'][subdivide_data_ptr + i] = self.inverse_opacity_activation(torch.sqrt(self.opacity_activation(self.opacity[subdivide_mask]))) + elif data == 'trivec': + offset = torch.tensor([i // 4, (i // 2) % 2, i % 2], dtype=torch.float32, device=self.device) * 0.5 + coord = (torch.linspace(0, 0.5, self.trivec.shape[-1], dtype=torch.float32, device=self.device)[None] + offset[:, None]).reshape(1, 3, self.trivec.shape[-1], 1) + axis = torch.linspace(0, 1, 3, dtype=torch.float32, device=self.device).reshape(1, 3, 1, 1).repeat(1, 1, self.trivec.shape[-1], 1) + coord = torch.stack([coord, axis], dim=3).reshape(1, 3, self.trivec.shape[-1], 2).expand(self.trivec[subdivide_mask].shape[0], -1, -1, -1) * 2 - 1 + new_data['trivec'][subdivide_data_ptr + i] = F.grid_sample(self.trivec[subdivide_mask], coord, align_corners=True) + else: + new_data[data][subdivide_data_ptr + i] = getattr(self, data)[subdivide_mask] + ## For merge nodes + if merge_mask.sum() > 0: + merge_data_ptr = torch.empty((merged_nodes.sum().item(),), dtype=torch.int32, device=self.device) + merge_nodes_cumsum = torch.cat([torch.zeros((1,), dtype=torch.int32, device=self.device), merged_nodes.cumsum(0)[:-1]]) + for i in range(8): + merge_data_ptr[merge_nodes_cumsum[merged_nodes > i] + i] = new_structure[new_structre_idx[merged_nodes > i], 2] + i + old_merge_data_ptr = 
self.structure[structre_delete, 2] + for data in self.data: + if data == 'position': + scale = 2 ** (1.0 - self.depth[old_merge_data_ptr]) + new_data['position'][merge_data_ptr] = ((self.position[old_merge_data_ptr] + 0.5) / scale).floor() * scale + 0.5 * scale - 0.5 + elif data == 'depth': + new_data['depth'][merge_data_ptr] = self.depth[old_merge_data_ptr] - 1 + elif data == 'opacity': + new_data['opacity'][merge_data_ptr] = self.inverse_opacity_activation(self.opacity_activation(self.opacity[old_merge_data_ptr])**2) + elif data == 'trivec': + new_data['trivec'][merge_data_ptr] = self.trivec[old_merge_data_ptr] + else: + new_data[data][merge_data_ptr] = getattr(self, data)[old_merge_data_ptr] + + # Update the structure and data array + self.structure = new_structure + for data in self.data: + setattr(self, data, new_data[data]) + + # Save data array control temp variables + self.data_rearrange_buffer = { + 'subdivide_mask': subdivide_mask, + 'merge_mask': merge_mask, + 'data_valid': data_valid, + 'new_data_idx': new_data_idx, + 'new_data_length': new_data_length, + 'new_data': new_data + } diff --git a/third_party/TRELLIS/trellis/representations/radiance_field/__init__.py b/third_party/TRELLIS/trellis/representations/radiance_field/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..b72a1b7e76b509ee5a5e6979858eb17b4158a151 --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/radiance_field/__init__.py @@ -0,0 +1 @@ +from .strivec import Strivec \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/representations/radiance_field/strivec.py b/third_party/TRELLIS/trellis/representations/radiance_field/strivec.py new file mode 100644 index 0000000000000000000000000000000000000000..8fc4b749786d934dae82864b560baccd91fcabbc --- /dev/null +++ b/third_party/TRELLIS/trellis/representations/radiance_field/strivec.py @@ -0,0 +1,28 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from ..octree import DfsOctree as Octree + + +class Strivec(Octree): + def __init__( + self, + resolution: int, + aabb: list, + sh_degree: int = 0, + rank: int = 8, + dim: int = 8, + device: str = "cuda", + ): + assert np.log2(resolution) % 1 == 0, "Resolution must be a power of 2" + self.resolution = resolution + depth = int(np.round(np.log2(resolution))) + super().__init__( + depth=depth, + aabb=aabb, + sh_degree=sh_degree, + primitive="trivec", + primitive_config={"rank": rank, "dim": dim}, + device=device, + ) diff --git a/third_party/TRELLIS/trellis/utils/__init__.py b/third_party/TRELLIS/trellis/utils/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/third_party/TRELLIS/trellis/utils/elastic_utils.py b/third_party/TRELLIS/trellis/utils/elastic_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e9ebece90a07416362a96e7f0ee21b1397dfb132 --- /dev/null +++ b/third_party/TRELLIS/trellis/utils/elastic_utils.py @@ -0,0 +1,228 @@ +from abc import abstractmethod +from contextlib import contextmanager +from typing import Tuple +import torch +import torch.nn as nn +import numpy as np + + +class MemoryController: + """ + Base class for memory management during training.
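+
+    Intended usage (a sketch; `model` stands for any ElasticModule registered
+    with the controller and `batch` for one training batch):
+
+        with controller.record():
+            loss = model(batch)
+            loss.backward()
+
+    Each `record()` block captures the step's peak memory, so that a concrete
+    controller (e.g. the LinearMemoryController below) can fit its memory model
+    and propose `mem_ratio` values for subsequent steps.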
+ """ + + _last_input_size = None + _last_mem_ratio = [] + + @contextmanager + def record(self): + pass + + def update_run_states(self, input_size=None, mem_ratio=None): + if self._last_input_size is None: + self._last_input_size = input_size + elif self._last_input_size!= input_size: + raise ValueError(f'Input size should not change for different ElasticModules.') + self._last_mem_ratio.append(mem_ratio) + + @abstractmethod + def get_mem_ratio(self, input_size): + pass + + @abstractmethod + def state_dict(self): + pass + + @abstractmethod + def log(self): + pass + + +class LinearMemoryController(MemoryController): + """ + A simple controller for memory management during training. + The memory usage is modeled as a linear function of: + - the number of input parameters + - the ratio of memory the model use compared to the maximum usage (with no checkpointing) + memory_usage = k * input_size * mem_ratio + b + The controller keeps track of the memory usage and gives the + expected memory ratio to keep the memory usage under a target + """ + def __init__( + self, + buffer_size=1000, + update_every=500, + target_ratio=0.8, + available_memory=None, + max_mem_ratio_start=0.1, + params=None, + device=None + ): + self.buffer_size = buffer_size + self.update_every = update_every + self.target_ratio = target_ratio + self.device = device or torch.cuda.current_device() + self.available_memory = available_memory or torch.cuda.get_device_properties(self.device).total_memory / 1024**3 + + self._memory = np.zeros(buffer_size, dtype=np.float32) + self._input_size = np.zeros(buffer_size, dtype=np.float32) + self._mem_ratio = np.zeros(buffer_size, dtype=np.float32) + self._buffer_ptr = 0 + self._buffer_length = 0 + self._params = tuple(params) if params is not None else (0.0, 0.0) + self._max_mem_ratio = max_mem_ratio_start + self.step = 0 + + def __repr__(self): + return f'LinearMemoryController(target_ratio={self.target_ratio}, available_memory={self.available_memory})' + + def _add_sample(self, memory, input_size, mem_ratio): + self._memory[self._buffer_ptr] = memory + self._input_size[self._buffer_ptr] = input_size + self._mem_ratio[self._buffer_ptr] = mem_ratio + self._buffer_ptr = (self._buffer_ptr + 1) % self.buffer_size + self._buffer_length = min(self._buffer_length + 1, self.buffer_size) + + @contextmanager + def record(self): + torch.cuda.reset_peak_memory_stats(self.device) + self._last_input_size = None + self._last_mem_ratio = [] + yield + self._last_memory = torch.cuda.max_memory_allocated(self.device) / 1024**3 + self._last_mem_ratio = sum(self._last_mem_ratio) / len(self._last_mem_ratio) + self._add_sample(self._last_memory, self._last_input_size, self._last_mem_ratio) + self.step += 1 + if self.step % self.update_every == 0: + self._max_mem_ratio = min(1.0, self._max_mem_ratio + 0.1) + self._fit_params() + + def _fit_params(self): + memory_usage = self._memory[:self._buffer_length] + input_size = self._input_size[:self._buffer_length] + mem_ratio = self._mem_ratio[:self._buffer_length] + + x = input_size * mem_ratio + y = memory_usage + k, b = np.polyfit(x, y, 1) + self._params = (k, b) + # self._visualize() + + def _visualize(self): + import matplotlib.pyplot as plt + memory_usage = self._memory[:self._buffer_length] + input_size = self._input_size[:self._buffer_length] + mem_ratio = self._mem_ratio[:self._buffer_length] + k, b = self._params + + plt.scatter(input_size * mem_ratio, memory_usage, c=mem_ratio, cmap='viridis') + x = np.array([0.0, 20000.0]) + plt.plot(x, k * x + b, c='r') + 
plt.savefig(f'linear_memory_controller_{self.step}.png') + plt.cla() + + def get_mem_ratio(self, input_size): + k, b = self._params + if k == 0: return np.random.rand() * self._max_mem_ratio + pred = (self.available_memory * self.target_ratio - b) / (k * input_size) + return min(self._max_mem_ratio, max(0.0, pred)) + + def state_dict(self): + return { + 'params': self._params, + } + + def load_state_dict(self, state_dict): + self._params = tuple(state_dict['params']) + + def log(self): + return { + 'params/k': self._params[0], + 'params/b': self._params[1], + 'memory': self._last_memory, + 'input_size': self._last_input_size, + 'mem_ratio': self._last_mem_ratio, + } + + +class ElasticModule(nn.Module): + """ + Module for training with elastic memory management. + """ + def __init__(self): + super().__init__() + self._memory_controller: MemoryController = None + + @abstractmethod + def _get_input_size(self, *args, **kwargs) -> int: + """ + Get the size of the input data. + + Returns: + int: The size of the input data. + """ + pass + + @abstractmethod + def _forward_with_mem_ratio(self, *args, mem_ratio=0.0, **kwargs) -> Tuple[float, Tuple]: + """ + Forward with a given memory ratio. + """ + pass + + def register_memory_controller(self, memory_controller: MemoryController): + self._memory_controller = memory_controller + + def forward(self, *args, **kwargs): + if self._memory_controller is None or not torch.is_grad_enabled() or not self.training: + _, ret = self._forward_with_mem_ratio(*args, **kwargs) + else: + input_size = self._get_input_size(*args, **kwargs) + mem_ratio = self._memory_controller.get_mem_ratio(input_size) + mem_ratio, ret = self._forward_with_mem_ratio(*args, mem_ratio=mem_ratio, **kwargs) + self._memory_controller.update_run_states(input_size, mem_ratio) + return ret + + +class ElasticModuleMixin: + """ + Mixin for training with elastic memory management. + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._memory_controller: MemoryController = None + + @abstractmethod + def _get_input_size(self, *args, **kwargs) -> int: + """ + Get the size of the input data. + + Returns: + int: The size of the input data. + """ + pass + + @abstractmethod + @contextmanager + def with_mem_ratio(self, mem_ratio=1.0) -> float: + """ + Context manager for training with a reduced memory ratio compared to the full memory usage. + + Returns: + float: The exact memory ratio used during the forward pass. 
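+
+        A possible subclass implementation, as a sketch only (`self.blocks`
+        and `_set_num_checkpointed` are hypothetical names):
+
+            @contextmanager
+            def with_mem_ratio(self, mem_ratio=1.0):
+                n_ckpt = round(len(self.blocks) * (1 - mem_ratio))
+                self._set_num_checkpointed(n_ckpt)   # trade compute for memory
+                yield 1 - n_ckpt / len(self.blocks)  # exact ratio actually used
+                self._set_num_checkpointed(0)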
+ """ + pass + + def register_memory_controller(self, memory_controller: MemoryController): + self._memory_controller = memory_controller + + def forward(self, *args, **kwargs): + if self._memory_controller is None or not torch.is_grad_enabled() or not self.training: + ret = super().forward(*args, **kwargs) + else: + input_size = self._get_input_size(*args, **kwargs) + mem_ratio = self._memory_controller.get_mem_ratio(input_size) + with self.with_mem_ratio(mem_ratio) as exact_mem_ratio: + ret = super().forward(*args, **kwargs) + self._memory_controller.update_run_states(input_size, exact_mem_ratio) + return ret \ No newline at end of file diff --git a/third_party/TRELLIS/trellis/utils/general_utils.py b/third_party/TRELLIS/trellis/utils/general_utils.py new file mode 100755 index 0000000000000000000000000000000000000000..3b454d9c75521e33466055fe37c3fc1e37180a79 --- /dev/null +++ b/third_party/TRELLIS/trellis/utils/general_utils.py @@ -0,0 +1,187 @@ +import numpy as np +import cv2 +import torch + + +# Dictionary utils +def _dict_merge(dicta, dictb, prefix=''): + """ + Merge two dictionaries. + """ + assert isinstance(dicta, dict), 'input must be a dictionary' + assert isinstance(dictb, dict), 'input must be a dictionary' + dict_ = {} + all_keys = set(dicta.keys()).union(set(dictb.keys())) + for key in all_keys: + if key in dicta.keys() and key in dictb.keys(): + if isinstance(dicta[key], dict) and isinstance(dictb[key], dict): + dict_[key] = _dict_merge(dicta[key], dictb[key], prefix=f'{prefix}.{key}') + else: + raise ValueError(f'Duplicate key {prefix}.{key} found in both dictionaries. Types: {type(dicta[key])}, {type(dictb[key])}') + elif key in dicta.keys(): + dict_[key] = dicta[key] + else: + dict_[key] = dictb[key] + return dict_ + + +def dict_merge(dicta, dictb): + """ + Merge two dictionaries. + """ + return _dict_merge(dicta, dictb, prefix='') + + +def dict_foreach(dic, func, special_func={}): + """ + Recursively apply a function to all non-dictionary leaf values in a dictionary. + """ + assert isinstance(dic, dict), 'input must be a dictionary' + for key in dic.keys(): + if isinstance(dic[key], dict): + dic[key] = dict_foreach(dic[key], func) + else: + if key in special_func.keys(): + dic[key] = special_func[key](dic[key]) + else: + dic[key] = func(dic[key]) + return dic + + +def dict_reduce(dicts, func, special_func={}): + """ + Reduce a list of dictionaries. Leaf values must be scalars. + """ + assert isinstance(dicts, list), 'input must be a list of dictionaries' + assert all([isinstance(d, dict) for d in dicts]), 'input must be a list of dictionaries' + assert len(dicts) > 0, 'input must be a non-empty list of dictionaries' + all_keys = set([key for dict_ in dicts for key in dict_.keys()]) + reduced_dict = {} + for key in all_keys: + vlist = [dict_[key] for dict_ in dicts if key in dict_.keys()] + if isinstance(vlist[0], dict): + reduced_dict[key] = dict_reduce(vlist, func, special_func) + else: + if key in special_func.keys(): + reduced_dict[key] = special_func[key](vlist) + else: + reduced_dict[key] = func(vlist) + return reduced_dict + + +def dict_any(dic, func): + """ + Recursively apply a function to all non-dictionary leaf values in a dictionary. 
+ """ + assert isinstance(dic, dict), 'input must be a dictionary' + for key in dic.keys(): + if isinstance(dic[key], dict): + if dict_any(dic[key], func): + return True + else: + if func(dic[key]): + return True + return False + + +def dict_all(dic, func): + """ + Recursively apply a function to all non-dictionary leaf values in a dictionary. + """ + assert isinstance(dic, dict), 'input must be a dictionary' + for key in dic.keys(): + if isinstance(dic[key], dict): + if not dict_all(dic[key], func): + return False + else: + if not func(dic[key]): + return False + return True + + +def dict_flatten(dic, sep='.'): + """ + Flatten a nested dictionary into a dictionary with no nested dictionaries. + """ + assert isinstance(dic, dict), 'input must be a dictionary' + flat_dict = {} + for key in dic.keys(): + if isinstance(dic[key], dict): + sub_dict = dict_flatten(dic[key], sep=sep) + for sub_key in sub_dict.keys(): + flat_dict[str(key) + sep + str(sub_key)] = sub_dict[sub_key] + else: + flat_dict[key] = dic[key] + return flat_dict + + +def make_grid(images, nrow=None, ncol=None, aspect_ratio=None): + num_images = len(images) + if nrow is None and ncol is None: + if aspect_ratio is not None: + nrow = int(np.round(np.sqrt(num_images / aspect_ratio))) + else: + nrow = int(np.sqrt(num_images)) + ncol = (num_images + nrow - 1) // nrow + elif nrow is None and ncol is not None: + nrow = (num_images + ncol - 1) // ncol + elif nrow is not None and ncol is None: + ncol = (num_images + nrow - 1) // nrow + else: + assert nrow * ncol >= num_images, 'nrow * ncol must be greater than or equal to the number of images' + + grid = np.zeros((nrow * images[0].shape[0], ncol * images[0].shape[1], images[0].shape[2]), dtype=images[0].dtype) + for i, img in enumerate(images): + row = i // ncol + col = i % ncol + grid[row * img.shape[0]:(row + 1) * img.shape[0], col * img.shape[1]:(col + 1) * img.shape[1]] = img + return grid + + +def notes_on_image(img, notes=None): + img = np.pad(img, ((0, 32), (0, 0), (0, 0)), 'constant', constant_values=0) + img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + if notes is not None: + img = cv2.putText(img, notes, (0, img.shape[0] - 4), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + return img + + +def save_image_with_notes(img, path, notes=None): + """ + Save an image with notes. + """ + if isinstance(img, torch.Tensor): + img = img.cpu().numpy().transpose(1, 2, 0) + if img.dtype == np.float32 or img.dtype == np.float64: + img = np.clip(img * 255, 0, 255).astype(np.uint8) + img = notes_on_image(img, notes) + cv2.imwrite(path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR)) + + +# debug utils + +def atol(x, y): + """ + Absolute tolerance. + """ + return torch.abs(x - y) + + +def rtol(x, y): + """ + Relative tolerance. + """ + return torch.abs(x - y) / torch.clamp_min(torch.maximum(torch.abs(x), torch.abs(y)), 1e-12) + + +# print utils +def indent(s, n=4): + """ + Indent a string. 
+ """ + lines = s.split('\n') + for i in range(1, len(lines)): + lines[i] = ' ' * n + lines[i] + return '\n'.join(lines) + diff --git a/third_party/TRELLIS/trellis/utils/postprocessing_utils.py b/third_party/TRELLIS/trellis/utils/postprocessing_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..373fc3fa4647f7d522583a7695bdb40f7fdbfc49 --- /dev/null +++ b/third_party/TRELLIS/trellis/utils/postprocessing_utils.py @@ -0,0 +1,598 @@ +from typing import * +import numpy as np +import torch +import utils3d +import nvdiffrast.torch as dr +from tqdm import tqdm +import trimesh +import trimesh.visual +import xatlas +import pyvista as pv +from pymeshfix import _meshfix +import igraph +import cv2 +from PIL import Image +from .random_utils import sphere_hammersley_sequence +from .render_utils import render_multiview +from ..renderers import GaussianRenderer +from ..representations import Strivec, Gaussian, MeshExtractResult + +def _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor: + return torch.where(f <= 0.0031308, f * 12.92, torch.pow(torch.clamp(f, 0.0031308), 1.0/2.4) * 1.055 - 0.055) + +def rgb_to_srgb_image(f: torch.Tensor) -> torch.Tensor: + assert f.shape[-1] == 3 or f.shape[-1] == 4 + + out = torch.cat((_rgb_to_srgb(f[..., 0:3]), f[..., 3:4]), dim=-1) if f.shape[-1] == 4 else _rgb_to_srgb(f) + assert out.shape[0] == f.shape[0] and out.shape[1] == f.shape[1] and out.shape[2] == f.shape[2] + return out + +@torch.no_grad() +def _fill_holes( + verts, + faces, + max_hole_size=0.04, + max_hole_nbe=32, + resolution=128, + num_views=500, + debug=False, + verbose=False +): + """ + Rasterize a mesh from multiple views and remove invisible faces. + Also includes postprocessing to: + 1. Remove connected components that are have low visibility. + 2. Mincut to remove faces at the inner side of the mesh connected to the outer side with a small hole. + + Args: + verts (torch.Tensor): Vertices of the mesh. Shape (V, 3). + faces (torch.Tensor): Faces of the mesh. Shape (F, 3). + max_hole_size (float): Maximum area of a hole to fill. + resolution (int): Resolution of the rasterization. + num_views (int): Number of views to rasterize the mesh. + verbose (bool): Whether to print progress. 
+ """ + # Construct cameras + yaws = [] + pitchs = [] + for i in range(num_views): + y, p = sphere_hammersley_sequence(i, num_views) + yaws.append(y) + pitchs.append(p) + yaws = torch.tensor(yaws).cuda() + pitchs = torch.tensor(pitchs).cuda() + radius = 2.0 + fov = torch.deg2rad(torch.tensor(40)).cuda() + projection = utils3d.torch.perspective_from_fov_xy(fov, fov, 1, 3) + views = [] + for (yaw, pitch) in zip(yaws, pitchs): + orig = torch.tensor([ + torch.sin(yaw) * torch.cos(pitch), + torch.cos(yaw) * torch.cos(pitch), + torch.sin(pitch), + ]).cuda().float() * radius + view = utils3d.torch.view_look_at(orig, torch.tensor([0, 0, 0]).float().cuda(), torch.tensor([0, 0, 1]).float().cuda()) + views.append(view) + views = torch.stack(views, dim=0) + + # Rasterize + visblity = torch.zeros(faces.shape[0], dtype=torch.int32, device=verts.device) + rastctx = utils3d.torch.RastContext(backend='cuda') + for i in tqdm(range(views.shape[0]), total=views.shape[0], disable=not verbose, desc='Rasterizing'): + view = views[i] + buffers = utils3d.torch.rasterize_triangle_faces( + rastctx, verts[None], faces, resolution, resolution, view=view, projection=projection + ) + face_id = buffers['face_id'][0][buffers['mask'][0] > 0.95] - 1 + face_id = torch.unique(face_id).long() + visblity[face_id] += 1 + visblity = visblity.float() / num_views + + # Mincut + ## construct outer faces + edges, face2edge, edge_degrees = utils3d.torch.compute_edges(faces) + boundary_edge_indices = torch.nonzero(edge_degrees == 1).reshape(-1) + connected_components = utils3d.torch.compute_connected_components(faces, edges, face2edge) + outer_face_indices = torch.zeros(faces.shape[0], dtype=torch.bool, device=faces.device) + for i in range(len(connected_components)): + outer_face_indices[connected_components[i]] = visblity[connected_components[i]] > min(max(visblity[connected_components[i]].quantile(0.75).item(), 0.25), 0.5) + outer_face_indices = outer_face_indices.nonzero().reshape(-1) + + ## construct inner faces + inner_face_indices = torch.nonzero(visblity == 0).reshape(-1) + if verbose: + tqdm.write(f'Found {inner_face_indices.shape[0]} invisible faces') + if inner_face_indices.shape[0] == 0: + return verts, faces + + ## Construct dual graph (faces as nodes, edges as edges) + dual_edges, dual_edge2edge = utils3d.torch.compute_dual_graph(face2edge) + dual_edge2edge = edges[dual_edge2edge] + dual_edges_weights = torch.norm(verts[dual_edge2edge[:, 0]] - verts[dual_edge2edge[:, 1]], dim=1) + if verbose: + tqdm.write(f'Dual graph: {dual_edges.shape[0]} edges') + + ## solve mincut problem + ### construct main graph + g = igraph.Graph() + g.add_vertices(faces.shape[0]) + g.add_edges(dual_edges.cpu().numpy()) + g.es['weight'] = dual_edges_weights.cpu().numpy() + + ### source and target + g.add_vertex('s') + g.add_vertex('t') + + ### connect invisible faces to source + g.add_edges([(f, 's') for f in inner_face_indices], attributes={'weight': torch.ones(inner_face_indices.shape[0], dtype=torch.float32).cpu().numpy()}) + + ### connect outer faces to target + g.add_edges([(f, 't') for f in outer_face_indices], attributes={'weight': torch.ones(outer_face_indices.shape[0], dtype=torch.float32).cpu().numpy()}) + + ### solve mincut + cut = g.mincut('s', 't', (np.array(g.es['weight']) * 1000).tolist()) + remove_face_indices = torch.tensor([v for v in cut.partition[0] if v < faces.shape[0]], dtype=torch.long, device=faces.device) + if verbose: + tqdm.write(f'Mincut solved, start checking the cut') + + ### check if the cut is valid with each 
connected component + to_remove_cc = utils3d.torch.compute_connected_components(faces[remove_face_indices]) + if debug: + tqdm.write(f'Number of connected components of the cut: {len(to_remove_cc)}') + valid_remove_cc = [] + cutting_edges = [] + for cc in to_remove_cc: + #### check if the connected component has low visibility + visblity_median = visblity[remove_face_indices[cc]].median() + if debug: + tqdm.write(f'visblity_median: {visblity_median}') + if visblity_median > 0.25: + continue + + #### check if the cuting loop is small enough + cc_edge_indices, cc_edges_degree = torch.unique(face2edge[remove_face_indices[cc]], return_counts=True) + cc_boundary_edge_indices = cc_edge_indices[cc_edges_degree == 1] + cc_new_boundary_edge_indices = cc_boundary_edge_indices[~torch.isin(cc_boundary_edge_indices, boundary_edge_indices)] + if len(cc_new_boundary_edge_indices) > 0: + cc_new_boundary_edge_cc = utils3d.torch.compute_edge_connected_components(edges[cc_new_boundary_edge_indices]) + cc_new_boundary_edges_cc_center = [verts[edges[cc_new_boundary_edge_indices[edge_cc]]].mean(dim=1).mean(dim=0) for edge_cc in cc_new_boundary_edge_cc] + cc_new_boundary_edges_cc_area = [] + for i, edge_cc in enumerate(cc_new_boundary_edge_cc): + _e1 = verts[edges[cc_new_boundary_edge_indices[edge_cc]][:, 0]] - cc_new_boundary_edges_cc_center[i] + _e2 = verts[edges[cc_new_boundary_edge_indices[edge_cc]][:, 1]] - cc_new_boundary_edges_cc_center[i] + cc_new_boundary_edges_cc_area.append(torch.norm(torch.cross(_e1, _e2, dim=-1), dim=1).sum() * 0.5) + if debug: + cutting_edges.append(cc_new_boundary_edge_indices) + tqdm.write(f'Area of the cutting loop: {cc_new_boundary_edges_cc_area}') + if any([l > max_hole_size for l in cc_new_boundary_edges_cc_area]): + continue + + valid_remove_cc.append(cc) + + if debug: + face_v = verts[faces].mean(dim=1).cpu().numpy() + vis_dual_edges = dual_edges.cpu().numpy() + vis_colors = np.zeros((faces.shape[0], 3), dtype=np.uint8) + vis_colors[inner_face_indices.cpu().numpy()] = [0, 0, 255] + vis_colors[outer_face_indices.cpu().numpy()] = [0, 255, 0] + vis_colors[remove_face_indices.cpu().numpy()] = [255, 0, 255] + if len(valid_remove_cc) > 0: + vis_colors[remove_face_indices[torch.cat(valid_remove_cc)].cpu().numpy()] = [255, 0, 0] + utils3d.io.write_ply('dbg_dual.ply', face_v, edges=vis_dual_edges, vertex_colors=vis_colors) + + vis_verts = verts.cpu().numpy() + vis_edges = edges[torch.cat(cutting_edges)].cpu().numpy() + utils3d.io.write_ply('dbg_cut.ply', vis_verts, edges=vis_edges) + + + if len(valid_remove_cc) > 0: + remove_face_indices = remove_face_indices[torch.cat(valid_remove_cc)] + mask = torch.ones(faces.shape[0], dtype=torch.bool, device=faces.device) + mask[remove_face_indices] = 0 + faces = faces[mask] + faces, verts = utils3d.torch.remove_unreferenced_vertices(faces, verts) + if verbose: + tqdm.write(f'Removed {(~mask).sum()} faces by mincut') + else: + if verbose: + tqdm.write(f'Removed 0 faces by mincut') + + mesh = _meshfix.PyTMesh() + mesh.load_array(verts.cpu().numpy(), faces.cpu().numpy()) + mesh.fill_small_boundaries(nbe=max_hole_nbe, refine=True) + verts, faces = mesh.return_arrays() + verts, faces = torch.tensor(verts, device='cuda', dtype=torch.float32), torch.tensor(faces, device='cuda', dtype=torch.int32) + + return verts, faces + + +def postprocess_mesh( + vertices: np.array, + faces: np.array, + simplify: bool = True, + simplify_ratio: float = 0.9, + fill_holes: bool = True, + fill_holes_max_hole_size: float = 0.04, + fill_holes_max_hole_nbe: int = 32, + 
fill_holes_resolution: int = 1024, + fill_holes_num_views: int = 1000, + debug: bool = False, + verbose: bool = False, +): + """ + Postprocess a mesh by simplifying, removing invisible faces, and removing isolated pieces. + + Args: + vertices (np.array): Vertices of the mesh. Shape (V, 3). + faces (np.array): Faces of the mesh. Shape (F, 3). + simplify (bool): Whether to simplify the mesh, using quadric edge collapse. + simplify_ratio (float): Ratio of faces to keep after simplification. + fill_holes (bool): Whether to fill holes in the mesh. + fill_holes_max_hole_size (float): Maximum area of a hole to fill. + fill_holes_max_hole_nbe (int): Maximum number of boundary edges of a hole to fill. + fill_holes_resolution (int): Resolution of the rasterization. + fill_holes_num_views (int): Number of views to rasterize the mesh. + verbose (bool): Whether to print progress. + """ + + if verbose: + tqdm.write(f'Before postprocess: {vertices.shape[0]} vertices, {faces.shape[0]} faces') + + # Simplify + if simplify and simplify_ratio > 0: + mesh = pv.PolyData(vertices, np.concatenate([np.full((faces.shape[0], 1), 3), faces], axis=1)) + mesh = mesh.decimate(simplify_ratio, progress_bar=verbose) + vertices, faces = mesh.points, mesh.faces.reshape(-1, 4)[:, 1:] + if verbose: + tqdm.write(f'After decimate: {vertices.shape[0]} vertices, {faces.shape[0]} faces') + + # Remove invisible faces + if fill_holes: + vertices, faces = torch.tensor(vertices).cuda(), torch.tensor(faces.astype(np.int32)).cuda() + vertices, faces = _fill_holes( + vertices, faces, + max_hole_size=fill_holes_max_hole_size, + max_hole_nbe=fill_holes_max_hole_nbe, + resolution=fill_holes_resolution, + num_views=fill_holes_num_views, + debug=debug, + verbose=verbose, + ) + vertices, faces = vertices.cpu().numpy(), faces.cpu().numpy() + if verbose: + tqdm.write(f'After remove invisible faces: {vertices.shape[0]} vertices, {faces.shape[0]} faces') + + return vertices, faces + + +def parametrize_mesh(vertices: np.array, faces: np.array): + """ + Parametrize a mesh to a texture space, using xatlas. + + Args: + vertices (np.array): Vertices of the mesh. Shape (V, 3). + faces (np.array): Faces of the mesh. Shape (F, 3). + """ + + vmapping, indices, uvs = xatlas.parametrize(vertices, faces) + + vertices = vertices[vmapping] + faces = indices + + return vertices, faces, uvs + + +def bake_texture( + vertices: np.array, + faces: np.array, + uvs: np.array, + observations: List[np.array], + masks: List[np.array], + extrinsics: List[np.array], + intrinsics: List[np.array], + texture_size: int = 2048, + near: float = 0.1, + far: float = 10.0, + mode: Literal['fast', 'opt'] = 'opt', + lambda_tv: float = 1e-2, + verbose: bool = False, +): + """ + Bake texture to a mesh from multiple observations. + + Args: + vertices (np.array): Vertices of the mesh. Shape (V, 3). + faces (np.array): Faces of the mesh. Shape (F, 3). + uvs (np.array): UV coordinates of the mesh. Shape (V, 2). + observations (List[np.array]): List of observations. Each observation is a 2D image. Shape (H, W, 3). + masks (List[np.array]): List of masks. Each mask is a 2D image. Shape (H, W). + extrinsics (List[np.array]): List of extrinsics. Shape (4, 4). + intrinsics (List[np.array]): List of intrinsics. Shape (3, 3). + texture_size (int): Size of the texture. + near (float): Near plane of the camera. + far (float): Far plane of the camera. + mode (Literal['fast', 'opt']): Mode of texture baking. + lambda_tv (float): Weight of total variation loss in optimization. 
+ verbose (bool): Whether to print progress. + """ + vertices = torch.tensor(vertices).cuda() + faces = torch.tensor(faces.astype(np.int32)).cuda() + uvs = torch.tensor(uvs).cuda() + observations = [torch.tensor(obs / 255.0).float().cuda() for obs in observations] + masks = [torch.tensor(m>0).bool().cuda() for m in masks] + views = [utils3d.torch.extrinsics_to_view(torch.tensor(extr).cuda()) for extr in extrinsics] + projections = [utils3d.torch.intrinsics_to_perspective(torch.tensor(intr).cuda(), near, far) for intr in intrinsics] + + if mode == 'fast': + texture = torch.zeros((texture_size * texture_size, 3), dtype=torch.float32).cuda() + texture_weights = torch.zeros((texture_size * texture_size), dtype=torch.float32).cuda() + rastctx = utils3d.torch.RastContext(backend='cuda') + for observation, view, projection in tqdm(zip(observations, views, projections), total=len(observations), disable=not verbose, desc='Texture baking (fast)'): + with torch.no_grad(): + rast = utils3d.torch.rasterize_triangle_faces( + rastctx, vertices[None], faces, observation.shape[1], observation.shape[0], uv=uvs[None], view=view, projection=projection + ) + uv_map = rast['uv'][0].detach().flip(0) + mask = rast['mask'][0].detach().bool() & masks[0] + + # nearest neighbor interpolation + uv_map = (uv_map * texture_size).floor().long() + obs = observation[mask] + uv_map = uv_map[mask] + idx = uv_map[:, 0] + (texture_size - uv_map[:, 1] - 1) * texture_size + texture = texture.scatter_add(0, idx.view(-1, 1).expand(-1, 3), obs) + texture_weights = texture_weights.scatter_add(0, idx, torch.ones((obs.shape[0]), dtype=torch.float32, device=texture.device)) + + mask = texture_weights > 0 + texture[mask] /= texture_weights[mask][:, None] + texture = np.clip(texture.reshape(texture_size, texture_size, 3).cpu().numpy() * 255, 0, 255).astype(np.uint8) + + # inpaint + mask = (texture_weights == 0).cpu().numpy().astype(np.uint8).reshape(texture_size, texture_size) + texture = cv2.inpaint(texture, mask, 3, cv2.INPAINT_TELEA) + + elif mode == 'opt': + rastctx = utils3d.torch.RastContext(backend='cuda') + observations = [observations.flip(0) for observations in observations] + masks = [m.flip(0) for m in masks] + _uv = [] + _uv_dr = [] + for observation, view, projection in tqdm(zip(observations, views, projections), total=len(views), disable=not verbose, desc='Texture baking (opt): UV'): + with torch.no_grad(): + rast = utils3d.torch.rasterize_triangle_faces( + rastctx, vertices[None], faces, observation.shape[1], observation.shape[0], uv=uvs[None], view=view, projection=projection + ) + _uv.append(rast['uv'].detach()) + _uv_dr.append(rast['uv_dr'].detach()) + + texture = torch.nn.Parameter(torch.zeros((1, texture_size, texture_size, 3), dtype=torch.float32).cuda()) + optimizer = torch.optim.Adam([texture], betas=(0.5, 0.9), lr=1e-2) + + def exp_anealing(optimizer, step, total_steps, start_lr, end_lr): + return start_lr * (end_lr / start_lr) ** (step / total_steps) + + def cosine_anealing(optimizer, step, total_steps, start_lr, end_lr): + return end_lr + 0.5 * (start_lr - end_lr) * (1 + np.cos(np.pi * step / total_steps)) + + def tv_loss(texture): + return torch.nn.functional.l1_loss(texture[:, :-1, :, :], texture[:, 1:, :, :]) + \ + torch.nn.functional.l1_loss(texture[:, :, :-1, :], texture[:, :, 1:, :]) + + total_steps = 2500 + with tqdm(total=total_steps, disable=not verbose, desc='Texture baking (opt): optimizing') as pbar: + for step in range(total_steps): + optimizer.zero_grad() + selected = np.random.randint(0, 
+
+
+def to_glb(
+    app_rep: Union[Strivec, Gaussian],
+    mesh: MeshExtractResult,
+    simplify: float = 0.95,
+    fill_holes: bool = True,
+    fill_holes_max_size: float = 0.04,
+    texture_size: int = 1024,
+    debug: bool = False,
+    verbose: bool = True,
+) -> Tuple[trimesh.Trimesh, trimesh.Trimesh]:
+    """
+    Convert a generated asset to glb-ready meshes.
+
+    Args:
+        app_rep (Union[Strivec, Gaussian]): Appearance representation.
+        mesh (MeshExtractResult): Extracted mesh.
+        simplify (float): Ratio of faces to remove in simplification.
+        fill_holes (bool): Whether to fill holes in the mesh.
+        fill_holes_max_size (float): Maximum area of a hole to fill.
+        texture_size (int): Size of the texture.
+        debug (bool): Whether to print debug information.
+        verbose (bool): Whether to print progress.
+
+    Returns:
+        A tuple of the untextured mesh and the textured mesh.
+    """
+    vertices = mesh.vertices.cpu().numpy()
+    faces = mesh.faces.cpu().numpy()
+
+    # mesh postprocess
+    vertices, faces = postprocess_mesh(
+        vertices, faces,
+        simplify=simplify > 0,
+        simplify_ratio=simplify,
+        fill_holes=fill_holes,
+        fill_holes_max_hole_size=fill_holes_max_size,
+        fill_holes_max_hole_nbe=int(250 * np.sqrt(1 - simplify)),
+        fill_holes_resolution=1024,
+        fill_holes_num_views=1000,
+        debug=debug,
+        verbose=verbose,
+    )
+
+    # parametrize mesh
+    vertices, faces, uvs = parametrize_mesh(vertices, faces)
+
+    # bake texture
+    observations, extrinsics, intrinsics = render_multiview(app_rep, resolution=1024, nviews=100)
+    masks = [np.any(observation > 0, axis=-1) for observation in observations]
+    extrinsics = [extrinsics[i].cpu().numpy() for i in range(len(extrinsics))]
+    intrinsics = [intrinsics[i].cpu().numpy() for i in range(len(intrinsics))]
+    texture = bake_texture(
+        vertices, faces, uvs,
+        observations, masks, extrinsics, intrinsics,
+        texture_size=texture_size, mode='opt',
+        lambda_tv=0.01,
+        verbose=verbose,
+    )
+    texture = Image.fromarray(texture)
+
+    # rotate mesh (from z-up to y-up)
+    vertices = vertices @ np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
+    material = trimesh.visual.material.PBRMaterial(
+        roughnessFactor=1.0,
+        baseColorTexture=texture,
+        baseColorFactor=np.array([255, 255, 255, 255], dtype=np.uint8),
+    )
+    mesh = trimesh.Trimesh(vertices, faces)
+    mesh_textured = trimesh.Trimesh(vertices, faces, visual=trimesh.visual.TextureVisuals(uv=uvs, material=material))
+    return mesh, mesh_textured
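+
+# A minimal usage sketch (illustrative only; assumes `gs` is a fitted Gaussian and
+# `mesh_result` a MeshExtractResult). trimesh handles the actual glb export:
+#
+#   base_mesh, textured_mesh = to_glb(gs, mesh_result, simplify=0.95, texture_size=1024)
+#   textured_mesh.export('asset.glb')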
+ """ + if simplify <= 0: + return gs + + # simplify + observations, extrinsics, intrinsics = render_multiview(gs, resolution=1024, nviews=100) + observations = [torch.tensor(obs / 255.0).float().cuda().permute(2, 0, 1) for obs in observations] + + # Following https://arxiv.org/pdf/2411.06019 + renderer = GaussianRenderer({ + "resolution": 1024, + "near": 0.8, + "far": 1.6, + "ssaa": 1, + "bg_color": (0,0,0), + }) + new_gs = Gaussian(**gs.init_params) + new_gs._features_dc = gs._features_dc.clone() + new_gs._features_rest = gs._features_rest.clone() if gs._features_rest is not None else None + new_gs._opacity = torch.nn.Parameter(gs._opacity.clone()) + new_gs._rotation = torch.nn.Parameter(gs._rotation.clone()) + new_gs._scaling = torch.nn.Parameter(gs._scaling.clone()) + new_gs._xyz = torch.nn.Parameter(gs._xyz.clone()) + + start_lr = [1e-4, 1e-3, 5e-3, 0.025] + end_lr = [1e-6, 1e-5, 5e-5, 0.00025] + optimizer = torch.optim.Adam([ + {"params": new_gs._xyz, "lr": start_lr[0]}, + {"params": new_gs._rotation, "lr": start_lr[1]}, + {"params": new_gs._scaling, "lr": start_lr[2]}, + {"params": new_gs._opacity, "lr": start_lr[3]}, + ], lr=start_lr[0]) + + def exp_anealing(optimizer, step, total_steps, start_lr, end_lr): + return start_lr * (end_lr / start_lr) ** (step / total_steps) + + def cosine_anealing(optimizer, step, total_steps, start_lr, end_lr): + return end_lr + 0.5 * (start_lr - end_lr) * (1 + np.cos(np.pi * step / total_steps)) + + _zeta = new_gs.get_opacity.clone().detach().squeeze() + _lambda = torch.zeros_like(_zeta) + _delta = 1e-7 + _interval = 10 + num_target = int((1 - simplify) * _zeta.shape[0]) + + with tqdm(total=2500, disable=not verbose, desc='Simplifying Gaussian') as pbar: + for i in range(2500): + # prune + if i % 100 == 0: + mask = new_gs.get_opacity.squeeze() > 0.05 + mask = torch.nonzero(mask).squeeze() + new_gs._xyz = torch.nn.Parameter(new_gs._xyz[mask]) + new_gs._rotation = torch.nn.Parameter(new_gs._rotation[mask]) + new_gs._scaling = torch.nn.Parameter(new_gs._scaling[mask]) + new_gs._opacity = torch.nn.Parameter(new_gs._opacity[mask]) + new_gs._features_dc = new_gs._features_dc[mask] + new_gs._features_rest = new_gs._features_rest[mask] if new_gs._features_rest is not None else None + _zeta = _zeta[mask] + _lambda = _lambda[mask] + # update optimizer state + for param_group, new_param in zip(optimizer.param_groups, [new_gs._xyz, new_gs._rotation, new_gs._scaling, new_gs._opacity]): + stored_state = optimizer.state[param_group['params'][0]] + if 'exp_avg' in stored_state: + stored_state['exp_avg'] = stored_state['exp_avg'][mask] + stored_state['exp_avg_sq'] = stored_state['exp_avg_sq'][mask] + del optimizer.state[param_group['params'][0]] + param_group['params'][0] = new_param + optimizer.state[param_group['params'][0]] = stored_state + + opacity = new_gs.get_opacity.squeeze() + + # sparisfy + if i % _interval == 0: + _zeta = _lambda + opacity.detach() + if opacity.shape[0] > num_target: + index = _zeta.topk(num_target)[1] + _m = torch.ones_like(_zeta, dtype=torch.bool) + _m[index] = 0 + _zeta[_m] = 0 + _lambda = _lambda + opacity.detach() - _zeta + + # sample a random view + view_idx = np.random.randint(len(observations)) + observation = observations[view_idx] + extrinsic = extrinsics[view_idx] + intrinsic = intrinsics[view_idx] + + color = renderer.render(new_gs, extrinsic, intrinsic)['color'] + rgb_loss = torch.nn.functional.l1_loss(color, observation) + loss = rgb_loss + \ + _delta * torch.sum(torch.pow(_lambda + opacity - _zeta, 2)) + + 
diff --git a/third_party/TRELLIS/trellis/utils/random_utils.py b/third_party/TRELLIS/trellis/utils/random_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b668c277b51f4930991912a80573adc79364028
--- /dev/null
+++ b/third_party/TRELLIS/trellis/utils/random_utils.py
@@ -0,0 +1,30 @@
+import numpy as np
+
+PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53]
+
+def radical_inverse(base, n):
+    # mirror the base-`base` digits of n around the radix point
+    val = 0
+    inv_base = 1.0 / base
+    inv_base_n = inv_base
+    while n > 0:
+        digit = n % base
+        val += digit * inv_base_n
+        n //= base
+        inv_base_n *= inv_base
+    return val
+
+def halton_sequence(dim, n):
+    # n-th point of the `dim`-dimensional Halton sequence
+    return [radical_inverse(PRIMES[d], n) for d in range(dim)]
+
+def hammersley_sequence(dim, n, num_samples):
+    # first coordinate is n / num_samples, the rest come from the Halton sequence
+    return [n / num_samples] + halton_sequence(dim - 1, n)
+
+def sphere_hammersley_sequence(n, num_samples, offset=(0, 0), remap=False):
+    # map a 2D Hammersley point to a (yaw, pitch) pair covering the sphere near-uniformly
+    u, v = hammersley_sequence(2, n, num_samples)
+    u += offset[0] / num_samples
+    v += offset[1]
+    if remap:
+        u = 2 * u if u < 0.25 else 2 / 3 * u + 1 / 3
+    theta = np.arccos(1 - 2 * u) - np.pi / 2
+    phi = v * 2 * np.pi
+    return [phi, theta]
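+
+# A minimal usage sketch (illustrative only): sample 30 near-uniform camera
+# directions on the sphere as (yaw, pitch) pairs:
+#
+#   cams = [sphere_hammersley_sequence(i, 30) for i in range(30)]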
diff --git a/third_party/TRELLIS/trellis/utils/render_utils.py b/third_party/TRELLIS/trellis/utils/render_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..de8e96345f9fe233444c4b901987505e4b77a5ff
--- /dev/null
+++ b/third_party/TRELLIS/trellis/utils/render_utils.py
@@ -0,0 +1,116 @@
+import torch
+import numpy as np
+from tqdm import tqdm
+import utils3d
+from PIL import Image
+
+from ..renderers import OctreeRenderer, GaussianRenderer, MeshRenderer
+from ..representations import Octree, Gaussian, MeshExtractResult
+from ..modules import sparse as sp
+from .random_utils import sphere_hammersley_sequence
+
+
+def yaw_pitch_r_fov_to_extrinsics_intrinsics(yaws, pitchs, rs, fovs):
+    is_list = isinstance(yaws, list)
+    if not is_list:
+        yaws = [yaws]
+        pitchs = [pitchs]
+    if not isinstance(rs, list):
+        rs = [rs] * len(yaws)
+    if not isinstance(fovs, list):
+        fovs = [fovs] * len(yaws)
+    extrinsics = []
+    intrinsics = []
+    for yaw, pitch, r, fov in zip(yaws, pitchs, rs, fovs):
+        fov = torch.deg2rad(torch.tensor(float(fov))).cuda()
+        yaw = torch.tensor(float(yaw)).cuda()
+        pitch = torch.tensor(float(pitch)).cuda()
+        orig = torch.stack([
+            torch.sin(yaw) * torch.cos(pitch),
+            torch.cos(yaw) * torch.cos(pitch),
+            torch.sin(pitch),
+        ]) * r
+        extr = utils3d.torch.extrinsics_look_at(orig, torch.tensor([0, 0, 0]).float().cuda(), torch.tensor([0, 0, 1]).float().cuda())
+        intr = utils3d.torch.intrinsics_from_fov_xy(fov, fov)
+        extrinsics.append(extr)
+        intrinsics.append(intr)
+    if not is_list:
+        extrinsics = extrinsics[0]
+        intrinsics = intrinsics[0]
+    return extrinsics, intrinsics
+
+
+def render_frames(sample, extrinsics, intrinsics, options={}, colors_overwrite=None, verbose=True, **kwargs):
+    if isinstance(sample, Octree):
+        renderer = OctreeRenderer()
+        renderer.rendering_options.resolution = options.get('resolution', 512)
+        renderer.rendering_options.near = options.get('near', 0.8)
+        renderer.rendering_options.far = options.get('far', 1.6)
+        renderer.rendering_options.bg_color = options.get('bg_color', (0, 0, 0))
+        renderer.rendering_options.ssaa = options.get('ssaa', 4)
+        renderer.pipe.primitive = sample.primitive
+    elif isinstance(sample, Gaussian):
+        renderer = GaussianRenderer()
+        renderer.rendering_options.resolution = options.get('resolution', 512)
+        renderer.rendering_options.near = options.get('near', 0.8)
+        renderer.rendering_options.far = options.get('far', 1.6)
+        renderer.rendering_options.bg_color = options.get('bg_color', (0, 0, 0))
+        renderer.rendering_options.ssaa = options.get('ssaa', 1)
+        renderer.pipe.kernel_size = kwargs.get('kernel_size', 0.1)
+        renderer.pipe.use_mip_gaussian = True
+    elif isinstance(sample, MeshExtractResult):
+        renderer = MeshRenderer()
+        renderer.rendering_options.resolution = options.get('resolution', 512)
+        renderer.rendering_options.near = options.get('near', 1)
+        renderer.rendering_options.far = options.get('far', 100)
+        renderer.rendering_options.ssaa = options.get('ssaa', 4)
+    else:
+        raise ValueError(f'Unsupported sample type: {type(sample)}')
+
+    rets = {}
+    for extr, intr in tqdm(zip(extrinsics, intrinsics), total=len(extrinsics), desc='Rendering', disable=not verbose):
+        if not isinstance(sample, MeshExtractResult):
+            res = renderer.render(sample, extr, intr, colors_overwrite=colors_overwrite)
+            if 'color' not in rets: rets['color'] = []
+            if 'depth' not in rets: rets['depth'] = []
+            rets['color'].append(np.clip(res['color'].detach().cpu().numpy().transpose(1, 2, 0) * 255, 0, 255).astype(np.uint8))
+            if 'percent_depth' in res:
+                rets['depth'].append(res['percent_depth'].detach().cpu().numpy())
+            elif 'depth' in res:
+                rets['depth'].append(res['depth'].detach().cpu().numpy())
+            else:
+                rets['depth'].append(None)
+        else:
+            res = renderer.render(sample, extr, intr)
+            if 'normal' not in rets: rets['normal'] = []
+            rets['normal'].append(np.clip(res['normal'].detach().cpu().numpy().transpose(1, 2, 0) * 255, 0, 255).astype(np.uint8))
+    return rets
+
+
+def render_video(sample, resolution=512, bg_color=(0, 0, 0), num_frames=300, r=2, fov=40, **kwargs):
+    yaws = torch.linspace(0, 2 * np.pi, num_frames)
+    pitch = 0.25 + 0.5 * torch.sin(torch.linspace(0, 2 * np.pi, num_frames))
+    yaws = yaws.tolist()
+    pitch = pitch.tolist()
+    extrinsics, intrinsics = yaw_pitch_r_fov_to_extrinsics_intrinsics(yaws, pitch, r, fov)
+    return render_frames(sample, extrinsics, intrinsics, {'resolution': resolution, 'bg_color': bg_color}, **kwargs)
+
+
+def render_multiview(sample, resolution=512, nviews=30):
+    r = 2
+    fov = 40
+    cams = [sphere_hammersley_sequence(i, nviews) for i in range(nviews)]
+    yaws = [cam[0] for cam in cams]
+    pitchs = [cam[1] for cam in cams]
+    extrinsics, intrinsics = yaw_pitch_r_fov_to_extrinsics_intrinsics(yaws, pitchs, r, fov)
+    res = render_frames(sample, extrinsics, intrinsics, {'resolution': resolution, 'bg_color': (0, 0, 0)})
+    return res['color'], extrinsics, intrinsics
+
+
+def render_snapshot(samples, resolution=512, bg_color=(0, 0, 0), offset=(-16 / 180 * np.pi, 20 / 180 * np.pi), r=10, fov=8, **kwargs):
+    yaw = [0, np.pi / 2, np.pi, 3 * np.pi / 2]
+    yaw_offset = offset[0]
+    yaw = [y + yaw_offset for y in yaw]
+    pitch = [offset[1] for _ in range(4)]
+    extrinsics, intrinsics = yaw_pitch_r_fov_to_extrinsics_intrinsics(yaw, pitch, r, fov)
+    return render_frames(samples, extrinsics, intrinsics, {'resolution': resolution, 'bg_color': bg_color}, **kwargs)
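+
+# A minimal usage sketch (illustrative only; assumes `gs` is a Gaussian sample and
+# that imageio is installed). render_video returns per-frame uint8 color images:
+#
+#   frames = render_video(gs, resolution=512, num_frames=120)['color']
+#   import imageio
+#   imageio.mimsave('turntable.mp4', frames, fps=30)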