Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
- third_party/AnyBimanual/.DS_Store +0 -0
- third_party/AnyBimanual/.gitignore +171 -0
- third_party/AnyBimanual/LICENSE +402 -0
- third_party/AnyBimanual/README.md +136 -0
- third_party/AnyBimanual/eval.py +296 -0
- third_party/AnyBimanual/peract_config.py +34 -0
- third_party/AnyBimanual/pyproject.toml +33 -0
- third_party/AnyBimanual/run_seed_fn.py +243 -0
- third_party/AnyBimanual/third_party/pytorch3d/.clang-format +85 -0
- third_party/AnyBimanual/third_party/pytorch3d/.flake8 +9 -0
- third_party/AnyBimanual/third_party/pytorch3d/.gitignore +21 -0
- third_party/AnyBimanual/third_party/pytorch3d/INSTALL.md +157 -0
- third_party/AnyBimanual/third_party/pytorch3d/LICENSE +30 -0
- third_party/AnyBimanual/third_party/pytorch3d/LICENSE-3RD-PARTY +71 -0
- third_party/AnyBimanual/third_party/pytorch3d/README.md +183 -0
- third_party/AnyBimanual/third_party/pytorch3d/setup.cfg +14 -0
- third_party/AnyBimanual/third_party/pytorch3d/setup.py +181 -0
- third_party/AnyBimanual/third_party/pytorch3d/tests/test_shapenet_core.py +297 -0
- third_party/AnyBimanual/third_party/pytorch3d/tests/test_so3.py +283 -0
- third_party/AnyBimanual/third_party/pytorch3d/tests/test_splatter_blend.py +627 -0
- third_party/AnyBimanual/third_party/pytorch3d/tests/test_struct_utils.py +227 -0
- third_party/AnyBimanual/third_party/pytorch3d/tests/test_subdivide_meshes.py +234 -0
- third_party/AnyBimanual/third_party/pytorch3d/tests/test_symeig3x3.py +264 -0
- third_party/AnyBimanual/third_party/pytorch3d/tests/test_texturing.py +1325 -0
- third_party/AnyBimanual/third_party/pytorch3d/tests/test_transforms.py +1350 -0
- third_party/AnyBimanual/third_party/pytorch3d/tests/test_vert_align.py +194 -0
- third_party/AnyBimanual/third_party/pytorch3d/tests/test_vis.py +74 -0
- third_party/AnyBimanual/third_party/pytorch3d/tests/test_volumes.py +987 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/.dockerignore +2 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/.gitignore +13 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/README.md +265 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/core/Footer.js +91 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/core/Tutorial.js +100 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/core/TutorialSidebar.js +93 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/package.json +14 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/pages/en/help.js +55 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/pages/en/index.js +240 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/pages/en/users.js +49 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/pages/tutorials/index.js +83 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/sidebars.json +9 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/siteConfig.js +91 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/static/css/custom.css +360 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/static/css/pygments.css +213 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/static/img/batching.svg +16 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/static/img/colab_icon.png +0 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/static/img/favicon.ico +0 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/static/img/ops.png +0 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/static/img/ops.svg +23 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/static/img/oss_logo.png +0 -0
- third_party/AnyBimanual/third_party/pytorch3d/website/static/img/pytorch3dfavicon.png +0 -0
third_party/AnyBimanual/.DS_Store
ADDED
Binary file (10.2 kB).
third_party/AnyBimanual/.gitignore
ADDED
@@ -0,0 +1,171 @@

wandb/
log/
log_single/
scripts_run/
outputs/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
replay/
ckpts/
bimanual_data/
misc/
# C extensions
*.so
*.ipynb
.history/
.vsocde/
[lines 19-171: the standard Python .gitignore template (packaging, PyInstaller, test/coverage, documentation, environments, type checkers, and IDE settings), kept fully commented out in the committed file]
third_party/AnyBimanual/LICENSE
ADDED
@@ -0,0 +1,402 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

[lines 5-402: the full Apache License 2.0 terms and conditions and the "how to apply" appendix; the same 201-line license text appears twice in this file]
third_party/AnyBimanual/README.md
ADDED
@@ -0,0 +1,136 @@

<!-- <p align="center">
  <img src="docs/logo.png" alt="AnyBimanual Logo">
</p> -->

<div align="center">
<img src="docs/logo.png" alt="AnyBimanual Logo" width="200">

# AnyBimanual: Transferring Unimanual Policy for General Bimanual Manipulation

[](https://github.com/pre-commit/pre-commit)
[](https://pytorch.org/get-started/locally/)
[](https://wandb.ai/site/)
[](https://hydra.cc/)
[](https://github.com/ashleve/lightning-hydra-template#license)

[Guanxing Lu <sup>*</sup>](https://guanxinglu.github.io/), [Tengbo Yu <sup>*</sup>](https://github.com/Tengbo-Yu), [Haoyuan Deng](https://github.com/Denghaoyuan123?tab=repositories), [Season Si Chen](https://www.sigs.tsinghua.edu.cn/Chensi_en/main.htm), [Yansong Tang <sup>†</sup>](https://andytang15.github.io/), [Ziwei Wang](https://ziweiwangthu.github.io/)

**[[Project Page](https://anybimanual.github.io/)] | [[Paper](https://arxiv.org/pdf/2412.06779)] | [[Real-World Codebase](https://github.com/Denghaoyuan123/Bimanual_ur5e_joystick_control)] | [[YouTube](https://www.youtube.com/watch?v=RFwLgtzrXuM)] | [[X](#)]**
</div>
<!--  -->

**AnyBimanual** is a training framework for transferring any pretrained unimanual robotic manipulation policy to a multi-task bimanual manipulation policy with only a few bimanual demonstrations. We first introduce a **skill manager** that dynamically schedules the skill representations discovered from the pretrained unimanual policy for bimanual manipulation tasks, linearly combining skill primitives with a task-oriented compensation term to represent the bimanual manipulation instruction. To mitigate the observation discrepancy between unimanual and bimanual systems, we present a **visual aligner** that generates soft masks for the visual embedding of the workspace, aligning the visual input of the unimanual policy model for each arm with what it saw during pretraining. AnyBimanual shows superiority on **12** simulated tasks from **RLBench2**, with a sizable **12.67%** improvement in success rate over previous methods. Experiments on **9** real-world tasks further verify its practicality, with an average success rate of **84.62%**.
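
The sketch below is a minimal, purely illustrative rendering of the two ideas described above. It is not the implementation shipped in this repository (the actual modules live under `agents/`); every class, parameter, and shape here is a hypothetical assumption made only to make the description concrete.

```python
# Illustrative sketch only -- NOT the repository's code; all names are hypothetical.
import torch
import torch.nn as nn


class SkillManagerSketch(nn.Module):
    """Linearly combines K skill primitives with a task-oriented compensation term."""

    def __init__(self, num_primitives: int, skill_dim: int, lang_dim: int):
        super().__init__()
        # Skill primitives discovered from the pretrained unimanual policy.
        self.primitives = nn.Parameter(torch.randn(num_primitives, skill_dim))
        # Combination weights and a compensation vector predicted from the instruction embedding.
        self.weight_head = nn.Linear(lang_dim, num_primitives)
        self.compensation_head = nn.Linear(lang_dim, skill_dim)

    def forward(self, lang_emb: torch.Tensor) -> torch.Tensor:
        weights = torch.softmax(self.weight_head(lang_emb), dim=-1)   # (B, K)
        combined = weights @ self.primitives                          # (B, D)
        return combined + self.compensation_head(lang_emb)            # skill representation


class VisualAlignerSketch(nn.Module):
    """Generates a soft mask over the workspace embedding for one arm."""

    def __init__(self, feat_dim: int):
        super().__init__()
        self.mask_head = nn.Sequential(nn.Linear(feat_dim, 1), nn.Sigmoid())

    def forward(self, visual_emb: torch.Tensor) -> torch.Tensor:
        # visual_emb: (B, N, C) workspace tokens; the soft mask keeps the regions that
        # resemble the single-arm workspace the unimanual policy saw during pretraining.
        mask = self.mask_head(visual_emb)                              # (B, N, 1)
        return visual_emb * mask
```

In this reading, one aligner per arm would feed its masked embedding into the frozen unimanual backbone, while the skill manager supplies the per-arm skill representation.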

https://github.com/user-attachments/assets/2cd23178-fde9-4af9-89c4-7cd703c88156

🎉 **NEWS**:

- *Jun. 2025:* Our paper is accepted by ICCV 2025!
- *Dec. 2024:* Codebase for both simulated and real-world experiments is released!

# 📝 TODO
- [x] Release pretrained checkpoints.

# 💻 Installation

**NOTE**: AnyBimanual is mainly built upon the [Perceiver-Actor^2](https://github.com/markusgrotz/peract_bimanual) repo by Markus Grotz et al.

See [INSTALL.md](docs/INSTALLATION.md) for installation instructions.

See [ERROR_CATCH.md](docs/ERROR_CATCH.md) for troubleshooting common errors.

# 🛠️ Usage

The following steps are structured in order.

## 🗃️ Generate Demonstrations

Please check out the website for [pre-generated RLBench demonstrations](https://bimanual.github.io). If you use these datasets directly, you don't need to run `tools/bimanual_data_generator.py` from RLBench. Using these datasets also helps reproducibility, since each scene is randomly sampled in `data_generator_bimanual.py`.

## 🚆 Training
We use wandb to log curves and visualizations. Log in to wandb before running the scripts.
```bash
wandb login
```
To train our PerAct + AnyBimanual, run:
```bash
bash scripts/train.sh BIMANUAL_PERACT 0,1 12345 ${exp_name}
```
where `exp_name` can be specified as you like.

To train our PerAct-LF + AnyBimanual, run:
```bash
bash scripts/train.sh PERACT_BC 0,1 12345 ${exp_name}
```

To train our RVT-LF + AnyBimanual, run:
```bash
bash scripts/train.sh RVT 0,1 12345 ${exp_name}
```

Set `augmentation_type` in `scripts/train.sh` to choose between the augmentation methods described in our paper and the original SE3 augmentation.
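
As a purely hypothetical illustration of that toggle (the real variable and its accepted values are defined inside `scripts/train.sh`, which should be treated as authoritative), the edit might look like:

```bash
# Hypothetical values -- check scripts/train.sh for the actual options.
augmentation_type="anybimanual"   # augmentation methods from the paper
# augmentation_type="se3"         # original SE3 augmentation
```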

## 🔬 Evaluation
To evaluate a checkpoint in the simulator, you can use:
```bash
bash scripts/eval.sh BIMANUAL_PERACT 0 ${exp_name}
```

# 🦾 Real Robot

### 🎮 Prepare data in the real world

You can refer to [Demonstrations Collection by teleoperation](https://github.com/Denghaoyuan123/Bimanual_ur5e_joystick_control) to set up your devices in the real world and collect **raw data**.

To convert the **raw data** into [**RLBench2**](https://github.com/markusgrotz/peract_bimanual) format, run:
```bash
python3 anybimanual_real_supply/data/preprocess_ntu_dualarm.py
```
For keyframe selection, run:
```bash
python3 anybimanual_real_supply/data/auto_keyframe_mani.py
```

### 🎯 Finetune
```bash
bash scripts/train_real.sh BIMANUAL_PERACT 0,1 12345 ${exp_name}
```

### 🕹️ Evaluation on real robot
Run the model-inference script to receive real-world observations and generate actions; here we give an example of the Agent class.
```bash
python3 anybimanual_real_supply/eval_agent_on_robot.py
```
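
For orientation, the sketch below shows the general shape such an agent wrapper tends to have: observations in, one end-effector action per arm out. It is a hypothetical outline rather than the contents of `eval_agent_on_robot.py`, and all names and shapes in it are illustrative assumptions.

```python
# Hypothetical outline of a real-robot agent wrapper; not the repository's actual class.
from typing import Any, Dict

import numpy as np


class RealRobotAgentSketch:
    """Wraps a trained policy so the robot driver only calls act(observation) -> action."""

    def __init__(self, checkpoint_path: str):
        self.checkpoint_path = checkpoint_path
        # In practice: build the agent (e.g. via agents.agent_factory), load the
        # checkpoint weights, and move the model to the GPU here.

    def act(self, observation: Dict[str, Any]) -> Dict[str, np.ndarray]:
        # observation would carry camera images, point clouds, proprioception, and the
        # language goal; a real implementation runs the policy to predict per-arm keyframes.
        del observation  # placeholder: no policy is run in this sketch
        return {
            "right": np.zeros(8),  # x, y, z, quaternion, gripper open
            "left": np.zeros(8),
        }
```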

After receiving the action generated by the model, you can refer to [Bimanual_ur5e_action_control_for_IL](https://github.com/Denghaoyuan123/Bimanual_ur5e_action_control_for_IL) to drive the dual UR5e to execute the action.

# 📍 Checkpoints
We release the [checkpoints](https://cloud.tsinghua.edu.cn/d/dd54f729a902464cb2d1/).

# 🏷️ License
This repository is released under the MIT license.

# 🙏 Acknowledgement

Our code is built upon [Perceiver-Actor^2](https://github.com/markusgrotz/peract_bimanual), [SkillDiffuser](https://github.com/Liang-ZX/skilldiffuser), [PerAct](https://github.com/peract/peract), [RLBench](https://github.com/stepjam/RLBench), and [CLIP](https://github.com/openai/CLIP). We thank all these authors for their nicely open-sourced code and their great contributions to the community.

# 🔗 Citation
If you find this repository helpful, please consider citing:

```
@article{lu2024anybimanual,
  title={AnyBimanual: Transferring Unimanual Policy for General Bimanual Manipulation},
  author={Lu, Guanxing and Yu, Tengbo and Deng, Haoyuan and Chen, Season Si and Tang, Yansong and Wang, Ziwei},
  journal={arXiv preprint arXiv:2412.06779},
  year={2024}
}
```
third_party/AnyBimanual/eval.py
ADDED
|
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gc
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
|
| 6 |
+
import peract_config
|
| 7 |
+
|
| 8 |
+
import hydra
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
import pandas as pd
|
| 12 |
+
from omegaconf import DictConfig, OmegaConf, ListConfig
|
| 13 |
+
from rlbench.action_modes.action_mode import BimanualMoveArmThenGripper
|
| 14 |
+
from rlbench.action_modes.action_mode import BimanualJointPositionActionMode
|
| 15 |
+
from rlbench.action_modes.arm_action_modes import BimanualEndEffectorPoseViaPlanning
|
| 16 |
+
from rlbench.action_modes.arm_action_modes import BimanualJointPosition, JointPosition
|
| 17 |
+
from rlbench.action_modes.gripper_action_modes import BimanualDiscrete
|
| 18 |
+
from rlbench.action_modes.action_mode import MoveArmThenGripper
|
| 19 |
+
from rlbench.action_modes.arm_action_modes import EndEffectorPoseViaPlanning
|
| 20 |
+
from rlbench.action_modes.gripper_action_modes import Discrete
|
| 21 |
+
|
| 22 |
+
from rlbench.backend import task as rlbench_task
|
| 23 |
+
from rlbench.backend.utils import task_file_to_task_class
|
| 24 |
+
from yarr.runners.independent_env_runner import IndependentEnvRunner
|
| 25 |
+
from yarr.utils.stat_accumulator import SimpleAccumulator
|
| 26 |
+
|
| 27 |
+
from helpers import utils
|
| 28 |
+
from helpers import observation_utils
|
| 29 |
+
|
| 30 |
+
from yarr.utils.rollout_generator import RolloutGenerator
|
| 31 |
+
import torch.multiprocessing as mp
|
| 32 |
+
|
| 33 |
+
from agents import agent_factory
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def eval_seed(
|
| 37 |
+
train_cfg, eval_cfg, logdir, env_device, multi_task, seed, env_config
|
| 38 |
+
) -> None:
|
| 39 |
+
tasks = eval_cfg.rlbench.tasks
|
| 40 |
+
rg = RolloutGenerator()
|
| 41 |
+
|
| 42 |
+
train_cfg.method.robot_name = eval_cfg.method.robot_name
|
| 43 |
+
|
| 44 |
+
agent = agent_factory.create_agent(train_cfg)
|
| 45 |
+
stat_accum = SimpleAccumulator(eval_video_fps=30)
|
| 46 |
+
|
| 47 |
+
cwd = os.getcwd()
|
| 48 |
+
weightsdir = os.path.join(logdir, "weights")
|
| 49 |
+
|
| 50 |
+
env_runner = IndependentEnvRunner(
|
| 51 |
+
train_env=None,
|
| 52 |
+
agent=agent,
|
| 53 |
+
train_replay_buffer=None,
|
| 54 |
+
num_train_envs=0,
|
| 55 |
+
num_eval_envs=eval_cfg.framework.eval_envs,
|
| 56 |
+
rollout_episodes=99999,
|
| 57 |
+
eval_episodes=eval_cfg.framework.eval_episodes,
|
| 58 |
+
training_iterations=train_cfg.framework.training_iterations,
|
| 59 |
+
eval_from_eps_number=eval_cfg.framework.eval_from_eps_number,
|
| 60 |
+
episode_length=eval_cfg.rlbench.episode_length,
|
| 61 |
+
stat_accumulator=stat_accum,
|
| 62 |
+
weightsdir=weightsdir,
|
| 63 |
+
logdir=logdir,
|
| 64 |
+
env_device=env_device,
|
| 65 |
+
rollout_generator=rg,
|
| 66 |
+
num_eval_runs=len(tasks),
|
| 67 |
+
multi_task=multi_task,
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
env_runner._on_thread_start = peract_config.config_logging
|
| 71 |
+
|
| 72 |
+
manager = mp.Manager()
|
| 73 |
+
save_load_lock = manager.Lock()
|
| 74 |
+
writer_lock = manager.Lock()
|
| 75 |
+
|
| 76 |
+
# evaluate all checkpoints (0, 1000, ...) which don't have results, i.e. validation phase
|
| 77 |
+
if eval_cfg.framework.eval_type == "missing":
|
| 78 |
+
weight_folders = os.listdir(weightsdir)
|
| 79 |
+
weight_folders = sorted(map(int, weight_folders))
|
| 80 |
+
|
| 81 |
+
env_data_csv_file = os.path.join(logdir, "eval_data.csv")
|
| 82 |
+
if os.path.exists(env_data_csv_file):
|
| 83 |
+
env_dict = pd.read_csv(env_data_csv_file).to_dict()
|
| 84 |
+
evaluated_weights = sorted(map(int, list(env_dict["step"].values())))
|
| 85 |
+
weight_folders = [w for w in weight_folders if w not in evaluated_weights]
|
| 86 |
+
|
| 87 |
+
print("Missing weights: ", weight_folders)
|
| 88 |
+
|
| 89 |
+
# pick the best checkpoint from validation and evaluate, i.e. test phase
|
| 90 |
+
elif eval_cfg.framework.eval_type == "best":
|
| 91 |
+
env_data_csv_file = os.path.join(logdir, "eval_data.csv")
|
| 92 |
+
if os.path.exists(env_data_csv_file):
|
| 93 |
+
env_dict = pd.read_csv(env_data_csv_file).to_dict()
|
| 94 |
+
existing_weights = list(
|
| 95 |
+
map(int, sorted(os.listdir(os.path.join(logdir, "weights"))))
|
| 96 |
+
)
|
| 97 |
+
task_weights = {}
|
| 98 |
+
for task in tasks:
|
| 99 |
+
weights = list(env_dict["step"].values())
|
| 100 |
+
|
| 101 |
+
if len(tasks) > 1:
|
| 102 |
+
task_score = list(env_dict["eval_envs/return/%s" % task].values())
|
| 103 |
+
else:
|
| 104 |
+
task_score = list(env_dict["eval_envs/return"].values())
|
| 105 |
+
|
| 106 |
+
avail_weights, avail_task_scores = [], []
|
| 107 |
+
for step_idx, step in enumerate(weights):
|
| 108 |
+
if step in existing_weights:
|
| 109 |
+
avail_weights.append(step)
|
| 110 |
+
avail_task_scores.append(task_score[step_idx])
|
| 111 |
+
|
| 112 |
+
assert len(avail_weights) == len(avail_task_scores)
|
| 113 |
+
best_weight = avail_weights[
|
| 114 |
+
np.argwhere(avail_task_scores == np.amax(avail_task_scores))
|
| 115 |
+
.flatten()
|
| 116 |
+
.tolist()[-1]
|
| 117 |
+
]
|
| 118 |
+
task_weights[task] = best_weight
|
| 119 |
+
|
| 120 |
+
weight_folders = [task_weights]
|
| 121 |
+
print("Best weights:", weight_folders)
|
| 122 |
+
else:
|
| 123 |
+
            raise Exception("No existing eval_data.csv file found in %s" % logdir)

    # evaluate only the last checkpoint
    elif eval_cfg.framework.eval_type == "last":
        weight_folders = os.listdir(weightsdir)
        weight_folders = sorted(map(int, weight_folders))
        weight_folders = [weight_folders[-1]]
        print("Last weight:", weight_folders)

    elif eval_cfg.framework.eval_type == "all":
        weight_folders = os.listdir(weightsdir)
        weight_folders = sorted(map(int, weight_folders))

    # evaluate a specific checkpoint
    elif type(eval_cfg.framework.eval_type) == int:
        weight_folders = [int(eval_cfg.framework.eval_type)]
        print("Weight:", weight_folders)

    elif isinstance(eval_cfg.framework.eval_type, ListConfig):
        weight_folders = [int(w) for w in eval_cfg.framework.eval_type]
        print("Checking specified checkpoints:", weight_folders)

    else:
        print(type(eval_cfg.framework.eval_type))
        raise Exception("Unknown eval type")

    if len(weight_folders) == 0:
        logging.info(
            "No weights to evaluate. Results are already available in eval_data.csv"
        )
        sys.exit(0)
    # evaluate several checkpoints in parallel
    # NOTE: in multi-task settings, each task is evaluated serially, which makes everything slow!
    split_n = utils.split_list(weight_folders, eval_cfg.framework.eval_envs)
    for split in split_n:
        processes = []
        for e_idx, weight in enumerate(split):
            p = mp.Process(
                target=env_runner.start,
                args=(
                    weight,
                    save_load_lock,
                    writer_lock,
                    env_config,
                    e_idx % torch.cuda.device_count(),
                    eval_cfg.framework.eval_save_metrics,
                    eval_cfg.cinematic_recorder,
                ),
            )
            p.start()
            processes.append(p)
        for p in processes:
            p.join()

    del env_runner
    del agent
    gc.collect()
    torch.cuda.empty_cache()


@hydra.main(config_name="eval", config_path="conf")
def main(eval_cfg: DictConfig) -> None:
    logging.info("\n" + OmegaConf.to_yaml(eval_cfg))

    start_seed = eval_cfg.framework.start_seed
    logdir = os.path.join(
        eval_cfg.framework.logdir,
        eval_cfg.rlbench.task_name,
        eval_cfg.method.name,
        "seed%d" % start_seed,
    )

    train_config_path = os.path.join(logdir, "config.yaml")

    if os.path.exists(train_config_path):
        with open(train_config_path, "r") as f:
            train_cfg = OmegaConf.load(f)
    else:
        raise Exception(f"Missing seed{start_seed}/config.yaml. Logdir is {logdir}")

    # sanity checks
    assert(train_cfg.method.name == eval_cfg.method.name)
    assert(train_cfg.method.agent_type == eval_cfg.method.agent_type)
    for task in eval_cfg.rlbench.tasks:
        assert(task in train_cfg.rlbench.tasks)

    env_device = utils.get_device(eval_cfg.framework.gpu)
    logging.info("Using env device %s." % str(env_device))

    gripper_mode = eval(eval_cfg.rlbench.gripper_mode)()
    arm_action_mode = eval(eval_cfg.rlbench.arm_action_mode)()
    action_mode = eval(eval_cfg.rlbench.action_mode)(arm_action_mode, gripper_mode)


    is_bimanual = eval_cfg.method.robot_name == "bimanual"

    if is_bimanual:
        # TODO: automate instantiation with eval
        task_path = rlbench_task.BIMANUAL_TASKS_PATH
    else:
        task_path = rlbench_task.TASKS_PATH

    task_files = [
        t.replace(".py", "")
        for t in os.listdir(task_path)
        if t != "__init__.py" and t.endswith(".py")
    ]
    eval_cfg.rlbench.cameras = (
        eval_cfg.rlbench.cameras
        if isinstance(eval_cfg.rlbench.cameras, ListConfig)
        else [eval_cfg.rlbench.cameras]
    )
    obs_config = observation_utils.create_obs_config(
        eval_cfg.rlbench.cameras,
        eval_cfg.rlbench.camera_resolution,
        eval_cfg.method.name,
        eval_cfg.method.robot_name
    )

    if eval_cfg.cinematic_recorder.enabled:
        obs_config.record_gripper_closing = True

    multi_task = len(eval_cfg.rlbench.tasks) > 1

    tasks = eval_cfg.rlbench.tasks
    task_classes = []
    for task in tasks:
        if task not in task_files:
            raise ValueError('Task %s not recognised!.' % task)
        task_classes.append(task_file_to_task_class(task, is_bimanual))


    # single-task or multi-task
    if multi_task:
        env_config = (
            task_classes,
            obs_config,
            action_mode,
            eval_cfg.rlbench.demo_path,
            eval_cfg.rlbench.episode_length,
            eval_cfg.rlbench.headless,
            eval_cfg.framework.eval_episodes,
            train_cfg.rlbench.include_lang_goal_in_obs,
            eval_cfg.rlbench.time_in_state,
            eval_cfg.framework.record_every_n,
        )
    else:
        env_config = (
            task_classes[0],
            obs_config,
            action_mode,
            eval_cfg.rlbench.demo_path,
            eval_cfg.rlbench.episode_length,
            eval_cfg.rlbench.headless,
            train_cfg.rlbench.include_lang_goal_in_obs,
            eval_cfg.rlbench.time_in_state,
            eval_cfg.framework.record_every_n,
        )

    logging.info("Evaluating seed %d." % start_seed)
    eval_seed(
        train_cfg,
        eval_cfg,
        logdir,
        env_device,
        multi_task,
        start_seed,
        env_config,
    )


if __name__ == "__main__":
    peract_config.on_init()
    main()
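The parallel-evaluation loop above splits the selected checkpoints across `eval_cfg.framework.eval_envs` worker processes and assigns each process a GPU round-robin. A minimal, self-contained sketch of that pattern follows; `split_list` and `evaluate` here are hypothetical stand-ins for the repo's `utils.split_list` and `env_runner.start`, which may behave differently.

```python
import multiprocessing as mp


def split_list(items, n_chunks):
    # Hypothetical stand-in for utils.split_list: divide `items` into
    # at most `n_chunks` roughly equal, order-preserving groups.
    n_chunks = max(1, min(n_chunks, len(items)))
    size = (len(items) + n_chunks - 1) // n_chunks
    return [items[i:i + size] for i in range(0, len(items), size)]


def evaluate(checkpoint, device_idx):
    # Placeholder for env_runner.start(...): evaluate one checkpoint on one device.
    print(f"evaluating checkpoint {checkpoint} on device {device_idx}")


if __name__ == "__main__":
    checkpoints = [10000, 20000, 30000, 40000, 50000]
    for group in split_list(checkpoints, n_chunks=2):
        procs = []
        for idx, ckpt in enumerate(group):
            p = mp.Process(target=evaluate, args=(ckpt, idx % 2))
            p.start()
            procs.append(p)
        for p in procs:
            p.join()
```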
third_party/AnyBimanual/peract_config.py ADDED
@@ -0,0 +1,34 @@
"""
System configuration for peract
"""
import os
import logging

import torch.multiprocessing as mp


def config_logging(logging_level=logging.INFO, reset=False):

    if reset:
        root = logging.getLogger()
        list(map(root.removeHandler, root.handlers))
        list(map(root.removeFilter, root.filters))

    from rich.logging import RichHandler
    logging.basicConfig(level=logging_level, handlers=[RichHandler()])


def on_init():

    config_logging(logging.INFO)

    logging.debug("Configuring environment.")
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    mp.set_start_method("spawn", force=True)
    mp.set_sharing_strategy("file_system")


def on_config(cfg):

    os.environ["MASTER_ADDR"] = str(cfg.ddp.master_addr)
    os.environ["MASTER_PORT"] = str(cfg.ddp.master_port)
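For context, a minimal sketch of how these two hooks are intended to be called, assuming the repo's dependencies (torch, rich) are installed; the `cfg` object with a `ddp` section is a hypothetical stand-in for the Hydra config:

```python
from types import SimpleNamespace

import peract_config

# One-time process setup: logging, CUDA device ordering, multiprocessing start method.
peract_config.on_init()

# Later, once the config is available, wire up the DDP rendezvous address.
cfg = SimpleNamespace(ddp=SimpleNamespace(master_addr="127.0.0.1", master_port=29500))
peract_config.on_config(cfg)
```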
third_party/AnyBimanual/pyproject.toml ADDED
@@ -0,0 +1,33 @@
[tool.poetry]
name = "peract_bimanual"
version = "0.0.1"
description = "A perceiver actor framework for bimanual manipulation tasks"
authors = [ "Markus Grotz <grotz@uw.edu>",
            "Mohit Shridhar <mshr@cs.washington.edu>"]
packages = [{include = "agents"}, {include = "helpers"}, {include = "voxel"}]


readme = "README.md"
classifiers = [
    "Programming Language :: Python :: 3",
    "Framework :: Robot Framework "
]

[tool.poetry.dependencies]
python = ">=3.8,<4.0"
einops = "0.3.2"
ftfy = "^6.1.1"
hydra-core = "1.0.5"
matplotlib = "^3.7.1"
pandas = "1.4.1"
regex = "^2023.6.3"
tensorboard = "^2.13.0"
perceiver-pytorch = "^0.8.7"


[tool.poetry.extras]
docs = ["sphinx"]

[build-system]
requires = ["setuptools", "wheel", "poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
third_party/AnyBimanual/run_seed_fn.py ADDED
@@ -0,0 +1,243 @@
import os
import pickle
import gc
from typing import List
import filecmp
import hydra
import numpy as np
import torch
from omegaconf import DictConfig

from rlbench import CameraConfig, ObservationConfig
from yarr.replay_buffer.wrappers.pytorch_replay_buffer import PyTorchReplayBuffer
from yarr.runners.offline_train_runner import OfflineTrainRunner
from yarr.utils.stat_accumulator import SimpleAccumulator
from yarr.replay_buffer.task_uniform_replay_buffer import TaskUniformReplayBuffer

from helpers.custom_rlbench_env import CustomRLBenchEnv, CustomMultiTaskRLBenchEnv
import torch.distributed as dist
from torch.utils.data import DataLoader, default_collate
from torch.utils.data.distributed import DistributedSampler
import random
from agents import agent_factory
from agents import replay_utils
from typing import Tuple, Optional
import peract_config
from functools import partial
import copy
from tqdm import tqdm
def run_seed(
    rank,
    cfg: DictConfig,
    obs_config: ObservationConfig,
    seed,
    world_size,
) -> None:

    peract_config.config_logging()

    dist.init_process_group("gloo", rank=rank, world_size=world_size)

    tasks = cfg.rlbench.tasks
    cams = cfg.rlbench.cameras

    # task_folder = "debug" if len(tasks) > 1 else tasks[0]
    task_folder = cfg.replay.task_folder if len(tasks) > 1 else tasks[0]
    # task_folder = cfg.rlbench.task_name
    # replay_path = os.path.join(
    #     cfg.replay.path, task_folder, cfg.method.name, "seed%d" % seed
    # )
    replay_path = os.path.join(
        cfg.replay.path, task_folder
    )
    # to do create agent
    agent = agent_factory.create_agent(cfg)

    if not agent:
        print("Unable to create agent")
        return

    if cfg.method.name == "ARM":
        raise NotImplementedError("ARM is not supported yet")
    elif cfg.method.name == "BC_LANG":
        from agents.baselines import bc_lang

        assert cfg.ddp.num_devices == 1, "BC_LANG only supports single GPU training"
        replay_buffer = bc_lang.launch_utils.create_replay(
            cfg.replay.batch_size,
            cfg.replay.timesteps,
            cfg.replay.prioritisation,
            cfg.replay.task_uniform,
            replay_path if cfg.replay.use_disk else None,
            cams,
            cfg.rlbench.camera_resolution,
        )

        bc_lang.launch_utils.fill_multi_task_replay(
            cfg,
            obs_config,
            rank,
            replay_buffer,
            tasks,
            cfg.rlbench.demos,
            cfg.method.demo_augmentation,
            cfg.method.demo_augmentation_every_n,
            cams,
        )


    elif cfg.method.name == "VIT_BC_LANG":
        from agents.baselines import vit_bc_lang

        assert cfg.ddp.num_devices == 1, "VIT_BC_LANG only supports single GPU training"
        replay_buffer = vit_bc_lang.launch_utils.create_replay(
            cfg.replay.batch_size,
            cfg.replay.timesteps,
            cfg.replay.prioritisation,
            cfg.replay.task_uniform,
            replay_path if cfg.replay.use_disk else None,
            cams,
            cfg.rlbench.camera_resolution,
        )

        vit_bc_lang.launch_utils.fill_multi_task_replay(
            cfg,
            obs_config,
            rank,
            replay_buffer,
            tasks,
            cfg.rlbench.demos,
            cfg.method.demo_augmentation,
            cfg.method.demo_augmentation_every_n,
            cams,
        )

    elif cfg.method.name.startswith("ACT_BC_LANG"):
        from agents import act_bc_lang

        assert cfg.ddp.num_devices == 1, "ACT_BC_LANG only supports single GPU training"
        replay_buffer = act_bc_lang.launch_utils.create_replay(
            cfg.replay.batch_size,
            cfg.replay.timesteps,
            cfg.replay.prioritisation,
            cfg.replay.task_uniform,
            replay_path if cfg.replay.use_disk else None,
            cams,
            cfg.rlbench.camera_resolution,
            replay_size=3e5,
            prev_action_horizon=cfg.method.prev_action_horizon,
            next_action_horizon=cfg.method.next_action_horizon
        )

        act_bc_lang.launch_utils.fill_multi_task_replay(
            cfg,
            obs_config,
            rank,
            replay_buffer,
            tasks,
            cfg.rlbench.demos,
            cfg.method.demo_augmentation,
            cfg.method.demo_augmentation_every_n,
            cams,
        )

    elif cfg.method.name == "C2FARM_LINGUNET_BC":
        from agents import c2farm_lingunet_bc

        replay_buffer = c2farm_lingunet_bc.launch_utils.create_replay(
            cfg.replay.batch_size,
            cfg.replay.timesteps,
            cfg.replay.prioritisation,
            cfg.replay.task_uniform,
            replay_path if cfg.replay.use_disk else None,
            cams,
            cfg.method.voxel_sizes,
            cfg.rlbench.camera_resolution,
        )

        c2farm_lingunet_bc.launch_utils.fill_multi_task_replay(
            cfg,
            obs_config,
            rank,
            replay_buffer,
            tasks,
            cfg.rlbench.demos,
            cfg.method.demo_augmentation,
            cfg.method.demo_augmentation_every_n,
            cams,
            cfg.rlbench.scene_bounds,
            cfg.method.voxel_sizes,
            cfg.method.bounds_offset,
            cfg.method.rotation_resolution,
            cfg.method.crop_augmentation,
            keypoint_method=cfg.method.keypoint_method,
        )


    elif cfg.method.name.startswith("BIMANUAL_PERACT") or cfg.method.name.startswith("RVT") or cfg.method.name.startswith("PERACT_BC") or cfg.method.name.startswith("DIFFUSER_ACTOR") or cfg.method.name.startswith("ANY_BIMANUAL"):
        print(replay_path)
        if os.path.exists(replay_path) and os.listdir(replay_path):
            print("Replay files found. Loading...")
            replay_buffer = replay_utils.create_replay(cfg, replay_path)
            replay_files = [os.path.join(replay_path, f) for f in os.listdir(replay_path) if f.endswith('.replay')]
            for replay_file in tqdm(replay_files, desc="Processing files"):
                with open(replay_file, 'rb') as f:
                    try:
                        replay_data = pickle.load(f)
                        replay_buffer.load_add(replay_data)
                    except pickle.UnpicklingError as e:
                        print(f"Error unpickling file {replay_file}: {e}")
        else:
            print("No replay files found. Creating replay...")
            replay_buffer = replay_utils.create_replay(cfg, replay_path)
            replay_utils.fill_multi_task_replay(
                cfg,
                obs_config,
                rank,
                replay_buffer,
                tasks
            )

    elif cfg.method.name == "PERACT_RL":
        raise NotImplementedError("PERACT_RL is not supported yet")
    else:
        raise ValueError("Method %s does not exists." % cfg.method.name)

    wrapped_replay = PyTorchReplayBuffer(
        replay_buffer, num_workers=cfg.framework.num_workers
    )
    stat_accum = SimpleAccumulator(eval_video_fps=30)

    cwd = os.getcwd()
    weightsdir = os.path.join(cwd, "seed%d" % seed, "weights")
    logdir = os.path.join(cwd, "seed%d" % seed)

    train_runner = OfflineTrainRunner(
        agent=agent,
        wrapped_replay_buffer=wrapped_replay,
        train_device=rank,
        stat_accumulator=stat_accum,
        iterations=cfg.framework.training_iterations,
        logdir=logdir,
        logging_level=cfg.framework.logging_level,
        log_freq=cfg.framework.log_freq,
        weightsdir=weightsdir,
        num_weights_to_keep=cfg.framework.num_weights_to_keep,
        save_freq=cfg.framework.save_freq,
        tensorboard_logging=cfg.framework.tensorboard_logging,
        csv_logging=cfg.framework.csv_logging,
        load_existing_weights=cfg.framework.load_existing_weights,
        rank=rank,
        world_size=world_size,
        cfg=cfg
    )

    train_runner._on_thread_start = partial(peract_config.config_logging, cfg.framework.logging_level)

    train_runner.start()

    del train_runner
    del agent
    gc.collect()
    torch.cuda.empty_cache()
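`run_seed` follows the DDP convention of a `(rank, ..., world_size)` signature and calls `dist.init_process_group` itself, so it is meant to be launched once per device. A minimal launcher sketch is shown below; the `MASTER_ADDR`/`MASTER_PORT` values and the use of `torch.multiprocessing.spawn` are assumptions for illustration, and the repo's training entry point may wire this up differently.

```python
import os

import torch.multiprocessing as mp


def worker(rank, cfg, obs_config, seed, world_size):
    # In the real code this would call run_seed_fn.run_seed(rank, cfg, obs_config, seed, world_size).
    print(f"rank {rank}/{world_size} starting with seed {seed}")


if __name__ == "__main__":
    # Assumed rendezvous settings; on_config(cfg) in peract_config.py sets these from cfg.ddp.
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    world_size = 2  # number of GPUs / processes
    mp.spawn(worker, args=(None, None, 0, world_size), nprocs=world_size, join=True)
```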
third_party/AnyBimanual/third_party/pytorch3d/.clang-format ADDED
@@ -0,0 +1,85 @@
AccessModifierOffset: -1
AlignAfterOpenBracket: AlwaysBreak
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: true
AlignOperands: false
AlignTrailingComments: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
  AfterClass: false
  AfterControlStatement: false
  AfterEnum: false
  AfterFunction: false
  AfterNamespace: false
  AfterObjCDeclaration: false
  AfterStruct: false
  AfterUnion: false
  BeforeCatch: false
  BeforeElse: false
  IndentBraces: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: false
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ForEachMacros: [ FOR_EACH, FOR_EACH_R, FOR_EACH_RANGE, ]
IncludeCategories:
  - Regex: '^<.*\.h(pp)?>'
    Priority: 1
  - Regex: '^<.*'
    Priority: 2
  - Regex: '.*'
    Priority: 3
IndentCaseLabels: true
IndentWidth: 2
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: false
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 8
UseTab: Never
third_party/AnyBimanual/third_party/pytorch3d/.flake8 ADDED
@@ -0,0 +1,9 @@
[flake8]
# B028 No explicit stacklevel argument found.
# B907 'foo' is manually surrounded by quotes, consider using the `!r` conversion flag.
# B905 `zip()` without an explicit `strict=` parameter.
ignore = E203, E266, E501, W503, E221, B028, B905, B907
max-line-length = 88
max-complexity = 18
select = B,C,E,F,W,T4,B9
exclude = build,__init__.py
third_party/AnyBimanual/third_party/pytorch3d/.gitignore ADDED
@@ -0,0 +1,21 @@
build/
dist/
*.egg-info/
**/__pycache__/
*-checkpoint.ipynb
**/.ipynb_checkpoints
**/.ipynb_checkpoints/**


# Docusaurus site
website/yarn.lock
website/build/
website/i18n/
website/node_modules/*
website/npm-debug.log

## Generated for tutorials
website/_tutorials/
website/static/files/
website/pages/tutorials/*
!website/pages/tutorials/index.js
third_party/AnyBimanual/third_party/pytorch3d/INSTALL.md ADDED
@@ -0,0 +1,157 @@
# Installation


## Requirements

### Core library

The core library is written in PyTorch. Several components have underlying implementation in CUDA for improved performance. A subset of these components have CPU implementations in C++/PyTorch. It is advised to use PyTorch3D with GPU support in order to use all the features.

- Linux or macOS or Windows
- Python 3.8, 3.9 or 3.10
- PyTorch 2.0.0, 2.0.1, 2.1.0, 2.1.1, 2.1.2, 2.2.0, 2.2.1, 2.2.2, 2.3.0 or 2.3.1.
- torchvision that matches the PyTorch installation. You can install them together as explained at pytorch.org to make sure of this.
- gcc & g++ ≥ 4.9
- [ioPath](https://github.com/facebookresearch/iopath)
- If CUDA is to be used, use a version which is supported by the corresponding pytorch version and at least version 9.2.
- If CUDA older than 11.7 is to be used and you are building from source, the CUB library must be available. We recommend version 1.10.0.

The runtime dependencies can be installed by running:
```
conda create -n pytorch3d python=3.9
conda activate pytorch3d
conda install pytorch=1.13.0 torchvision pytorch-cuda=11.6 -c pytorch -c nvidia
conda install -c iopath iopath
```

For the CUB build time dependency, which you only need if you have CUDA older than 11.7, if you are using conda, you can continue with
```
conda install -c bottler nvidiacub
```
Otherwise download the CUB library from https://github.com/NVIDIA/cub/releases and unpack it to a folder of your choice.
Define the environment variable CUB_HOME before building and point it to the directory that contains `CMakeLists.txt` for CUB.
For example on Linux/Mac,
```
curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
tar xzf 1.10.0.tar.gz
export CUB_HOME=$PWD/cub-1.10.0
```

### Tests/Linting and Demos

For developing on top of PyTorch3D or contributing, you will need to run the linter and tests. If you want to run any of the notebook tutorials as `docs/tutorials` or the examples in `docs/examples` you will also need matplotlib and OpenCV.
- scikit-image
- black
- usort
- flake8
- matplotlib
- tdqm
- jupyter
- imageio
- fvcore
- plotly
- opencv-python

These can be installed by running:
```
# Demos and examples
conda install jupyter
pip install scikit-image matplotlib imageio plotly opencv-python

# Tests/Linting
conda install -c fvcore -c conda-forge fvcore
pip install black usort flake8 flake8-bugbear flake8-comprehensions
```

## Installing prebuilt binaries for PyTorch3D
After installing the above dependencies, run one of the following commands:

### 1. Install with CUDA support from Anaconda Cloud, on Linux only

```
# Anaconda Cloud
conda install pytorch3d -c pytorch3d
```

Or, to install a nightly (non-official, alpha) build:
```
# Anaconda Cloud
conda install pytorch3d -c pytorch3d-nightly
```

### 2. Install wheels for Linux
We have prebuilt wheels with CUDA for Linux for PyTorch 1.11.0, for each of the supported CUDA versions,
for Python 3.8 and 3.9. This is for ease of use on Google Colab.
These are installed in a special way.
For example, to install for Python 3.8, PyTorch 1.11.0 and CUDA 11.3
```
pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu113_pyt1110/download.html
```

In general, from inside IPython, or in Google Colab or a jupyter notebook, you can install with
```
import sys
import torch
pyt_version_str=torch.__version__.split("+")[0].replace(".", "")
version_str="".join([
    f"py3{sys.version_info.minor}_cu",
    torch.version.cuda.replace(".",""),
    f"_pyt{pyt_version_str}"
])
!pip install iopath
!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
```

## Building / installing from source.
CUDA support will be included if CUDA is available in pytorch or if the environment variable
`FORCE_CUDA` is set to `1`.

### 1. Install from GitHub
```
pip install "git+https://github.com/facebookresearch/pytorch3d.git"
```
To install using the code of the released version instead of from the main branch, use the following instead.
```
pip install "git+https://github.com/facebookresearch/pytorch3d.git@stable"
```

For CUDA builds with versions earlier than CUDA 11, set `CUB_HOME` before building as described above.

**Install from Github on macOS:**
Some environment variables should be provided, like this.
```
MACOSX_DEPLOYMENT_TARGET=10.14 CC=clang CXX=clang++ pip install "git+https://github.com/facebookresearch/pytorch3d.git"
```

### 2. Install from a local clone
```
git clone https://github.com/facebookresearch/pytorch3d.git
cd pytorch3d && pip install -e .
```
To rebuild after installing from a local clone run, `rm -rf build/ **/*.so` then `pip install -e .`. You often need to rebuild pytorch3d after reinstalling PyTorch. For CUDA builds with versions earlier than CUDA 11, set `CUB_HOME` before building as described above.

**Install from local clone on macOS:**
```
MACOSX_DEPLOYMENT_TARGET=10.14 CC=clang CXX=clang++ pip install -e .
```

**Install from local clone on Windows:**

Depending on the version of PyTorch, changes to some PyTorch headers may be needed before compilation. These are often discussed in issues in this repository.

After any necessary patching, you can go to "x64 Native Tools Command Prompt for VS 2019" to compile and install
```
cd pytorch3d
python3 setup.py install
```

After installing, you can run **unit tests**
```
python3 -m unittest discover -v -s tests -t .
```

# FAQ

### Can I use Docker?

We don't provide a docker file but see [#113](https://github.com/facebookresearch/pytorch3d/issues/113) for a docker file shared by a user (NOTE: this has not been tested by the PyTorch3D team).
third_party/AnyBimanual/third_party/pytorch3d/LICENSE ADDED
@@ -0,0 +1,30 @@
BSD License

For PyTorch3D software

Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

 * Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

 * Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

 * Neither the name Meta nor the names of its contributors may be used to
   endorse or promote products derived from this software without specific
   prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
third_party/AnyBimanual/third_party/pytorch3d/LICENSE-3RD-PARTY ADDED
@@ -0,0 +1,71 @@
SRN license ( https://github.com/vsitzmann/scene-representation-networks/ ):

MIT License

Copyright (c) 2019 Vincent Sitzmann

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


IDR license ( github.com/lioryariv/idr ):

MIT License

Copyright (c) 2020 Lior Yariv

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


NeRF https://github.com/bmild/nerf/

Copyright (c) 2020 bmild

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
third_party/AnyBimanual/third_party/pytorch3d/README.md ADDED
@@ -0,0 +1,183 @@
<img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/pytorch3dlogo.png" width="900"/>

[](https://circleci.com/gh/facebookresearch/pytorch3d)
[](https://anaconda.org/pytorch3d/pytorch3d)

# Introduction

PyTorch3D provides efficient, reusable components for 3D Computer Vision research with [PyTorch](https://pytorch.org).

Key features include:

- Data structure for storing and manipulating triangle meshes
- Efficient operations on triangle meshes (projective transformations, graph convolution, sampling, loss functions)
- A differentiable mesh renderer
- Implicitron, see [its README](projects/implicitron_trainer), a framework for new-view synthesis via implicit representations. ([blog post](https://ai.facebook.com/blog/implicitron-a-new-modular-extensible-framework-for-neural-implicit-representations-in-pytorch3d/))

PyTorch3D is designed to integrate smoothly with deep learning methods for predicting and manipulating 3D data.
For this reason, all operators in PyTorch3D:

- Are implemented using PyTorch tensors
- Can handle minibatches of hetereogenous data
- Can be differentiated
- Can utilize GPUs for acceleration

Within FAIR, PyTorch3D has been used to power research projects such as [Mesh R-CNN](https://arxiv.org/abs/1906.02739).

See our [blog post](https://ai.facebook.com/blog/-introducing-pytorch3d-an-open-source-library-for-3d-deep-learning/) to see more demos and learn about PyTorch3D.

## Installation

For detailed instructions refer to [INSTALL.md](INSTALL.md).

## License

PyTorch3D is released under the [BSD License](LICENSE).

## Tutorials

Get started with PyTorch3D by trying one of the tutorial notebooks.

|<img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/dolphin_deform.gif" width="310"/>|<img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/bundle_adjust.gif" width="310"/>|
|:-----------------------------------------------------------------------------------------------------------:|:--------------------------------------------------:|
| [Deform a sphere mesh to dolphin](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/deform_source_mesh_to_target_mesh.ipynb)| [Bundle adjustment](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/bundle_adjustment.ipynb) |

| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/render_textured_mesh.gif" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/camera_position_teapot.gif" width="310" height="310"/>
|:------------------------------------------------------------:|:--------------------------------------------------:|
| [Render textured meshes](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/render_textured_meshes.ipynb)| [Camera position optimization](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/camera_position_optimization_with_differentiable_rendering.ipynb)|

| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/pointcloud_render.png" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/cow_deform.gif" width="310" height="310"/>
|:------------------------------------------------------------:|:--------------------------------------------------:|
| [Render textured pointclouds](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/render_colored_points.ipynb)| [Fit a mesh with texture](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/fit_textured_mesh.ipynb)|

| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/densepose_render.png" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/shapenet_render.png" width="310" height="310"/>
|:------------------------------------------------------------:|:--------------------------------------------------:|
| [Render DensePose data](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/render_densepose.ipynb)| [Load & Render ShapeNet data](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/dataloaders_ShapeNetCore_R2N2.ipynb)|

| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/fit_textured_volume.gif" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/fit_nerf.gif" width="310" height="310"/>
|:------------------------------------------------------------:|:--------------------------------------------------:|
| [Fit Textured Volume](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/fit_textured_volume.ipynb)| [Fit A Simple Neural Radiance Field](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/fit_simple_neural_radiance_field.ipynb)|

| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/fit_textured_volume.gif" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/implicitron_config.gif" width="310" height="310"/>
|:------------------------------------------------------------:|:--------------------------------------------------:|
| [Fit Textured Volume in Implicitron](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/implicitron_volumes.ipynb)| [Implicitron Config System](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/implicitron_config_system.ipynb)|




## Documentation

Learn more about the API by reading the PyTorch3D [documentation](https://pytorch3d.readthedocs.org/).

We also have deep dive notes on several API components:

- [Heterogeneous Batching](https://github.com/facebookresearch/pytorch3d/tree/main/docs/notes/batching.md)
- [Mesh IO](https://github.com/facebookresearch/pytorch3d/tree/main/docs/notes/meshes_io.md)
- [Differentiable Rendering](https://github.com/facebookresearch/pytorch3d/tree/main/docs/notes/renderer_getting_started.md)

### Overview Video

We have created a short (~14 min) video tutorial providing an overview of the PyTorch3D codebase including several code examples. Click on the image below to watch the video on YouTube:

<a href="http://www.youtube.com/watch?v=Pph1r-x9nyY"><img src="http://img.youtube.com/vi/Pph1r-x9nyY/0.jpg" height="225" ></a>

## Development

We welcome new contributions to PyTorch3D and we will be actively maintaining this library! Please refer to [CONTRIBUTING.md](./.github/CONTRIBUTING.md) for full instructions on how to run the code, tests and linter, and submit your pull requests.

## Development and Compatibility

- `main` branch: actively developed, without any guarantee, Anything can be broken at any time
  - REMARK: this includes nightly builds which are built from `main`
  - HINT: the commit history can help locate regressions or changes
- backward-compatibility between releases: no guarantee. Best efforts to communicate breaking changes and facilitate migration of code or data (incl. models).

## Contributors

PyTorch3D is written and maintained by the Facebook AI Research Computer Vision Team.

In alphabetical order:

* Amitav Baruah
* Steve Branson
* Krzysztof Chalupka
* Jiali Duan
* Luya Gao
* Georgia Gkioxari
* Taylor Gordon
* Justin Johnson
* Patrick Labatut
* Christoph Lassner
* Wan-Yen Lo
* David Novotny
* Nikhila Ravi
* Jeremy Reizenstein
* Dave Schnizlein
* Roman Shapovalov
* Olivia Wiles

## Citation

If you find PyTorch3D useful in your research, please cite our tech report:

```bibtex
@article{ravi2020pytorch3d,
    author = {Nikhila Ravi and Jeremy Reizenstein and David Novotny and Taylor Gordon
                  and Wan-Yen Lo and Justin Johnson and Georgia Gkioxari},
    title = {Accelerating 3D Deep Learning with PyTorch3D},
    journal = {arXiv:2007.08501},
    year = {2020},
}
```

If you are using the pulsar backend for sphere-rendering (the `PulsarPointRenderer` or `pytorch3d.renderer.points.pulsar.Renderer`), please cite the tech report:

```bibtex
@article{lassner2020pulsar,
    author = {Christoph Lassner and Michael Zollh\"ofer},
    title = {Pulsar: Efficient Sphere-based Neural Rendering},
    journal = {arXiv:2004.07484},
    year = {2020},
}
```

## News

Please see below for a timeline of the codebase updates in reverse chronological order. We are sharing updates on the releases as well as research projects which are built with PyTorch3D. The changelogs for the releases are available under [`Releases`](https://github.com/facebookresearch/pytorch3d/releases), and the builds can be installed using `conda` as per the instructions in [INSTALL.md](INSTALL.md).

**[Oct 31st 2023]:** PyTorch3D [v0.7.5](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.5) released.

**[May 10th 2023]:** PyTorch3D [v0.7.4](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.4) released.

**[Apr 5th 2023]:** PyTorch3D [v0.7.3](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.3) released.

**[Dec 19th 2022]:** PyTorch3D [v0.7.2](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.2) released.

**[Oct 23rd 2022]:** PyTorch3D [v0.7.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.1) released.

**[Aug 10th 2022]:** PyTorch3D [v0.7.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.0) released with Implicitron and MeshRasterizerOpenGL.

**[Apr 28th 2022]:** PyTorch3D [v0.6.2](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.6.2) released

**[Dec 16th 2021]:** PyTorch3D [v0.6.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.6.1) released

**[Oct 6th 2021]:** PyTorch3D [v0.6.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.6.0) released

**[Aug 5th 2021]:** PyTorch3D [v0.5.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.5.0) released

**[Feb 9th 2021]:** PyTorch3D [v0.4.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.4.0) released with support for implicit functions, volume rendering and a [reimplementation of NeRF](https://github.com/facebookresearch/pytorch3d/tree/main/projects/nerf).

**[November 2nd 2020]:** PyTorch3D [v0.3.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.3.0) released, integrating the pulsar backend.

**[Aug 28th 2020]:** PyTorch3D [v0.2.5](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.2.5) released

**[July 17th 2020]:** PyTorch3D tech report published on ArXiv: https://arxiv.org/abs/2007.08501

**[April 24th 2020]:** PyTorch3D [v0.2.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.2.0) released

**[March 25th 2020]:** [SynSin](https://arxiv.org/abs/1912.08804) codebase released using PyTorch3D: https://github.com/facebookresearch/synsin

**[March 8th 2020]:** PyTorch3D [v0.1.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.1.1) bug fix release

**[Jan 23rd 2020]:** PyTorch3D [v0.1.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.1.0) released. [Mesh R-CNN](https://arxiv.org/abs/1906.02739) codebase released: https://github.com/facebookresearch/meshrcnn
third_party/AnyBimanual/third_party/pytorch3d/setup.cfg ADDED
@@ -0,0 +1,14 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

[isort]
line_length = 88
multi_line_output = 3
include_trailing_comma = True
force_grid_warp = 0
default_section = THIRDPARTY
lines_after_imports = 2
combine_as_imports = True
third_party/AnyBimanual/third_party/pytorch3d/setup.py ADDED
@@ -0,0 +1,181 @@
#!/usr/bin/env python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import glob
import os
import runpy
import sys
import warnings
from typing import List, Optional

import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import CppExtension, CUDA_HOME, CUDAExtension


def get_existing_ccbin(nvcc_args: List[str]) -> Optional[str]:
    """
    Given a list of nvcc arguments, return the compiler if specified.

    Note from CUDA doc: Single value options and list options must have
    arguments, which must follow the name of the option itself by either
    one of more spaces or an equals character.
    """
    last_arg = None
    for arg in reversed(nvcc_args):
        if arg == "-ccbin":
            return last_arg
        if arg.startswith("-ccbin="):
            return arg[7:]
        last_arg = arg
    return None


def get_extensions():
    no_extension = os.getenv("PYTORCH3D_NO_EXTENSION", "0") == "1"
    if no_extension:
        msg = "SKIPPING EXTENSION BUILD. PYTORCH3D WILL NOT WORK!"
        print(msg, file=sys.stderr)
        warnings.warn(msg)
        return []

    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, "pytorch3d", "csrc")
    sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp"), recursive=True)
    source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu"), recursive=True)
    extension = CppExtension

    extra_compile_args = {"cxx": ["-std=c++17"]}
    define_macros = []
    include_dirs = [extensions_dir]

    force_cuda = os.getenv("FORCE_CUDA", "0") == "1"
    force_no_cuda = os.getenv("PYTORCH3D_FORCE_NO_CUDA", "0") == "1"
    if (
        not force_no_cuda and torch.cuda.is_available() and CUDA_HOME is not None
    ) or force_cuda:
        extension = CUDAExtension
        sources += source_cuda
        define_macros += [("WITH_CUDA", None)]
        # Thrust is only used for its tuple objects.
        # With CUDA 11.0 we can't use the cudatoolkit's version of cub.
        # We take the risk that CUB and Thrust are incompatible, because
        # we aren't using parts of Thrust which actually use CUB.
        define_macros += [("THRUST_IGNORE_CUB_VERSION_CHECK", None)]
        cub_home = os.environ.get("CUB_HOME", None)
        nvcc_args = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]
        if os.name != "nt":
            nvcc_args.append("-std=c++17")
        if cub_home is None:
            prefix = os.environ.get("CONDA_PREFIX", None)
            if prefix is not None and os.path.isdir(prefix + "/include/cub"):
                cub_home = prefix + "/include"

        if cub_home is None:
            warnings.warn(
                "The environment variable `CUB_HOME` was not found. "
                "NVIDIA CUB is required for compilation and can be downloaded "
                "from `https://github.com/NVIDIA/cub/releases`. You can unpack "
                "it to a location of your choice and set the environment variable "
                "`CUB_HOME` to the folder containing the `CMakeListst.txt` file."
            )
        else:
            include_dirs.append(os.path.realpath(cub_home).replace("\\ ", " "))
        nvcc_flags_env = os.getenv("NVCC_FLAGS", "")
        if nvcc_flags_env != "":
            nvcc_args.extend(nvcc_flags_env.split(" "))

        # This is needed for pytorch 1.6 and earlier. See e.g.
        # https://github.com/facebookresearch/pytorch3d/issues/436
        # It is harmless after https://github.com/pytorch/pytorch/pull/47404 .
        # But it can be problematic in torch 1.7.0 and 1.7.1
        if torch.__version__[:4] != "1.7.":
            CC = os.environ.get("CC", None)
            if CC is not None:
                existing_CC = get_existing_ccbin(nvcc_args)
                if existing_CC is None:
                    CC_arg = "-ccbin={}".format(CC)
                    nvcc_args.append(CC_arg)
                elif existing_CC != CC:
                    msg = f"Inconsistent ccbins: {CC} and {existing_CC}"
                    raise ValueError(msg)

        extra_compile_args["nvcc"] = nvcc_args

    sources = [os.path.join(extensions_dir, s) for s in sources]

    ext_modules = [
        extension(
            "pytorch3d._C",
            sources,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]

    return ext_modules


# Retrieve __version__ from the package.
__version__ = runpy.run_path("pytorch3d/__init__.py")["__version__"]


if os.getenv("PYTORCH3D_NO_NINJA", "0") == "1":

    class BuildExtension(torch.utils.cpp_extension.BuildExtension):
        def __init__(self, *args, **kwargs):
            super().__init__(use_ninja=False, *args, **kwargs)

else:
    BuildExtension = torch.utils.cpp_extension.BuildExtension

trainer = "pytorch3d.implicitron_trainer"

setup(
    name="pytorch3d",
    version=__version__,
    author="FAIR",
    url="https://github.com/facebookresearch/pytorch3d",
    description="PyTorch3D is FAIR's library of reusable components "
    "for deep Learning with 3D data.",
    packages=find_packages(
        exclude=("configs", "tests", "tests.*", "docs.*", "projects.*")
    )
    + [trainer],
    package_dir={trainer: "projects/implicitron_trainer"},
    install_requires=["iopath"],
    extras_require={
        "all": ["matplotlib", "tqdm>4.29.0", "imageio", "ipywidgets"],
        "dev": ["flake8", "usort"],
        "implicitron": [
            "hydra-core>=1.1",
            "visdom",
            "lpips",
            "tqdm>4.29.0",
            "matplotlib",
            "accelerate",
            "sqlalchemy>=2.0",
        ],
    },
    entry_points={
        "console_scripts": [
            f"pytorch3d_implicitron_runner={trainer}.experiment:experiment",
            f"pytorch3d_implicitron_visualizer={trainer}.visualize_reconstruction:main",
        ]
    },
    ext_modules=get_extensions(),
    cmdclass={"build_ext": BuildExtension},
    package_data={
        "": ["*.json"],
|
| 180 |
+
},
|
| 181 |
+
)
|
third_party/AnyBimanual/third_party/pytorch3d/tests/test_shapenet_core.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
Sanity checks for loading ShapeNetCore.
|
| 9 |
+
"""
|
| 10 |
+
import os
|
| 11 |
+
import unittest
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
import torch
|
| 15 |
+
from PIL import Image
|
| 16 |
+
from pytorch3d.datasets import collate_batched_meshes, ShapeNetCore
|
| 17 |
+
from pytorch3d.renderer import (
|
| 18 |
+
FoVPerspectiveCameras,
|
| 19 |
+
look_at_view_transform,
|
| 20 |
+
PointLights,
|
| 21 |
+
RasterizationSettings,
|
| 22 |
+
)
|
| 23 |
+
from torch.utils.data import DataLoader
|
| 24 |
+
|
| 25 |
+
from .common_testing import get_tests_dir, load_rgb_image, TestCaseMixin
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Set the SHAPENET_PATH to the local path to the dataset
|
| 29 |
+
SHAPENET_PATH = None
|
| 30 |
+
VERSION = 1
|
| 31 |
+
# If DEBUG=True, save out images generated in the tests for debugging.
|
| 32 |
+
# All saved images have prefix DEBUG_
|
| 33 |
+
DEBUG = False
|
| 34 |
+
DATA_DIR = get_tests_dir() / "data"
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class TestShapenetCore(TestCaseMixin, unittest.TestCase):
|
| 38 |
+
def setUp(self):
|
| 39 |
+
"""
|
| 40 |
+
Check if the ShapeNet dataset is provided in the repo.
|
| 41 |
+
If not, download this separately and update the shapenet_path`
|
| 42 |
+
with the location of the dataset in order to run the tests.
|
| 43 |
+
"""
|
| 44 |
+
if SHAPENET_PATH is None or not os.path.exists(SHAPENET_PATH):
|
| 45 |
+
url = "https://www.shapenet.org/"
|
| 46 |
+
msg = (
|
| 47 |
+
"ShapeNet data not found, download from %s, update "
|
| 48 |
+
"SHAPENET_PATH at the top of the file, and rerun."
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
self.skipTest(msg % url)
|
| 52 |
+
|
| 53 |
+
def test_load_shapenet_core(self):
|
| 54 |
+
"""
|
| 55 |
+
Test loading both the entire ShapeNetCore dataset and a subset of the ShapeNetCore
|
| 56 |
+
dataset. Check the loaded datasets return items of the correct shapes and types.
|
| 57 |
+
"""
|
| 58 |
+
# Try loading ShapeNetCore with an invalid version number and catch error.
|
| 59 |
+
with self.assertRaises(ValueError) as err:
|
| 60 |
+
ShapeNetCore(SHAPENET_PATH, version=3)
|
| 61 |
+
self.assertTrue("Version number must be either 1 or 2." in str(err.exception))
|
| 62 |
+
|
| 63 |
+
# Load ShapeNetCore without specifying any particular categories.
|
| 64 |
+
shapenet_dataset = ShapeNetCore(SHAPENET_PATH, version=VERSION)
|
| 65 |
+
|
| 66 |
+
# Count the number of grandchildren directories (which should be equal to
|
| 67 |
+
# the total number of objects in the dataset) by walking through the given
|
| 68 |
+
# directory.
|
| 69 |
+
wnsynset_list = [
|
| 70 |
+
wnsynset
|
| 71 |
+
for wnsynset in os.listdir(SHAPENET_PATH)
|
| 72 |
+
if os.path.isdir(os.path.join(SHAPENET_PATH, wnsynset))
|
| 73 |
+
]
|
| 74 |
+
model_num_list = [
|
| 75 |
+
(len(next(os.walk(os.path.join(SHAPENET_PATH, wnsynset)))[1]))
|
| 76 |
+
for wnsynset in wnsynset_list
|
| 77 |
+
]
|
| 78 |
+
# Check total number of objects in the dataset is correct.
|
| 79 |
+
self.assertEqual(len(shapenet_dataset), sum(model_num_list))
|
| 80 |
+
|
| 81 |
+
# Randomly retrieve an object from the dataset.
|
| 82 |
+
rand_obj = shapenet_dataset[torch.randint(len(shapenet_dataset), (1,))]
|
| 83 |
+
# Check that data types and shapes of items returned by __getitem__ are correct.
|
| 84 |
+
verts, faces = rand_obj["verts"], rand_obj["faces"]
|
| 85 |
+
self.assertTrue(verts.dtype == torch.float32)
|
| 86 |
+
self.assertTrue(faces.dtype == torch.int64)
|
| 87 |
+
self.assertEqual(verts.ndim, 2)
|
| 88 |
+
self.assertEqual(verts.shape[-1], 3)
|
| 89 |
+
self.assertEqual(faces.ndim, 2)
|
| 90 |
+
self.assertEqual(faces.shape[-1], 3)
|
| 91 |
+
|
| 92 |
+
# Load six categories from ShapeNetCore.
|
| 93 |
+
# Specify categories with a combination of offsets and labels.
|
| 94 |
+
shapenet_subset = ShapeNetCore(
|
| 95 |
+
SHAPENET_PATH,
|
| 96 |
+
synsets=[
|
| 97 |
+
"04330267",
|
| 98 |
+
"guitar",
|
| 99 |
+
"02801938",
|
| 100 |
+
"birdhouse",
|
| 101 |
+
"03991062",
|
| 102 |
+
"tower",
|
| 103 |
+
],
|
| 104 |
+
version=1,
|
| 105 |
+
)
|
| 106 |
+
subset_offsets = [
|
| 107 |
+
"04330267",
|
| 108 |
+
"03467517",
|
| 109 |
+
"02801938",
|
| 110 |
+
"02843684",
|
| 111 |
+
"03991062",
|
| 112 |
+
"04460130",
|
| 113 |
+
]
|
| 114 |
+
subset_model_nums = [
|
| 115 |
+
(len(next(os.walk(os.path.join(SHAPENET_PATH, offset)))[1]))
|
| 116 |
+
for offset in subset_offsets
|
| 117 |
+
]
|
| 118 |
+
self.assertEqual(len(shapenet_subset), sum(subset_model_nums))
|
| 119 |
+
|
| 120 |
+
def test_collate_models(self):
|
| 121 |
+
"""
|
| 122 |
+
Test collate_batched_meshes returns items of the correct shapes and types.
|
| 123 |
+
Check that when collate_batched_meshes is passed to Dataloader, batches of
|
| 124 |
+
the correct shapes and types are returned.
|
| 125 |
+
"""
|
| 126 |
+
# Load ShapeNetCore without specifying any particular categories.
|
| 127 |
+
shapenet_dataset = ShapeNetCore(SHAPENET_PATH)
|
| 128 |
+
# Randomly retrieve several objects from the dataset.
|
| 129 |
+
rand_idxs = torch.randint(len(shapenet_dataset), (6,))
|
| 130 |
+
rand_objs = [shapenet_dataset[idx] for idx in rand_idxs]
|
| 131 |
+
|
| 132 |
+
# Collate the randomly selected objects
|
| 133 |
+
collated_meshes = collate_batched_meshes(rand_objs)
|
| 134 |
+
verts, faces = (collated_meshes["verts"], collated_meshes["faces"])
|
| 135 |
+
self.assertEqual(len(verts), 6)
|
| 136 |
+
self.assertEqual(len(faces), 6)
|
| 137 |
+
|
| 138 |
+
# Pass the custom collate_fn function to DataLoader and check elements
|
| 139 |
+
# in batch have the correct shape.
|
| 140 |
+
batch_size = 12
|
| 141 |
+
shapenet_core_loader = DataLoader(
|
| 142 |
+
shapenet_dataset, batch_size=batch_size, collate_fn=collate_batched_meshes
|
| 143 |
+
)
|
| 144 |
+
it = iter(shapenet_core_loader)
|
| 145 |
+
object_batch = next(it)
|
| 146 |
+
self.assertEqual(len(object_batch["synset_id"]), batch_size)
|
| 147 |
+
self.assertEqual(len(object_batch["model_id"]), batch_size)
|
| 148 |
+
self.assertEqual(len(object_batch["label"]), batch_size)
|
| 149 |
+
self.assertEqual(object_batch["mesh"].verts_padded().shape[0], batch_size)
|
| 150 |
+
self.assertEqual(object_batch["mesh"].faces_padded().shape[0], batch_size)
|
| 151 |
+
|
| 152 |
+
def test_catch_render_arg_errors(self):
|
| 153 |
+
"""
|
| 154 |
+
Test rendering ShapeNetCore with invalid model_ids, categories or indices,
|
| 155 |
+
and catch corresponding errors.
|
| 156 |
+
"""
|
| 157 |
+
# Load ShapeNetCore.
|
| 158 |
+
shapenet_dataset = ShapeNetCore(SHAPENET_PATH)
|
| 159 |
+
|
| 160 |
+
# Try loading with an invalid model_id and catch error.
|
| 161 |
+
with self.assertRaises(ValueError) as err:
|
| 162 |
+
shapenet_dataset.render(model_ids=["piano0"])
|
| 163 |
+
self.assertTrue("not found in the loaded dataset" in str(err.exception))
|
| 164 |
+
|
| 165 |
+
# Try loading with an index out of bounds and catch error.
|
| 166 |
+
with self.assertRaises(IndexError) as err:
|
| 167 |
+
shapenet_dataset.render(idxs=[100000])
|
| 168 |
+
self.assertTrue("are out of bounds" in str(err.exception))
|
| 169 |
+
|
| 170 |
+
def test_render_shapenet_core(self):
|
| 171 |
+
"""
|
| 172 |
+
Test rendering objects from ShapeNetCore.
|
| 173 |
+
"""
|
| 174 |
+
# Setup device and seed for random selections.
|
| 175 |
+
device = torch.device("cuda:0")
|
| 176 |
+
torch.manual_seed(39)
|
| 177 |
+
|
| 178 |
+
# Load category piano from ShapeNetCore.
|
| 179 |
+
piano_dataset = ShapeNetCore(SHAPENET_PATH, synsets=["piano"])
|
| 180 |
+
|
| 181 |
+
# Rendering settings.
|
| 182 |
+
R, T = look_at_view_transform(1.0, 1.0, 90)
|
| 183 |
+
cameras = FoVPerspectiveCameras(R=R, T=T, device=device)
|
| 184 |
+
raster_settings = RasterizationSettings(image_size=512)
|
| 185 |
+
lights = PointLights(
|
| 186 |
+
location=torch.tensor([0.0, 1.0, -2.0], device=device)[None],
|
| 187 |
+
# TODO: debug the source of the discrepancy in two images when rendering on GPU.
|
| 188 |
+
diffuse_color=((0, 0, 0),),
|
| 189 |
+
specular_color=((0, 0, 0),),
|
| 190 |
+
device=device,
|
| 191 |
+
)
|
| 192 |
+
|
| 193 |
+
# Render first three models in the piano category.
|
| 194 |
+
pianos = piano_dataset.render(
|
| 195 |
+
idxs=list(range(3)),
|
| 196 |
+
device=device,
|
| 197 |
+
cameras=cameras,
|
| 198 |
+
raster_settings=raster_settings,
|
| 199 |
+
lights=lights,
|
| 200 |
+
)
|
| 201 |
+
# Check that there are three images in the batch.
|
| 202 |
+
self.assertEqual(pianos.shape[0], 3)
|
| 203 |
+
|
| 204 |
+
# Compare the rendered models to the reference images.
|
| 205 |
+
for idx in range(3):
|
| 206 |
+
piano_rgb = pianos[idx, ..., :3].squeeze().cpu()
|
| 207 |
+
if DEBUG:
|
| 208 |
+
Image.fromarray((piano_rgb.numpy() * 255).astype(np.uint8)).save(
|
| 209 |
+
DATA_DIR / ("DEBUG_shapenet_core_render_piano_by_idxs_%s.png" % idx)
|
| 210 |
+
)
|
| 211 |
+
image_ref = load_rgb_image(
|
| 212 |
+
"test_shapenet_core_render_piano_%s.png" % idx, DATA_DIR
|
| 213 |
+
)
|
| 214 |
+
self.assertClose(piano_rgb, image_ref, atol=0.05)
|
| 215 |
+
|
| 216 |
+
# Render the same piano models but by model_ids this time.
|
| 217 |
+
pianos_2 = piano_dataset.render(
|
| 218 |
+
model_ids=[
|
| 219 |
+
"13394ca47c89f91525a3aaf903a41c90",
|
| 220 |
+
"14755c2ee8e693aba508f621166382b0",
|
| 221 |
+
"156c4207af6d2c8f1fdc97905708b8ea",
|
| 222 |
+
],
|
| 223 |
+
device=device,
|
| 224 |
+
cameras=cameras,
|
| 225 |
+
raster_settings=raster_settings,
|
| 226 |
+
lights=lights,
|
| 227 |
+
)
|
| 228 |
+
|
| 229 |
+
# Compare the rendered models to the reference images.
|
| 230 |
+
for idx in range(3):
|
| 231 |
+
piano_rgb_2 = pianos_2[idx, ..., :3].squeeze().cpu()
|
| 232 |
+
if DEBUG:
|
| 233 |
+
Image.fromarray((piano_rgb_2.numpy() * 255).astype(np.uint8)).save(
|
| 234 |
+
DATA_DIR / ("DEBUG_shapenet_core_render_piano_by_ids_%s.png" % idx)
|
| 235 |
+
)
|
| 236 |
+
image_ref = load_rgb_image(
|
| 237 |
+
"test_shapenet_core_render_piano_%s.png" % idx, DATA_DIR
|
| 238 |
+
)
|
| 239 |
+
self.assertClose(piano_rgb_2, image_ref, atol=0.05)
|
| 240 |
+
|
| 241 |
+
#######################
|
| 242 |
+
# Render by categories
|
| 243 |
+
#######################
|
| 244 |
+
|
| 245 |
+
# Load ShapeNetCore.
|
| 246 |
+
shapenet_dataset = ShapeNetCore(SHAPENET_PATH)
|
| 247 |
+
|
| 248 |
+
# Render a mixture of categories and specify the number of models to be
|
| 249 |
+
# randomly sampled from each category.
|
| 250 |
+
mixed_objs = shapenet_dataset.render(
|
| 251 |
+
categories=["faucet", "chair"],
|
| 252 |
+
sample_nums=[2, 1],
|
| 253 |
+
device=device,
|
| 254 |
+
cameras=cameras,
|
| 255 |
+
raster_settings=raster_settings,
|
| 256 |
+
lights=lights,
|
| 257 |
+
)
|
| 258 |
+
# Compare the rendered models to the reference images.
|
| 259 |
+
for idx in range(3):
|
| 260 |
+
mixed_rgb = mixed_objs[idx, ..., :3].squeeze().cpu()
|
| 261 |
+
if DEBUG:
|
| 262 |
+
Image.fromarray((mixed_rgb.numpy() * 255).astype(np.uint8)).save(
|
| 263 |
+
DATA_DIR
|
| 264 |
+
/ ("DEBUG_shapenet_core_render_mixed_by_categories_%s.png" % idx)
|
| 265 |
+
)
|
| 266 |
+
image_ref = load_rgb_image(
|
| 267 |
+
"test_shapenet_core_render_mixed_by_categories_%s.png" % idx, DATA_DIR
|
| 268 |
+
)
|
| 269 |
+
self.assertClose(mixed_rgb, image_ref, atol=0.05)
|
| 270 |
+
|
| 271 |
+
# Render a mixture of categories without specifying sample_nums.
|
| 272 |
+
mixed_objs_2 = shapenet_dataset.render(
|
| 273 |
+
categories=["faucet", "chair"],
|
| 274 |
+
device=device,
|
| 275 |
+
cameras=cameras,
|
| 276 |
+
raster_settings=raster_settings,
|
| 277 |
+
lights=lights,
|
| 278 |
+
)
|
| 279 |
+
# Compare the rendered models to the reference images.
|
| 280 |
+
for idx in range(2):
|
| 281 |
+
mixed_rgb_2 = mixed_objs_2[idx, ..., :3].squeeze().cpu()
|
| 282 |
+
if DEBUG:
|
| 283 |
+
Image.fromarray((mixed_rgb_2.numpy() * 255).astype(np.uint8)).save(
|
| 284 |
+
DATA_DIR
|
| 285 |
+
/ ("DEBUG_shapenet_core_render_without_sample_nums_%s.png" % idx)
|
| 286 |
+
)
|
| 287 |
+
image_ref = load_rgb_image(
|
| 288 |
+
"test_shapenet_core_render_without_sample_nums_%s.png" % idx, DATA_DIR
|
| 289 |
+
)
|
| 290 |
+
self.assertClose(mixed_rgb_2, image_ref, atol=0.05)
|
| 291 |
+
|
| 292 |
+
def test_load_textures_false(self):
|
| 293 |
+
shapenet_dataset = ShapeNetCore(
|
| 294 |
+
SHAPENET_PATH, load_textures=False, version=VERSION
|
| 295 |
+
)
|
| 296 |
+
model = shapenet_dataset[0]
|
| 297 |
+
self.assertIsNone(model["textures"])
|
third_party/AnyBimanual/third_party/pytorch3d/tests/test_so3.py
ADDED
|
@@ -0,0 +1,283 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
import math
|
| 9 |
+
import unittest
|
| 10 |
+
from distutils.version import LooseVersion
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
import torch
|
| 14 |
+
from pytorch3d.transforms.so3 import (
|
| 15 |
+
hat,
|
| 16 |
+
so3_exp_map,
|
| 17 |
+
so3_log_map,
|
| 18 |
+
so3_relative_angle,
|
| 19 |
+
so3_rotation_angle,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
from .common_testing import TestCaseMixin
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class TestSO3(TestCaseMixin, unittest.TestCase):
|
| 26 |
+
def setUp(self) -> None:
|
| 27 |
+
super().setUp()
|
| 28 |
+
torch.manual_seed(42)
|
| 29 |
+
np.random.seed(42)
|
| 30 |
+
|
| 31 |
+
@staticmethod
|
| 32 |
+
def init_log_rot(batch_size: int = 10):
|
| 33 |
+
"""
|
| 34 |
+
Initialize a list of `batch_size` 3-dimensional vectors representing
|
| 35 |
+
randomly generated logarithms of rotation matrices.
|
| 36 |
+
"""
|
| 37 |
+
device = torch.device("cuda:0")
|
| 38 |
+
log_rot = torch.randn((batch_size, 3), dtype=torch.float32, device=device)
|
| 39 |
+
return log_rot
|
| 40 |
+
|
| 41 |
+
@staticmethod
|
| 42 |
+
def init_rot(batch_size: int = 10):
|
| 43 |
+
"""
|
| 44 |
+
Randomly generate a batch of `batch_size` 3x3 rotation matrices.
|
| 45 |
+
"""
|
| 46 |
+
device = torch.device("cuda:0")
|
| 47 |
+
|
| 48 |
+
# TODO(dnovotny): replace with random_rotation from random_rotation.py
|
| 49 |
+
rot = []
|
| 50 |
+
for _ in range(batch_size):
|
| 51 |
+
r = torch.linalg.qr(torch.randn((3, 3), device=device))[0]
|
| 52 |
+
f = torch.randint(2, (3,), device=device, dtype=torch.float32)
|
| 53 |
+
if f.sum() % 2 == 0:
|
| 54 |
+
f = 1 - f
|
| 55 |
+
rot.append(r * (2 * f - 1).float())
|
| 56 |
+
rot = torch.stack(rot)
|
| 57 |
+
|
| 58 |
+
return rot
|
| 59 |
+
|
| 60 |
+
def test_determinant(self):
|
| 61 |
+
"""
|
| 62 |
+
Tests whether the determinants of 3x3 rotation matrices produced
|
| 63 |
+
by `so3_exp_map` are (almost) equal to 1.
|
| 64 |
+
"""
|
| 65 |
+
log_rot = TestSO3.init_log_rot(batch_size=30)
|
| 66 |
+
Rs = so3_exp_map(log_rot)
|
| 67 |
+
dets = torch.det(Rs)
|
| 68 |
+
self.assertClose(dets, torch.ones_like(dets), atol=1e-4)
|
| 69 |
+
|
| 70 |
+
def test_cross(self):
|
| 71 |
+
"""
|
| 72 |
+
For a pair of randomly generated 3-dimensional vectors `a` and `b`,
|
| 73 |
+
tests whether a matrix product of `hat(a)` and `b` equals the result
|
| 74 |
+
of a cross product between `a` and `b`.
|
| 75 |
+
"""
|
| 76 |
+
device = torch.device("cuda:0")
|
| 77 |
+
a, b = torch.randn((2, 100, 3), dtype=torch.float32, device=device)
|
| 78 |
+
hat_a = hat(a)
|
| 79 |
+
cross = torch.bmm(hat_a, b[:, :, None])[:, :, 0]
|
| 80 |
+
torch_cross = torch.cross(a, b, dim=1)
|
| 81 |
+
self.assertClose(torch_cross, cross, atol=1e-4)
|
| 82 |
+
|
| 83 |
+
def test_bad_so3_input_value_err(self):
|
| 84 |
+
"""
|
| 85 |
+
Tests whether `so3_exp_map` and `so3_log_map` correctly return
|
| 86 |
+
a ValueError if called with an argument of incorrect shape or, in case
|
| 87 |
+
of `so3_exp_map`, unexpected trace.
|
| 88 |
+
"""
|
| 89 |
+
device = torch.device("cuda:0")
|
| 90 |
+
log_rot = torch.randn(size=[5, 4], device=device)
|
| 91 |
+
with self.assertRaises(ValueError) as err:
|
| 92 |
+
so3_exp_map(log_rot)
|
| 93 |
+
self.assertTrue("Input tensor shape has to be Nx3." in str(err.exception))
|
| 94 |
+
|
| 95 |
+
rot = torch.randn(size=[5, 3, 5], device=device)
|
| 96 |
+
with self.assertRaises(ValueError) as err:
|
| 97 |
+
so3_log_map(rot)
|
| 98 |
+
self.assertTrue("Input has to be a batch of 3x3 Tensors." in str(err.exception))
|
| 99 |
+
|
| 100 |
+
def test_so3_exp_singularity(self, batch_size: int = 100):
|
| 101 |
+
"""
|
| 102 |
+
Tests whether the `so3_exp_map` is robust to the input vectors
|
| 103 |
+
the norms of which are close to the numerically unstable region
|
| 104 |
+
(vectors with low l2-norms).
|
| 105 |
+
"""
|
| 106 |
+
# generate random log-rotations with a tiny angle
|
| 107 |
+
log_rot = TestSO3.init_log_rot(batch_size=batch_size)
|
| 108 |
+
log_rot_small = log_rot * 1e-6
|
| 109 |
+
log_rot_small.requires_grad = True
|
| 110 |
+
R = so3_exp_map(log_rot_small)
|
| 111 |
+
# tests whether all outputs are finite
|
| 112 |
+
self.assertTrue(torch.isfinite(R).all())
|
| 113 |
+
# tests whether the gradient is not None and all finite
|
| 114 |
+
loss = R.sum()
|
| 115 |
+
loss.backward()
|
| 116 |
+
self.assertIsNotNone(log_rot_small.grad)
|
| 117 |
+
self.assertTrue(torch.isfinite(log_rot_small.grad).all())
|
| 118 |
+
|
| 119 |
+
def test_so3_log_singularity(self, batch_size: int = 100):
|
| 120 |
+
"""
|
| 121 |
+
Tests whether the `so3_log_map` is robust to the input matrices
|
| 122 |
+
who's rotation angles are close to the numerically unstable region
|
| 123 |
+
(i.e. matrices with low rotation angles).
|
| 124 |
+
"""
|
| 125 |
+
# generate random rotations with a tiny angle
|
| 126 |
+
device = torch.device("cuda:0")
|
| 127 |
+
identity = torch.eye(3, device=device)
|
| 128 |
+
rot180 = identity * torch.tensor([[1.0, -1.0, -1.0]], device=device)
|
| 129 |
+
r = [identity, rot180]
|
| 130 |
+
# add random rotations and random almost orthonormal matrices
|
| 131 |
+
r.extend(
|
| 132 |
+
[
|
| 133 |
+
torch.linalg.qr(identity + torch.randn_like(identity) * 1e-4)[0]
|
| 134 |
+
+ float(i > batch_size // 2) * (0.5 - torch.rand_like(identity)) * 1e-3
|
| 135 |
+
# this adds random noise to the second half
|
| 136 |
+
# of the random orthogonal matrices to generate
|
| 137 |
+
# near-orthogonal matrices
|
| 138 |
+
for i in range(batch_size - 2)
|
| 139 |
+
]
|
| 140 |
+
)
|
| 141 |
+
r = torch.stack(r)
|
| 142 |
+
r.requires_grad = True
|
| 143 |
+
# the log of the rotation matrix r
|
| 144 |
+
r_log = so3_log_map(r, cos_bound=1e-4, eps=1e-2)
|
| 145 |
+
# tests whether all outputs are finite
|
| 146 |
+
self.assertTrue(torch.isfinite(r_log).all())
|
| 147 |
+
# tests whether the gradient is not None and all finite
|
| 148 |
+
loss = r.sum()
|
| 149 |
+
loss.backward()
|
| 150 |
+
self.assertIsNotNone(r.grad)
|
| 151 |
+
self.assertTrue(torch.isfinite(r.grad).all())
|
| 152 |
+
|
| 153 |
+
def test_so3_log_to_exp_to_log_to_exp(self, batch_size: int = 100):
|
| 154 |
+
"""
|
| 155 |
+
Check that
|
| 156 |
+
`so3_exp_map(so3_log_map(so3_exp_map(log_rot)))
|
| 157 |
+
== so3_exp_map(log_rot)`
|
| 158 |
+
for a randomly generated batch of rotation matrix logarithms `log_rot`.
|
| 159 |
+
Unlike `test_so3_log_to_exp_to_log`, this test checks the
|
| 160 |
+
correctness of converting a `log_rot` which contains values > math.pi.
|
| 161 |
+
"""
|
| 162 |
+
log_rot = 2.0 * TestSO3.init_log_rot(batch_size=batch_size)
|
| 163 |
+
# check also the singular cases where rot. angle = {0, 2pi}
|
| 164 |
+
log_rot[:2] = 0
|
| 165 |
+
log_rot[1, 0] = 2.0 * math.pi - 1e-6
|
| 166 |
+
rot = so3_exp_map(log_rot, eps=1e-4)
|
| 167 |
+
rot_ = so3_exp_map(so3_log_map(rot, eps=1e-4, cos_bound=1e-6), eps=1e-6)
|
| 168 |
+
self.assertClose(rot, rot_, atol=0.01)
|
| 169 |
+
angles = so3_relative_angle(rot, rot_, cos_bound=1e-6)
|
| 170 |
+
self.assertClose(angles, torch.zeros_like(angles), atol=0.01)
|
| 171 |
+
|
| 172 |
+
def test_so3_log_to_exp_to_log(self, batch_size: int = 100):
|
| 173 |
+
"""
|
| 174 |
+
Check that `so3_log_map(so3_exp_map(log_rot))==log_rot` for
|
| 175 |
+
a randomly generated batch of rotation matrix logarithms `log_rot`.
|
| 176 |
+
"""
|
| 177 |
+
log_rot = TestSO3.init_log_rot(batch_size=batch_size)
|
| 178 |
+
# check also the singular cases where rot. angle = 0
|
| 179 |
+
log_rot[:1] = 0
|
| 180 |
+
log_rot_ = so3_log_map(so3_exp_map(log_rot))
|
| 181 |
+
self.assertClose(log_rot, log_rot_, atol=1e-4)
|
| 182 |
+
|
| 183 |
+
def test_so3_exp_to_log_to_exp(self, batch_size: int = 100):
|
| 184 |
+
"""
|
| 185 |
+
Check that `so3_exp_map(so3_log_map(R))==R` for
|
| 186 |
+
a batch of randomly generated rotation matrices `R`.
|
| 187 |
+
"""
|
| 188 |
+
rot = TestSO3.init_rot(batch_size=batch_size)
|
| 189 |
+
non_singular = (so3_rotation_angle(rot) - math.pi).abs() > 1e-2
|
| 190 |
+
rot = rot[non_singular]
|
| 191 |
+
rot_ = so3_exp_map(so3_log_map(rot, eps=1e-8, cos_bound=1e-8), eps=1e-8)
|
| 192 |
+
self.assertClose(rot_, rot, atol=0.1)
|
| 193 |
+
angles = so3_relative_angle(rot, rot_, cos_bound=1e-4)
|
| 194 |
+
self.assertClose(angles, torch.zeros_like(angles), atol=0.1)
|
| 195 |
+
|
| 196 |
+
def test_so3_cos_relative_angle(self, batch_size: int = 100):
|
| 197 |
+
"""
|
| 198 |
+
Check that `so3_relative_angle(R1, R2, cos_angle=False).cos()`
|
| 199 |
+
is the same as `so3_relative_angle(R1, R2, cos_angle=True)` for
|
| 200 |
+
batches of randomly generated rotation matrices `R1` and `R2`.
|
| 201 |
+
"""
|
| 202 |
+
rot1 = TestSO3.init_rot(batch_size=batch_size)
|
| 203 |
+
rot2 = TestSO3.init_rot(batch_size=batch_size)
|
| 204 |
+
angles = so3_relative_angle(rot1, rot2, cos_angle=False).cos()
|
| 205 |
+
angles_ = so3_relative_angle(rot1, rot2, cos_angle=True)
|
| 206 |
+
self.assertClose(angles, angles_, atol=1e-4)
|
| 207 |
+
|
| 208 |
+
def test_so3_cos_angle(self, batch_size: int = 100):
|
| 209 |
+
"""
|
| 210 |
+
Check that `so3_rotation_angle(R, cos_angle=False).cos()`
|
| 211 |
+
is the same as `so3_rotation_angle(R, cos_angle=True)` for
|
| 212 |
+
a batch of randomly generated rotation matrices `R`.
|
| 213 |
+
"""
|
| 214 |
+
rot = TestSO3.init_rot(batch_size=batch_size)
|
| 215 |
+
angles = so3_rotation_angle(rot, cos_angle=False).cos()
|
| 216 |
+
angles_ = so3_rotation_angle(rot, cos_angle=True)
|
| 217 |
+
self.assertClose(angles, angles_, atol=1e-4)
|
| 218 |
+
|
| 219 |
+
def test_so3_cos_bound(self, batch_size: int = 100):
|
| 220 |
+
"""
|
| 221 |
+
Checks that for an identity rotation `R=I`, the so3_rotation_angle returns
|
| 222 |
+
non-finite gradients when `cos_bound=None` and finite gradients
|
| 223 |
+
for `cos_bound > 0.0`.
|
| 224 |
+
"""
|
| 225 |
+
# generate random rotations with a tiny angle to generate cases
|
| 226 |
+
# with the gradient singularity
|
| 227 |
+
device = torch.device("cuda:0")
|
| 228 |
+
identity = torch.eye(3, device=device)
|
| 229 |
+
rot180 = identity * torch.tensor([[1.0, -1.0, -1.0]], device=device)
|
| 230 |
+
r = [identity, rot180]
|
| 231 |
+
r.extend(
|
| 232 |
+
[
|
| 233 |
+
torch.linalg.qr(identity + torch.randn_like(identity) * 1e-4)[0]
|
| 234 |
+
for _ in range(batch_size - 2)
|
| 235 |
+
]
|
| 236 |
+
)
|
| 237 |
+
r = torch.stack(r)
|
| 238 |
+
r.requires_grad = True
|
| 239 |
+
for is_grad_finite in (True, False):
|
| 240 |
+
# clear the gradients and decide the cos_bound:
|
| 241 |
+
# for is_grad_finite we run so3_rotation_angle with cos_bound
|
| 242 |
+
# set to a small float, otherwise we set to 0.0
|
| 243 |
+
r.grad = None
|
| 244 |
+
cos_bound = 1e-4 if is_grad_finite else 0.0
|
| 245 |
+
# compute the angles of r
|
| 246 |
+
angles = so3_rotation_angle(r, cos_bound=cos_bound)
|
| 247 |
+
# tests whether all outputs are finite in both cases
|
| 248 |
+
self.assertTrue(torch.isfinite(angles).all())
|
| 249 |
+
# compute the gradients
|
| 250 |
+
loss = angles.sum()
|
| 251 |
+
loss.backward()
|
| 252 |
+
# tests whether the gradient is not None for both cases
|
| 253 |
+
self.assertIsNotNone(r.grad)
|
| 254 |
+
if is_grad_finite:
|
| 255 |
+
# all grad values have to be finite
|
| 256 |
+
self.assertTrue(torch.isfinite(r.grad).all())
|
| 257 |
+
|
| 258 |
+
@unittest.skipIf(LooseVersion(torch.__version__) < "1.9", "recent torchscript only")
|
| 259 |
+
def test_scriptable(self):
|
| 260 |
+
torch.jit.script(so3_exp_map)
|
| 261 |
+
torch.jit.script(so3_log_map)
|
| 262 |
+
|
| 263 |
+
@staticmethod
|
| 264 |
+
def so3_expmap(batch_size: int = 10):
|
| 265 |
+
log_rot = TestSO3.init_log_rot(batch_size=batch_size)
|
| 266 |
+
torch.cuda.synchronize()
|
| 267 |
+
|
| 268 |
+
def compute_rots():
|
| 269 |
+
so3_exp_map(log_rot)
|
| 270 |
+
torch.cuda.synchronize()
|
| 271 |
+
|
| 272 |
+
return compute_rots
|
| 273 |
+
|
| 274 |
+
@staticmethod
|
| 275 |
+
def so3_logmap(batch_size: int = 10):
|
| 276 |
+
log_rot = TestSO3.init_rot(batch_size=batch_size)
|
| 277 |
+
torch.cuda.synchronize()
|
| 278 |
+
|
| 279 |
+
def compute_logs():
|
| 280 |
+
so3_log_map(log_rot)
|
| 281 |
+
torch.cuda.synchronize()
|
| 282 |
+
|
| 283 |
+
return compute_logs
|
third_party/AnyBimanual/third_party/pytorch3d/tests/test_splatter_blend.py
ADDED
|
@@ -0,0 +1,627 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
import unittest
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from pytorch3d.common.compat import meshgrid_ij
|
| 11 |
+
from pytorch3d.renderer.cameras import FoVPerspectiveCameras
|
| 12 |
+
from pytorch3d.renderer.splatter_blend import (
|
| 13 |
+
_compute_occlusion_layers,
|
| 14 |
+
_compute_splatted_colors_and_weights,
|
| 15 |
+
_compute_splatting_colors_and_weights,
|
| 16 |
+
_get_splat_kernel_normalization,
|
| 17 |
+
_normalize_and_compose_all_layers,
|
| 18 |
+
_offset_splats,
|
| 19 |
+
_precompute,
|
| 20 |
+
_prepare_pixels_and_colors,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
from .common_testing import TestCaseMixin
|
| 24 |
+
|
| 25 |
+
offsets = torch.tensor(
|
| 26 |
+
[
|
| 27 |
+
[-1, -1],
|
| 28 |
+
[-1, 0],
|
| 29 |
+
[-1, 1],
|
| 30 |
+
[0, -1],
|
| 31 |
+
[0, 0],
|
| 32 |
+
[0, 1],
|
| 33 |
+
[1, -1],
|
| 34 |
+
[1, 0],
|
| 35 |
+
[1, 1],
|
| 36 |
+
],
|
| 37 |
+
device=torch.device("cpu"),
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def compute_splatting_colors_and_weights_naive(pixel_coords_screen, colors, sigma):
|
| 42 |
+
normalizer = float(_get_splat_kernel_normalization(offsets))
|
| 43 |
+
N, H, W, K, _ = colors.shape
|
| 44 |
+
splat_weights_and_colors = torch.zeros((N, H, W, K, 9, 5))
|
| 45 |
+
for n in range(N):
|
| 46 |
+
for h in range(H):
|
| 47 |
+
for w in range(W):
|
| 48 |
+
for k in range(K):
|
| 49 |
+
q_xy = pixel_coords_screen[n, h, w, k]
|
| 50 |
+
q_to_px_center = torch.floor(q_xy) - q_xy + 0.5
|
| 51 |
+
color = colors[n, h, w, k]
|
| 52 |
+
alpha = colors[n, h, w, k, 3:4]
|
| 53 |
+
for d in range(9):
|
| 54 |
+
dist_p_q = torch.sum((q_to_px_center + offsets[d]) ** 2)
|
| 55 |
+
splat_weight = (
|
| 56 |
+
alpha * torch.exp(-dist_p_q / (2 * sigma**2)) * normalizer
|
| 57 |
+
)
|
| 58 |
+
splat_color = splat_weight * color
|
| 59 |
+
splat_weights_and_colors[n, h, w, k, d, :4] = splat_color
|
| 60 |
+
splat_weights_and_colors[n, h, w, k, d, 4:5] = splat_weight
|
| 61 |
+
return splat_weights_and_colors
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class TestPrecompute(TestCaseMixin, unittest.TestCase):
|
| 65 |
+
def setUp(self):
|
| 66 |
+
self.results_cpu = _precompute((2, 3, 4, 5), torch.device("cpu"))
|
| 67 |
+
self.results1_cpu = _precompute((1, 1, 1, 1), torch.device("cpu"))
|
| 68 |
+
|
| 69 |
+
def test_offsets(self):
|
| 70 |
+
self.assertClose(self.results_cpu[2].shape, offsets.shape, atol=0)
|
| 71 |
+
self.assertClose(self.results_cpu[2], offsets, atol=0)
|
| 72 |
+
|
| 73 |
+
# Offsets should be independent of input_size.
|
| 74 |
+
self.assertClose(self.results_cpu[2], self.results1_cpu[2], atol=0)
|
| 75 |
+
|
| 76 |
+
def test_crops_h(self):
|
| 77 |
+
target_crops_h1 = torch.tensor(
|
| 78 |
+
[
|
| 79 |
+
# chennels being offset:
|
| 80 |
+
# R G B A W(eight)
|
| 81 |
+
[0, 0, 0, 0, 0],
|
| 82 |
+
[1, 1, 1, 1, 1],
|
| 83 |
+
[2, 2, 2, 2, 2],
|
| 84 |
+
[0, 0, 0, 0, 0],
|
| 85 |
+
[1, 1, 1, 1, 1],
|
| 86 |
+
[2, 2, 2, 2, 2],
|
| 87 |
+
[0, 0, 0, 0, 0],
|
| 88 |
+
[1, 1, 1, 1, 1],
|
| 89 |
+
[2, 2, 2, 2, 2],
|
| 90 |
+
]
|
| 91 |
+
* 3, # 3 because we're aiming at (N, H, W+2, K, 9, 5) with W=1.
|
| 92 |
+
device=torch.device("cpu"),
|
| 93 |
+
).reshape(1, 1, 3, 1, 9, 5)
|
| 94 |
+
self.assertClose(self.results1_cpu[0], target_crops_h1, atol=0)
|
| 95 |
+
|
| 96 |
+
target_crops_h_base = target_crops_h1[0, 0, 0]
|
| 97 |
+
target_crops_h = torch.cat(
|
| 98 |
+
[target_crops_h_base, target_crops_h_base + 1, target_crops_h_base + 2],
|
| 99 |
+
dim=0,
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
# Check that we have the right shape, and (after broadcasting) it has the right
|
| 103 |
+
# values. These should be repeated (tiled) for each n and k.
|
| 104 |
+
self.assertClose(
|
| 105 |
+
self.results_cpu[0].shape, torch.tensor([2, 3, 6, 5, 9, 5]), atol=0
|
| 106 |
+
)
|
| 107 |
+
for n in range(2):
|
| 108 |
+
for w in range(6):
|
| 109 |
+
for k in range(5):
|
| 110 |
+
self.assertClose(
|
| 111 |
+
self.results_cpu[0][n, :, w, k],
|
| 112 |
+
target_crops_h,
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
def test_crops_w(self):
|
| 116 |
+
target_crops_w1 = torch.tensor(
|
| 117 |
+
[
|
| 118 |
+
# chennels being offset:
|
| 119 |
+
# R G B A W(eight)
|
| 120 |
+
[0, 0, 0, 0, 0],
|
| 121 |
+
[0, 0, 0, 0, 0],
|
| 122 |
+
[0, 0, 0, 0, 0],
|
| 123 |
+
[1, 1, 1, 1, 1],
|
| 124 |
+
[1, 1, 1, 1, 1],
|
| 125 |
+
[1, 1, 1, 1, 1],
|
| 126 |
+
[2, 2, 2, 2, 2],
|
| 127 |
+
[2, 2, 2, 2, 2],
|
| 128 |
+
[2, 2, 2, 2, 2],
|
| 129 |
+
],
|
| 130 |
+
device=torch.device("cpu"),
|
| 131 |
+
).reshape(1, 1, 1, 1, 9, 5)
|
| 132 |
+
self.assertClose(self.results1_cpu[1], target_crops_w1)
|
| 133 |
+
|
| 134 |
+
target_crops_w_base = target_crops_w1[0, 0, 0]
|
| 135 |
+
target_crops_w = torch.cat(
|
| 136 |
+
[
|
| 137 |
+
target_crops_w_base,
|
| 138 |
+
target_crops_w_base + 1,
|
| 139 |
+
target_crops_w_base + 2,
|
| 140 |
+
target_crops_w_base + 3,
|
| 141 |
+
],
|
| 142 |
+
dim=0,
|
| 143 |
+
) # Each w value needs an increment.
|
| 144 |
+
|
| 145 |
+
# Check that we have the right shape, and (after broadcasting) it has the right
|
| 146 |
+
# values. These should be repeated (tiled) for each n and k.
|
| 147 |
+
self.assertClose(self.results_cpu[1].shape, torch.tensor([2, 3, 4, 5, 9, 5]))
|
| 148 |
+
for n in range(2):
|
| 149 |
+
for h in range(3):
|
| 150 |
+
for k in range(5):
|
| 151 |
+
self.assertClose(
|
| 152 |
+
self.results_cpu[1][n, h, :, k],
|
| 153 |
+
target_crops_w,
|
| 154 |
+
atol=0,
|
| 155 |
+
)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
class TestPreparPixelsAndColors(TestCaseMixin, unittest.TestCase):
|
| 159 |
+
def setUp(self):
|
| 160 |
+
self.device = torch.device("cpu")
|
| 161 |
+
N, H, W, K = 2, 3, 4, 5
|
| 162 |
+
self.pixel_coords_cameras = torch.randn(
|
| 163 |
+
(N, H, W, K, 3), device=self.device, requires_grad=True
|
| 164 |
+
)
|
| 165 |
+
self.colors_before = torch.rand((N, H, W, K, 3), device=self.device)
|
| 166 |
+
self.cameras = FoVPerspectiveCameras(device=self.device)
|
| 167 |
+
self.background_mask = torch.rand((N, H, W, K), device=self.device) < 0.5
|
| 168 |
+
self.pixel_coords_screen, self.colors_after = _prepare_pixels_and_colors(
|
| 169 |
+
self.pixel_coords_cameras,
|
| 170 |
+
self.colors_before,
|
| 171 |
+
self.cameras,
|
| 172 |
+
self.background_mask,
|
| 173 |
+
)
|
| 174 |
+
|
| 175 |
+
def test_background_z(self):
|
| 176 |
+
self.assertTrue(
|
| 177 |
+
torch.all(self.pixel_coords_screen[..., 2][self.background_mask] == 1.0)
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
def test_background_alpha(self):
|
| 181 |
+
self.assertTrue(
|
| 182 |
+
torch.all(self.colors_after[..., 3][self.background_mask] == 0.0)
|
| 183 |
+
)
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
class TestGetSplatKernelNormalization(TestCaseMixin, unittest.TestCase):
|
| 187 |
+
def test_splat_kernel_normalization(self):
|
| 188 |
+
self.assertAlmostEqual(
|
| 189 |
+
float(_get_splat_kernel_normalization(offsets)), 0.6503, places=3
|
| 190 |
+
)
|
| 191 |
+
self.assertAlmostEqual(
|
| 192 |
+
float(_get_splat_kernel_normalization(offsets, 0.01)), 1.05, places=3
|
| 193 |
+
)
|
| 194 |
+
with self.assertRaisesRegex(ValueError, "Only positive standard deviations"):
|
| 195 |
+
_get_splat_kernel_normalization(offsets, 0)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
class TestComputeOcclusionLayers(TestCaseMixin, unittest.TestCase):
|
| 199 |
+
def test_single_layer(self):
|
| 200 |
+
# If there's only one layer, all splats must be on the surface level.
|
| 201 |
+
N, H, W, K = 2, 3, 4, 1
|
| 202 |
+
q_depth = torch.rand(N, H, W, K)
|
| 203 |
+
occlusion_layers = _compute_occlusion_layers(q_depth)
|
| 204 |
+
self.assertClose(occlusion_layers, torch.zeros(N, H, W, 9).long(), atol=0.0)
|
| 205 |
+
|
| 206 |
+
def test_all_equal(self):
|
| 207 |
+
# If all q-vals are equal, then all splats must be on the surface level.
|
| 208 |
+
N, H, W, K = 2, 3, 4, 5
|
| 209 |
+
q_depth = torch.ones((N, H, W, K)) * 0.1234
|
| 210 |
+
occlusion_layers = _compute_occlusion_layers(q_depth)
|
| 211 |
+
self.assertClose(occlusion_layers, torch.zeros(N, H, W, 9).long(), atol=0.0)
|
| 212 |
+
|
| 213 |
+
def test_mid_to_top_level_splatting(self):
|
| 214 |
+
# Check that occlusion buffers get accumulated as expected when the splatting
|
| 215 |
+
# and splatted pixels are co-surface on different intersection layers.
|
| 216 |
+
# This test will make best sense with accompanying Fig. 4 from "Differentiable
|
| 217 |
+
# Surface Rendering via Non-differentiable Sampling" by Cole et al.
|
| 218 |
+
for direction, offset in enumerate(offsets):
|
| 219 |
+
if direction == 4:
|
| 220 |
+
continue # Skip self-splatting which is always co-surface.
|
| 221 |
+
|
| 222 |
+
depths = torch.zeros(1, 3, 3, 3)
|
| 223 |
+
|
| 224 |
+
# This is our q, the pixel splatted onto, in the center of the image.
|
| 225 |
+
depths[0, 1, 1] = torch.tensor([0.71, 0.8, 1.0])
|
| 226 |
+
|
| 227 |
+
# This is our p, the splatting pixel.
|
| 228 |
+
depths[0, offset[0] + 1, offset[1] + 1] = torch.tensor([0.5, 0.7, 0.9])
|
| 229 |
+
|
| 230 |
+
occlusion_layers = _compute_occlusion_layers(depths)
|
| 231 |
+
|
| 232 |
+
# Check that we computed that it is the middle layer of p that is co-
|
| 233 |
+
# surface with q. (1, 1) is the id of q in the depth array, and offset_id
|
| 234 |
+
# is the id of p's direction w.r.t. q.
|
| 235 |
+
psurfaceid_onto_q = occlusion_layers[0, 1, 1, direction]
|
| 236 |
+
self.assertEqual(int(psurfaceid_onto_q), 1)
|
| 237 |
+
|
| 238 |
+
# Conversely, if we swap p and q, we have a top-level splatting onto
|
| 239 |
+
# mid-level. offset + 1 is the id of p, and 8-offset_id is the id of
|
| 240 |
+
# q's direction w.r.t. p (e.g. if p is [-1, -1] w.r.t. q, then q is
|
| 241 |
+
# [1, 1] w.r.t. p; we use the ids of these two directions in the offsets
|
| 242 |
+
# array).
|
| 243 |
+
qsurfaceid_onto_p = occlusion_layers[
|
| 244 |
+
0, offset[0] + 1, offset[1] + 1, 8 - direction
|
| 245 |
+
]
|
| 246 |
+
self.assertEqual(int(qsurfaceid_onto_p), -1)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
class TestComputeSplattingColorsAndWeights(TestCaseMixin, unittest.TestCase):
|
| 250 |
+
def setUp(self):
|
| 251 |
+
self.N, self.H, self.W, self.K = 2, 3, 4, 5
|
| 252 |
+
self.pixel_coords_screen = (
|
| 253 |
+
torch.stack(
|
| 254 |
+
meshgrid_ij(torch.arange(self.H), torch.arange(self.W)),
|
| 255 |
+
dim=-1,
|
| 256 |
+
)
|
| 257 |
+
.reshape(1, self.H, self.W, 1, 2)
|
| 258 |
+
.expand(self.N, self.H, self.W, self.K, 2)
|
| 259 |
+
.float()
|
| 260 |
+
+ 0.5
|
| 261 |
+
)
|
| 262 |
+
self.colors = torch.ones((self.N, self.H, self.W, self.K, 4))
|
| 263 |
+
|
| 264 |
+
def test_all_equal(self):
|
| 265 |
+
# If all colors are equal and on a regular grid, all weights and reweighted
|
| 266 |
+
# colors should be equal given a specific splatting direction.
|
| 267 |
+
splatting_colors_and_weights = _compute_splatting_colors_and_weights(
|
| 268 |
+
self.pixel_coords_screen, self.colors * 0.2345, sigma=0.5, offsets=offsets
|
| 269 |
+
)
|
| 270 |
+
|
| 271 |
+
# Splatting directly to the top/bottom/left/right should have the same strenght.
|
| 272 |
+
non_diag_splats = splatting_colors_and_weights[
|
| 273 |
+
:, :, :, :, torch.tensor([1, 3, 5, 7])
|
| 274 |
+
]
|
| 275 |
+
|
| 276 |
+
# Same for diagonal splats.
|
| 277 |
+
diag_splats = splatting_colors_and_weights[
|
| 278 |
+
:, :, :, :, torch.tensor([0, 2, 6, 8])
|
| 279 |
+
]
|
| 280 |
+
|
| 281 |
+
# And for self-splats.
|
| 282 |
+
self_splats = splatting_colors_and_weights[:, :, :, :, torch.tensor([4])]
|
| 283 |
+
|
| 284 |
+
for splats in non_diag_splats, diag_splats, self_splats:
|
| 285 |
+
# Colors should be equal.
|
| 286 |
+
self.assertTrue(torch.all(splats[..., :4] == splats[0, 0, 0, 0, 0, 0]))
|
| 287 |
+
|
| 288 |
+
# Weights should be equal.
|
| 289 |
+
self.assertTrue(torch.all(splats[..., 4] == splats[0, 0, 0, 0, 0, 4]))
|
| 290 |
+
|
| 291 |
+
# Non-diagonal weights should be greater than diagonal weights.
|
| 292 |
+
self.assertGreater(
|
| 293 |
+
non_diag_splats[0, 0, 0, 0, 0, 0], diag_splats[0, 0, 0, 0, 0, 0]
|
| 294 |
+
)
|
| 295 |
+
|
| 296 |
+
# Self-splats should be strongest of all.
|
| 297 |
+
self.assertGreater(
|
| 298 |
+
self_splats[0, 0, 0, 0, 0, 0], non_diag_splats[0, 0, 0, 0, 0, 0]
|
| 299 |
+
)
|
| 300 |
+
|
| 301 |
+
# Splatting colors should be reweighted proportionally to their splat weights.
|
| 302 |
+
diag_self_color_ratio = (
|
| 303 |
+
diag_splats[0, 0, 0, 0, 0, 0] / self_splats[0, 0, 0, 0, 0, 0]
|
| 304 |
+
)
|
| 305 |
+
diag_self_weight_ratio = (
|
| 306 |
+
diag_splats[0, 0, 0, 0, 0, 4] / self_splats[0, 0, 0, 0, 0, 4]
|
| 307 |
+
)
|
| 308 |
+
self.assertEqual(diag_self_color_ratio, diag_self_weight_ratio)
|
| 309 |
+
|
| 310 |
+
non_diag_self_color_ratio = (
|
| 311 |
+
non_diag_splats[0, 0, 0, 0, 0, 0] / self_splats[0, 0, 0, 0, 0, 0]
|
| 312 |
+
)
|
| 313 |
+
non_diag_self_weight_ratio = (
|
| 314 |
+
non_diag_splats[0, 0, 0, 0, 0, 4] / self_splats[0, 0, 0, 0, 0, 4]
|
| 315 |
+
)
|
| 316 |
+
self.assertEqual(non_diag_self_color_ratio, non_diag_self_weight_ratio)
|
| 317 |
+
|
| 318 |
+
def test_zero_alpha_zero_weight(self):
|
| 319 |
+
# Pixels with zero alpha do no splatting, but should still be splatted on.
|
| 320 |
+
colors = self.colors.clone()
|
| 321 |
+
colors[0, 1, 1, 0, 3] = 0.0
|
| 322 |
+
splatting_colors_and_weights = _compute_splatting_colors_and_weights(
|
| 323 |
+
self.pixel_coords_screen, colors, sigma=0.5, offsets=offsets
|
| 324 |
+
)
|
| 325 |
+
|
| 326 |
+
# The transparent pixel should do no splatting.
|
| 327 |
+
self.assertTrue(torch.all(splatting_colors_and_weights[0, 1, 1, 0] == 0.0))
|
| 328 |
+
|
| 329 |
+
# Splatting *onto* the transparent pixel should be unaffected.
|
| 330 |
+
reference_weights_colors = splatting_colors_and_weights[0, 1, 1, 1]
|
| 331 |
+
for direction, offset in enumerate(offsets):
|
| 332 |
+
if direction == 4:
|
| 333 |
+
continue # Ignore self-splats
|
| 334 |
+
# We invert the direction to get the right (h, w, d) coordinate of each
|
| 335 |
+
# pixel splatting *onto* the pixel with zero alpha.
|
| 336 |
+
self.assertClose(
|
| 337 |
+
splatting_colors_and_weights[
|
| 338 |
+
0, 1 + offset[0], 1 + offset[1], 0, 8 - direction
|
| 339 |
+
],
|
| 340 |
+
reference_weights_colors[8 - direction],
|
| 341 |
+
atol=0.001,
|
| 342 |
+
)
|
| 343 |
+
|
| 344 |
+
def test_random_inputs(self):
|
| 345 |
+
pixel_coords_screen = (
|
| 346 |
+
self.pixel_coords_screen
|
| 347 |
+
+ torch.randn((self.N, self.H, self.W, self.K, 2)) * 0.1
|
| 348 |
+
)
|
| 349 |
+
colors = torch.rand((self.N, self.H, self.W, self.K, 4))
|
| 350 |
+
splatting_colors_and_weights = _compute_splatting_colors_and_weights(
|
| 351 |
+
pixel_coords_screen, colors, sigma=0.5, offsets=offsets
|
| 352 |
+
)
|
| 353 |
+
naive_colors_and_weights = compute_splatting_colors_and_weights_naive(
|
| 354 |
+
pixel_coords_screen, colors, sigma=0.5
|
| 355 |
+
)
|
| 356 |
+
|
| 357 |
+
self.assertClose(
|
| 358 |
+
splatting_colors_and_weights, naive_colors_and_weights, atol=0.01
|
| 359 |
+
)
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
class TestOffsetSplats(TestCaseMixin, unittest.TestCase):
|
| 363 |
+
def test_offset(self):
|
| 364 |
+
device = torch.device("cuda:0")
|
| 365 |
+
N, H, W, K = 2, 3, 4, 5
|
| 366 |
+
colors_and_weights = torch.rand((N, H, W, K, 9, 5), device=device)
|
| 367 |
+
crop_ids_h, crop_ids_w, _ = _precompute((N, H, W, K), device=device)
|
| 368 |
+
offset_colors_and_weights = _offset_splats(
|
| 369 |
+
colors_and_weights, crop_ids_h, crop_ids_w
|
| 370 |
+
)
|
| 371 |
+
|
| 372 |
+
# Check each splatting direction individually, for clarity.
|
| 373 |
+
# offset_x, offset_y = (-1, -1)
|
| 374 |
+
direction = 0
|
| 375 |
+
self.assertClose(
|
| 376 |
+
offset_colors_and_weights[:, 1:, 1:, :, direction],
|
| 377 |
+
colors_and_weights[:, :-1, :-1, :, direction],
|
| 378 |
+
atol=0.001,
|
| 379 |
+
)
|
| 380 |
+
self.assertTrue(
|
| 381 |
+
torch.all(offset_colors_and_weights[:, 0, :, :, direction] == 0.0)
|
| 382 |
+
)
|
| 383 |
+
self.assertTrue(
|
| 384 |
+
torch.all(offset_colors_and_weights[:, :, 0, :, direction] == 0.0)
|
| 385 |
+
)
|
| 386 |
+
|
| 387 |
+
# offset_x, offset_y = (-1, 0)
|
| 388 |
+
direction = 1
|
| 389 |
+
self.assertClose(
|
| 390 |
+
offset_colors_and_weights[:, :, 1:, :, direction],
|
| 391 |
+
colors_and_weights[:, :, :-1, :, direction],
|
| 392 |
+
atol=0.001,
|
| 393 |
+
)
|
| 394 |
+
self.assertTrue(
|
| 395 |
+
torch.all(offset_colors_and_weights[:, :, 0, :, direction] == 0.0)
|
| 396 |
+
)
|
| 397 |
+
|
| 398 |
+
# offset_x, offset_y = (-1, 1)
|
| 399 |
+
direction = 2
|
| 400 |
+
self.assertClose(
|
| 401 |
+
offset_colors_and_weights[:, :-1, 1:, :, direction],
|
| 402 |
+
            colors_and_weights[:, 1:, :-1, :, direction],
            atol=0.001,
        )
        self.assertTrue(
            torch.all(offset_colors_and_weights[:, -1, :, :, direction] == 0.0)
        )
        self.assertTrue(
            torch.all(offset_colors_and_weights[:, :, 0, :, direction] == 0.0)
        )

        # offset_x, offset_y = (0, -1)
        direction = 3
        self.assertClose(
            offset_colors_and_weights[:, 1:, :, :, direction],
            colors_and_weights[:, :-1, :, :, direction],
            atol=0.001,
        )
        self.assertTrue(
            torch.all(offset_colors_and_weights[:, 0, :, :, direction] == 0.0)
        )

        # self-splat
        direction = 4
        self.assertClose(
            offset_colors_and_weights[..., direction, :],
            colors_and_weights[..., direction, :],
            atol=0.001,
        )

        # offset_x, offset_y = (0, 1)
        direction = 5
        self.assertClose(
            offset_colors_and_weights[:, :-1, :, :, direction],
            colors_and_weights[:, 1:, :, :, direction],
            atol=0.001,
        )
        self.assertTrue(
            torch.all(offset_colors_and_weights[:, -1, :, :, direction] == 0.0)
        )

        # offset_x, offset_y = (1, -1)
        direction = 6
        self.assertClose(
            offset_colors_and_weights[:, 1:, :-1, :, direction],
            colors_and_weights[:, :-1, 1:, :, direction],
            atol=0.001,
        )
        self.assertTrue(
            torch.all(offset_colors_and_weights[:, 0, :, :, direction] == 0.0)
        )
        self.assertTrue(
            torch.all(offset_colors_and_weights[:, :, -1, :, direction] == 0.0)
        )

        # offset_x, offset_y = (1, 0)
        direction = 7
        self.assertClose(
            offset_colors_and_weights[:, :, :-1, :, direction],
            colors_and_weights[:, :, 1:, :, direction],
            atol=0.001,
        )
        self.assertTrue(
            torch.all(offset_colors_and_weights[:, :, -1, :, direction] == 0.0)
        )

        # offset_x, offset_y = (1, 1)
        direction = 8
        self.assertClose(
            offset_colors_and_weights[:, :-1, :-1, :, direction],
            colors_and_weights[:, 1:, 1:, :, direction],
            atol=0.001,
        )
        self.assertTrue(
            torch.all(offset_colors_and_weights[:, -1, :, :, direction] == 0.0)
        )
        self.assertTrue(
            torch.all(offset_colors_and_weights[:, :, -1, :, direction] == 0.0)
        )


class TestComputeSplattedColorsAndWeights(TestCaseMixin, unittest.TestCase):
    def test_accumulation_background(self):
        # Set occlusion_layers to all -1, so all splats are background splats.
        splat_colors_and_weights = torch.rand((1, 1, 1, 3, 9, 5))
        occlusion_layers = torch.zeros((1, 1, 1, 9)) - 1
        splatted_colors, splatted_weights = _compute_splatted_colors_and_weights(
            occlusion_layers, splat_colors_and_weights
        )

        # Foreground splats (there are none).
        self.assertClose(
            splatted_colors[0, 0, 0, :, 0],
            torch.zeros((4)),
            atol=0.001,
        )

        # Surface splats (there are none).
        self.assertClose(
            splatted_colors[0, 0, 0, :, 1],
            torch.zeros((4)),
            atol=0.001,
        )

        # Background splats.
        self.assertClose(
            splatted_colors[0, 0, 0, :, 2],
            splat_colors_and_weights[0, 0, 0, :, :, :4].sum(dim=0).sum(dim=0),
            atol=0.001,
        )

    def test_accumulation_middle(self):
        # Set occlusion_layers to all 0, so top splats are co-surface with splatted
        # pixels. Thus, the top splatting layer should be accumulated to surface, and
        # all other layers to background.
        splat_colors_and_weights = torch.rand((1, 1, 1, 3, 9, 5))
        occlusion_layers = torch.zeros((1, 1, 1, 9))
        splatted_colors, splatted_weights = _compute_splatted_colors_and_weights(
            occlusion_layers, splat_colors_and_weights
        )

        # Foreground splats (there are none).
        self.assertClose(
            splatted_colors[0, 0, 0, :, 0],
            torch.zeros((4)),
            atol=0.001,
        )

        # Surface splats
        self.assertClose(
            splatted_colors[0, 0, 0, :, 1],
            splat_colors_and_weights[0, 0, 0, 0, :, :4].sum(dim=0),
            atol=0.001,
        )

        # Background splats
        self.assertClose(
            splatted_colors[0, 0, 0, :, 2],
            splat_colors_and_weights[0, 0, 0, 1:, :, :4].sum(dim=0).sum(dim=0),
            atol=0.001,
        )

    def test_accumulation_foreground(self):
        # Set occlusion_layers to all 1. Then the top splatter is a foreground
        # splatter, mid splatter is surface, and bottom splatter is background.
        splat_colors_and_weights = torch.rand((1, 1, 1, 3, 9, 5))
        occlusion_layers = torch.zeros((1, 1, 1, 9)) + 1
        splatted_colors, splatted_weights = _compute_splatted_colors_and_weights(
            occlusion_layers, splat_colors_and_weights
        )

        # Foreground splats
        self.assertClose(
            splatted_colors[0, 0, 0, :, 0],
            splat_colors_and_weights[0, 0, 0, 0:1, :, :4].sum(dim=0).sum(dim=0),
            atol=0.001,
        )

        # Surface splats
        self.assertClose(
            splatted_colors[0, 0, 0, :, 1],
            splat_colors_and_weights[0, 0, 0, 1:2, :, :4].sum(dim=0).sum(dim=0),
            atol=0.001,
        )

        # Background splats
        self.assertClose(
            splatted_colors[0, 0, 0, :, 2],
            splat_colors_and_weights[0, 0, 0, 2:3, :, :4].sum(dim=0).sum(dim=0),
            atol=0.001,
        )


class TestNormalizeAndComposeAllLayers(TestCaseMixin, unittest.TestCase):
    def test_background_color(self):
        # Background should always have alpha=0, and the chosen RGB.
        N, H, W = 2, 3, 4
        # Make a mask with background in the zeroth row of the first image.
        bg_mask = torch.zeros([N, H, W, 1, 1])
        bg_mask[0, :, 0] = 1

        bg_color = torch.tensor([0.2, 0.3, 0.4])

        color_layers = torch.rand((N, H, W, 4, 3)) * (1 - bg_mask)
        color_weights = torch.rand((N, H, W, 1, 3)) * (1 - bg_mask)

        colors = _normalize_and_compose_all_layers(
            bg_color, color_layers, color_weights
        )

        # Background RGB should be .2, .3, .4, and alpha should be 0.
        self.assertClose(
            torch.masked_select(colors, bg_mask.bool()[..., 0]),
            torch.tensor([0.2, 0.3, 0.4, 0, 0.2, 0.3, 0.4, 0, 0.2, 0.3, 0.4, 0.0]),
            atol=0.001,
        )

    def test_compositing_opaque(self):
        # When all colors are opaque, only the foreground layer should be visible.
        N, H, W = 2, 3, 4
        color_layers = torch.rand((N, H, W, 4, 3))
        color_layers[..., 3, :] = 1.0
        color_weights = torch.ones((N, H, W, 1, 3))

        out_colors = _normalize_and_compose_all_layers(
            torch.tensor([0.0, 0.0, 0.0]), color_layers, color_weights
        )
        self.assertClose(out_colors, color_layers[..., 0], atol=0.001)

    def test_compositing_transparencies(self):
        # When foreground layer is transparent and surface and bg are semi-transparent,
        # we should return a mix of the two latter.
        N, H, W = 2, 3, 4
        color_layers = torch.rand((N, H, W, 4, 3))
        color_layers[..., 3, 0] = 0.1  # fg
        color_layers[..., 3, 1] = 0.2  # surface
        color_layers[..., 3, 2] = 0.3  # bg
        color_weights = torch.ones((N, H, W, 1, 3))

        out_colors = _normalize_and_compose_all_layers(
            torch.tensor([0.0, 0.0, 0.0]), color_layers, color_weights
        )
        self.assertClose(
            out_colors,
            color_layers[..., 0]
            + 0.9 * (color_layers[..., 1] + 0.8 * color_layers[..., 2]),
        )
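For reference, the expected value asserted in `test_compositing_transparencies` above is plain front-to-back alpha compositing with unit splat weights. A minimal standalone sketch of the same arithmetic (illustrative only, not part of the uploaded test file; scalars stand in for the test's per-pixel color tensors):

```python
# Front-to-back compositing of three layers (foreground, surface, background),
# mirroring the 0.1 / 0.2 / 0.3 alphas set in test_compositing_transparencies.
fg, surface, bg = 1.0, 2.0, 3.0         # hypothetical layer colors
alpha_fg, alpha_surface = 0.1, 0.2      # alphas of the two closer layers

# Each deeper layer is attenuated by (1 - alpha) of every layer in front of it.
expected = fg + (1 - alpha_fg) * (surface + (1 - alpha_surface) * bg)
print(expected)  # equals fg + 0.9 * (surface + 0.8 * bg), as asserted above
```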
third_party/AnyBimanual/third_party/pytorch3d/tests/test_struct_utils.py
ADDED
@@ -0,0 +1,227 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import unittest

import torch
from pytorch3d.structures import utils as struct_utils

from .common_testing import TestCaseMixin


class TestStructUtils(TestCaseMixin, unittest.TestCase):
    def setUp(self) -> None:
        super().setUp()
        torch.manual_seed(43)

    def _check_list_to_padded_slices(self, x, x_padded, ndim):
        N = len(x)
        for i in range(N):
            slices = [i]
            for dim in range(ndim):
                if x[i].nelement() == 0 and x[i].ndim == 1:
                    slice_ = slice(0, 0, 1)
                else:
                    slice_ = slice(0, x[i].shape[dim], 1)
                slices.append(slice_)
            if x[i].nelement() == 0 and x[i].ndim == 1:
                x_correct = x[i].new_zeros(*[[0] * ndim])
            else:
                x_correct = x[i]
            self.assertClose(x_padded[slices], x_correct)

    def test_list_to_padded(self):
        device = torch.device("cuda:0")
        N = 5
        K = 20
        for ndim in [1, 2, 3, 4]:
            x = []
            for _ in range(N):
                dims = torch.randint(K, size=(ndim,)).tolist()
                x.append(torch.rand(dims, device=device))

            # set 0th element to an empty 1D tensor
            x[0] = torch.tensor([], dtype=x[0].dtype, device=device)

            # set 1st element to an empty tensor with correct number of dims
            x[1] = x[1].new_zeros(*[[0] * ndim])

            pad_size = [K] * ndim
            x_padded = struct_utils.list_to_padded(
                x, pad_size=pad_size, pad_value=0.0, equisized=False
            )

            for dim in range(ndim):
                self.assertEqual(x_padded.shape[dim + 1], K)

            self._check_list_to_padded_slices(x, x_padded, ndim)

            # check for no pad size (defaults to max dimension)
            x_padded = struct_utils.list_to_padded(x, pad_value=0.0, equisized=False)
            max_sizes = (
                max(
                    (0 if (y.nelement() == 0 and y.ndim == 1) else y.shape[dim])
                    for y in x
                )
                for dim in range(ndim)
            )
            for dim, max_size in enumerate(max_sizes):
                self.assertEqual(x_padded.shape[dim + 1], max_size)

            self._check_list_to_padded_slices(x, x_padded, ndim)

            # check for equisized
            x = [torch.rand((K, *([10] * (ndim - 1))), device=device) for _ in range(N)]
            x_padded = struct_utils.list_to_padded(x, equisized=True)
            self.assertClose(x_padded, torch.stack(x, 0))

        # catch ValueError for invalid dimensions
        pad_size = [K] * (ndim + 1)
        with self.assertRaisesRegex(ValueError, "Pad size must"):
            struct_utils.list_to_padded(
                x, pad_size=pad_size, pad_value=0.0, equisized=False
            )

        # invalid input tensor dimensions
        x = []
        ndim = 3
        for _ in range(N):
            dims = torch.randint(K, size=(ndim,)).tolist()
            x.append(torch.rand(dims, device=device))
        pad_size = [K] * 2
        with self.assertRaisesRegex(ValueError, "Pad size must"):
            x_padded = struct_utils.list_to_padded(
                x, pad_size=pad_size, pad_value=0.0, equisized=False
            )

    def test_padded_to_list(self):
        device = torch.device("cuda:0")
        N = 5
        K = 20
        ndim = 2

        for ndim in (2, 3, 4):

            dims = [K] * ndim
            x = torch.rand([N] + dims, device=device)

            x_list = struct_utils.padded_to_list(x)
            for i in range(N):
                self.assertClose(x_list[i], x[i])

            split_size = torch.randint(1, K, size=(N, ndim)).unbind(0)
            x_list = struct_utils.padded_to_list(x, split_size)
            for i in range(N):
                slices = [i]
                for dim in range(ndim):
                    slices.append(slice(0, split_size[i][dim], 1))
                self.assertClose(x_list[i], x[slices])

            # split size is a list of ints
            split_size = [int(z) for z in torch.randint(1, K, size=(N,)).unbind(0)]
            x_list = struct_utils.padded_to_list(x, split_size)
            for i in range(N):
                self.assertClose(x_list[i], x[i][: split_size[i]])

    def test_padded_to_packed(self):
        device = torch.device("cuda:0")
        N = 5
        K = 20
        ndim = 2
        dims = [K] * ndim
        x = torch.rand([N] + dims, device=device)

        # Case 1: no split_size or pad_value provided
        # Check output is just the flattened input.
        x_packed = struct_utils.padded_to_packed(x)
        self.assertTrue(x_packed.shape == (x.shape[0] * x.shape[1], x.shape[2]))
        self.assertClose(x_packed, x.reshape(-1, K))

        # Case 2: pad_value is provided.
        # Check each section of the packed tensor matches the
        # corresponding unpadded elements of the padded tensor.
        # Check that only rows where all the values are padded
        # are removed in the conversion to packed.
        pad_value = -1
        x_list = []
        split_size = []
        for _ in range(N):
            dim = torch.randint(K, size=(1,)).item()
            # Add some random values in the input which are the same as the pad_value.
            # These should not be filtered out.
            x_list.append(
                torch.randint(low=pad_value, high=10, size=(dim, K), device=device)
            )
            split_size.append(dim)
        x_padded = struct_utils.list_to_padded(x_list, pad_value=pad_value)
        x_packed = struct_utils.padded_to_packed(x_padded, pad_value=pad_value)
        curr = 0
        for i in range(N):
            self.assertClose(x_packed[curr : curr + split_size[i], ...], x_list[i])
            self.assertClose(torch.cat(x_list), x_packed)
            curr += split_size[i]

        # Case 3: split_size is provided.
        # Check each section of the packed tensor matches the corresponding
        # unpadded elements.
        x_packed = struct_utils.padded_to_packed(x_padded, split_size=split_size)
        curr = 0
        for i in range(N):
            self.assertClose(x_packed[curr : curr + split_size[i], ...], x_list[i])
            self.assertClose(torch.cat(x_list), x_packed)
            curr += split_size[i]

        # Case 4: split_size of the wrong shape is provided.
        # Raise an error.
        split_size = torch.randint(1, K, size=(2 * N,)).view(N, 2).unbind(0)
        with self.assertRaisesRegex(ValueError, "1-dimensional"):
            x_packed = struct_utils.padded_to_packed(x_padded, split_size=split_size)

        split_size = torch.randint(1, K, size=(2 * N,)).view(N * 2).tolist()
        with self.assertRaisesRegex(
            ValueError, "same length as inputs first dimension"
        ):
            x_packed = struct_utils.padded_to_packed(x_padded, split_size=split_size)

        # Case 5: both pad_value and split_size are provided.
        # Raise an error.
        with self.assertRaisesRegex(ValueError, "Only one of"):
            x_packed = struct_utils.padded_to_packed(
                x_padded, split_size=split_size, pad_value=-1
            )

        # Case 6: Input has more than 3 dims.
        # Raise an error.
        x = torch.rand((N, K, K, K, K), device=device)
        split_size = torch.randint(1, K, size=(N,)).tolist()
        with self.assertRaisesRegex(ValueError, "Supports only"):
            struct_utils.padded_to_packed(x, split_size=split_size)

    def test_list_to_packed(self):
        device = torch.device("cuda:0")
        N = 5
        K = 20
        x, x_dims = [], []
        dim2 = torch.randint(K, size=(1,)).item()
        for _ in range(N):
            dim1 = torch.randint(K, size=(1,)).item()
            x_dims.append(dim1)
            x.append(torch.rand([dim1, dim2], device=device))

        out = struct_utils.list_to_packed(x)
        x_packed = out[0]
        num_items = out[1]
        item_packed_first_idx = out[2]
        item_packed_to_list_idx = out[3]

        cur = 0
        for i in range(N):
            self.assertTrue(num_items[i] == x_dims[i])
            self.assertTrue(item_packed_first_idx[i] == cur)
            self.assertTrue(item_packed_to_list_idx[cur : cur + x_dims[i]].eq(i).all())
            self.assertClose(x_packed[cur : cur + x_dims[i]], x[i])
            cur += x_dims[i]
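The list/padded/packed conversions exercised in the tests above can be tried end-to-end on CPU. A small sketch (illustrative only, not part of the uploaded file; it uses just the `struct_utils` functions imported by the test):

```python
import torch
from pytorch3d.structures import utils as struct_utils

# Two ragged feature tensors with different first dimensions.
x = [torch.arange(6.0).reshape(3, 2), torch.arange(4.0).reshape(2, 2)]

# list -> padded: stacked into shape (2, 3, 2), the short entry zero-padded.
x_padded = struct_utils.list_to_padded(x, pad_value=0.0)

# padded -> packed: rows concatenated back together when split sizes are given.
x_packed = struct_utils.padded_to_packed(x_padded, split_size=[3, 2])
assert torch.equal(x_packed, torch.cat(x))

# list -> packed also returns the bookkeeping tensors checked in
# test_list_to_packed: item counts, first indices, and packed-to-list indices.
packed, num_items, first_idx, to_list_idx = struct_utils.list_to_packed(x)
assert torch.equal(packed, torch.cat(x))
```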
third_party/AnyBimanual/third_party/pytorch3d/tests/test_subdivide_meshes.py
ADDED
@@ -0,0 +1,234 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import unittest

import torch
from pytorch3d.ops.subdivide_meshes import SubdivideMeshes
from pytorch3d.structures.meshes import Meshes
from pytorch3d.utils.ico_sphere import ico_sphere

from .common_testing import TestCaseMixin


class TestSubdivideMeshes(TestCaseMixin, unittest.TestCase):
    def simple_subdivide(self, with_init=False):
        # Create a mesh with one face and check the subdivided mesh has
        # 4 faces with the correct vertex coordinates.
        device = torch.device("cuda:0")
        verts = torch.tensor(
            [[0.5, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
            dtype=torch.float32,
            device=device,
            requires_grad=True,
        )
        faces = torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device)
        mesh = Meshes(verts=[verts], faces=[faces])
        mesh_init = mesh.clone() if with_init else None
        subdivide = SubdivideMeshes(meshes=mesh_init)
        new_mesh = subdivide(mesh)

        # Subdivided face:
        #
        #           v0
        #           /\
        #          /  \
        #         / f0 \
        #     v4 /______\ v3
        #       /\      /\
        #      /  \ f3 /  \
        #     / f2 \  / f1 \
        #    /______\/______\
        #   v2      v5      v1
        #
        gt_subdivide_verts = torch.tensor(
            [
                [0.5, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
                [0.75, 0.5, 0.0],
                [0.25, 0.5, 0.0],
                [0.5, 0.0, 0.0],
            ],
            dtype=torch.float32,
            device=device,
        )
        gt_subdivide_faces = torch.tensor(
            [[0, 3, 4], [1, 5, 3], [2, 4, 5], [5, 4, 3]],
            dtype=torch.int64,
            device=device,
        )
        new_verts, new_faces = new_mesh.get_mesh_verts_faces(0)
        self.assertClose(new_verts, gt_subdivide_verts)
        self.assertClose(new_faces, gt_subdivide_faces)
        self.assertTrue(new_verts.requires_grad == verts.requires_grad)

    def test_simple_subdivide(self):
        self.simple_subdivide()

    def test_simple_subdivide_with_init(self):
        self.simple_subdivide(with_init=True)

    def test_heterogeneous_meshes(self):
        device = torch.device("cuda:0")
        verts1 = torch.tensor(
            [[0.5, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
            dtype=torch.float32,
            device=device,
            requires_grad=True,
        )
        faces1 = torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device)
        verts2 = torch.tensor(
            [[0.5, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [1.5, 1.0, 0.0]],
            dtype=torch.float32,
            device=device,
            requires_grad=True,
        )
        faces2 = torch.tensor([[0, 1, 2], [0, 3, 1]], dtype=torch.int64, device=device)
        faces3 = torch.tensor([[0, 1, 2], [0, 2, 3]], dtype=torch.int64, device=device)
        mesh = Meshes(verts=[verts1, verts2, verts2], faces=[faces1, faces2, faces3])
        subdivide = SubdivideMeshes()
        new_mesh = subdivide(mesh.clone())

        gt_subdivided_verts1 = torch.tensor(
            [
                [0.5, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
                [0.75, 0.5, 0.0],
                [0.25, 0.5, 0.0],
                [0.5, 0.0, 0.0],
            ],
            dtype=torch.float32,
            device=device,
        )
        gt_subdivided_faces1 = torch.tensor(
            [[0, 3, 4], [1, 5, 3], [2, 4, 5], [5, 4, 3]],
            dtype=torch.int64,
            device=device,
        )
        # faces2:
        #
        #         v0 _______e2_______ v3
        #           /\              /
        #          /  \            /
        #         /    \          /
        #     e1 /      \ e0     / e4
        #       /        \      /
        #      /          \    /
        #     /            \  /
        #    /______________\/
        #  v2       e3       v1
        #
        # Subdivided faces2:
        #
        #         v0 _______v6_______ v3
        #           /\      /\      /
        #          /  \ f1 /  \ f3 /
        #         / f0 \  / f7 \  /
        #     v5 /______v4______\/ v8
        #       /\      /\      /
        #      /  \ f6 /  \ f5 /
        #     / f4 \  / f2 \  /
        #    /______\/______\/
        #  v2       v7      v1
        #
        gt_subdivided_verts2 = torch.tensor(
            [
                [0.5, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0],
                [1.5, 1.0, 0.0],
                [0.75, 0.5, 0.0],
                [0.25, 0.5, 0.0],
                [1.0, 1.0, 0.0],
                [0.5, 0.0, 0.0],
                [1.25, 0.5, 0.0],
            ],
            dtype=torch.float32,
            device=device,
        )
        gt_subdivided_faces2 = torch.tensor(
            [
                [0, 4, 5],
                [0, 6, 4],
                [1, 7, 4],
                [3, 8, 6],
                [2, 5, 7],
                [1, 4, 8],
                [7, 5, 4],
                [8, 4, 6],
            ],
            dtype=torch.int64,
            device=device,
        )
        gt_subdivided_verts3 = gt_subdivided_verts2.clone()
        gt_subdivided_verts3[-1, :] = torch.tensor(
            [0.75, 0.5, 0], dtype=torch.float32, device=device
        )
        gt_subdivided_faces3 = torch.tensor(
            [
                [0, 4, 5],
                [0, 5, 6],
                [1, 7, 4],
                [2, 8, 5],
                [2, 5, 7],
                [3, 6, 8],
                [7, 5, 4],
                [8, 6, 5],
            ],
            dtype=torch.int64,
            device=device,
        )
        new_mesh_verts1, new_mesh_faces1 = new_mesh.get_mesh_verts_faces(0)
        new_mesh_verts2, new_mesh_faces2 = new_mesh.get_mesh_verts_faces(1)
        new_mesh_verts3, new_mesh_faces3 = new_mesh.get_mesh_verts_faces(2)
        self.assertClose(new_mesh_verts1, gt_subdivided_verts1)
        self.assertClose(new_mesh_faces1, gt_subdivided_faces1)
        self.assertClose(new_mesh_verts2, gt_subdivided_verts2)
        self.assertClose(new_mesh_faces2, gt_subdivided_faces2)
        self.assertClose(new_mesh_verts3, gt_subdivided_verts3)
        self.assertClose(new_mesh_faces3, gt_subdivided_faces3)
        self.assertTrue(new_mesh_verts1.requires_grad == verts1.requires_grad)
        self.assertTrue(new_mesh_verts2.requires_grad == verts2.requires_grad)
        self.assertTrue(new_mesh_verts3.requires_grad == verts2.requires_grad)

    def test_subdivide_features(self):
        device = torch.device("cuda:0")
        mesh = ico_sphere(0, device)
        N = 10
        mesh = mesh.extend(N)
        edges = mesh.edges_packed()
        V = mesh.num_verts_per_mesh()[0]
        D = 256
        feats = torch.rand(
            (N * V, D), dtype=torch.float32, device=device, requires_grad=True
        )  # packed features
        app_feats = feats[edges].mean(1)
        subdivide = SubdivideMeshes()
        new_mesh, new_feats = subdivide(mesh, feats)
        gt_feats = torch.cat(
            (feats.view(N, V, D), app_feats.view(N, -1, D)), dim=1
        ).view(-1, D)
        self.assertClose(new_feats, gt_feats)
        self.assertTrue(new_feats.requires_grad == gt_feats.requires_grad)

    @staticmethod
    def subdivide_meshes_with_init(num_meshes: int = 10, same_topo: bool = False):
        device = torch.device("cuda:0")
        meshes = ico_sphere(0, device=device)
        if num_meshes > 1:
            meshes = meshes.extend(num_meshes)
        meshes_init = meshes.clone() if same_topo else None
        torch.cuda.synchronize()

        def subdivide_meshes():
            subdivide = SubdivideMeshes(meshes=meshes_init)
            subdivide(meshes=meshes.clone())
            torch.cuda.synchronize()

        return subdivide_meshes
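The behaviour asserted in `simple_subdivide` above can be reproduced directly. A minimal sketch (illustrative only, not part of the uploaded file; the tests above run on CUDA, and this assumes the op also works on CPU):

```python
import torch
from pytorch3d.ops.subdivide_meshes import SubdivideMeshes
from pytorch3d.structures.meshes import Meshes

# A single triangle: one subdivision step splits every edge at its midpoint,
# turning 1 face into 4 faces and adding 3 midpoint vertices.
verts = torch.tensor(
    [[0.5, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32
)
faces = torch.tensor([[0, 1, 2]], dtype=torch.int64)
mesh = Meshes(verts=[verts], faces=[faces])

new_mesh = SubdivideMeshes()(mesh)
new_verts, new_faces = new_mesh.get_mesh_verts_faces(0)
print(new_verts.shape, new_faces.shape)  # torch.Size([6, 3]) torch.Size([4, 3])
```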
third_party/AnyBimanual/third_party/pytorch3d/tests/test_symeig3x3.py
ADDED
@@ -0,0 +1,264 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import unittest

import torch
from pytorch3d.common.workaround import symeig3x3
from pytorch3d.transforms.rotation_conversions import random_rotations

from .common_testing import get_random_cuda_device, TestCaseMixin


class TestSymEig3x3(TestCaseMixin, unittest.TestCase):
    TEST_BATCH_SIZE = 1024

    @staticmethod
    def create_random_sym3x3(device, n):
        random_3x3 = torch.randn((n, 3, 3), device=device)
        random_3x3_T = torch.transpose(random_3x3, 1, 2)
        random_sym_3x3 = (random_3x3 * random_3x3_T).contiguous()

        return random_sym_3x3

    @staticmethod
    def create_diag_sym3x3(device, n, noise=0.0):
        # Create purly diagonal matrices
        random_diag_3x3 = torch.randn((n, 3), device=device).diag_embed()

        # Make them 'almost' diagonal
        random_diag_3x3 += noise * TestSymEig3x3.create_random_sym3x3(device, n)

        return random_diag_3x3

    def setUp(self) -> None:
        super().setUp()
        torch.manual_seed(42)

        self._gpu = get_random_cuda_device()
        self._cpu = torch.device("cpu")

    def test_is_eigen_gpu(self):
        test_input = self.create_random_sym3x3(self._gpu, n=self.TEST_BATCH_SIZE)

        self._test_is_eigen(test_input)

    def test_is_eigen_cpu(self):
        test_input = self.create_random_sym3x3(self._cpu, n=self.TEST_BATCH_SIZE)

        self._test_is_eigen(test_input)

    def _test_is_eigen(self, test_input, atol=1e-04, rtol=1e-02):
        """
        Verify that values and vectors produced are really eigenvalues and eigenvectors
        and can restore the original input matrix with good precision
        """
        eigenvalues, eigenvectors = symeig3x3(test_input, eigenvectors=True)

        self.assertClose(
            test_input,
            eigenvectors @ eigenvalues.diag_embed() @ eigenvectors.transpose(-2, -1),
            atol=atol,
            rtol=rtol,
        )

    def test_eigenvectors_are_orthonormal_gpu(self):
        test_input = self.create_random_sym3x3(self._gpu, n=self.TEST_BATCH_SIZE)

        self._test_eigenvectors_are_orthonormal(test_input)

    def test_eigenvectors_are_orthonormal_cpu(self):
        test_input = self.create_random_sym3x3(self._cpu, n=self.TEST_BATCH_SIZE)

        self._test_eigenvectors_are_orthonormal(test_input)

    def _test_eigenvectors_are_orthonormal(self, test_input):
        """
        Verify that eigenvectors are an orthonormal set
        """
        eigenvalues, eigenvectors = symeig3x3(test_input, eigenvectors=True)

        batched_eye = torch.zeros_like(test_input)
        batched_eye[..., :, :] = torch.eye(3, device=batched_eye.device)

        self.assertClose(
            batched_eye, eigenvectors @ eigenvectors.transpose(-2, -1), atol=1e-06
        )

    def test_is_not_nan_or_inf_gpu(self):
        test_input = self.create_random_sym3x3(self._gpu, n=self.TEST_BATCH_SIZE)

        self._test_is_not_nan_or_inf(test_input)

    def test_is_not_nan_or_inf_cpu(self):
        test_input = self.create_random_sym3x3(self._cpu, n=self.TEST_BATCH_SIZE)

        self._test_is_not_nan_or_inf(test_input)

    def _test_is_not_nan_or_inf(self, test_input):
        eigenvalues, eigenvectors = symeig3x3(test_input, eigenvectors=True)

        self.assertTrue(torch.isfinite(eigenvalues).all())
        self.assertTrue(torch.isfinite(eigenvectors).all())

    def test_degenerate_inputs_gpu(self):
        self._test_degenerate_inputs(self._gpu)

    def test_degenerate_inputs_cpu(self):
        self._test_degenerate_inputs(self._cpu)

    def _test_degenerate_inputs(self, device):
        """
        Test degenerate case when input matrices are diagonal or near-diagonal
        """

        # Purely diagonal case
        test_input = self.create_diag_sym3x3(device, self.TEST_BATCH_SIZE)

        self._test_is_not_nan_or_inf(test_input)
        self._test_is_eigen(test_input)
        self._test_eigenvectors_are_orthonormal(test_input)

        # Almost-diagonal case
        test_input = self.create_diag_sym3x3(device, self.TEST_BATCH_SIZE, noise=1e-4)

        self._test_is_not_nan_or_inf(test_input)
        self._test_is_eigen(test_input)
        self._test_eigenvectors_are_orthonormal(test_input)

    def test_gradients_cpu(self):
        self._test_gradients(self._cpu)

    def test_gradients_gpu(self):
        self._test_gradients(self._gpu)

    def _test_gradients(self, device):
        """
        Tests if gradients pass though without any problems (infs, nans etc) and
        also performs gradcheck (compares numerical and analytical gradients)
        """
        test_random_input = self.create_random_sym3x3(device, n=16)
        test_diag_input = self.create_diag_sym3x3(device, n=16)
        test_almost_diag_input = self.create_diag_sym3x3(device, n=16, noise=1e-4)

        test_input = torch.cat(
            (test_random_input, test_diag_input, test_almost_diag_input)
        )
        test_input.requires_grad = True

        with torch.autograd.detect_anomaly():
            eigenvalues, eigenvectors = symeig3x3(test_input, eigenvectors=True)

            loss = eigenvalues.mean() + eigenvectors.mean()
            loss.backward()

        test_random_input.requires_grad = True
        # Inputs are converted to double to increase the precision of gradcheck.
        torch.autograd.gradcheck(
            symeig3x3, test_random_input.double(), eps=1e-6, atol=1e-2, rtol=1e-2
        )

    def _test_eigenvalues_and_eigenvectors(
        self, test_eigenvectors, test_eigenvalues, atol=1e-04, rtol=1e-04
    ):
        test_input = (
            test_eigenvectors.transpose(-2, -1)
            @ test_eigenvalues.diag_embed()
            @ test_eigenvectors
        )

        test_eigenvalues_sorted, _ = torch.sort(test_eigenvalues, dim=-1)

        eigenvalues, eigenvectors = symeig3x3(test_input, eigenvectors=True)

        self.assertClose(
            test_eigenvalues_sorted,
            eigenvalues,
            atol=atol,
            rtol=rtol,
        )

        self._test_is_not_nan_or_inf(test_input)
        self._test_is_eigen(test_input, atol=atol, rtol=rtol)
        self._test_eigenvectors_are_orthonormal(test_input)

    def test_degenerate_eigenvalues_gpu(self):
        self._test_degenerate_eigenvalues(self._gpu)

    def test_degenerate_eigenvalues_cpu(self):
        self._test_degenerate_eigenvalues(self._cpu)

    def _test_degenerate_eigenvalues(self, device):
        """
        Test degenerate eigenvalues like zero-valued and with 2-/3-multiplicity
        """
        # Error tolerances for degenerate values are increased as things might become
        # numerically unstable
        deg_atol = 1e-3
        deg_rtol = 1.0

        # Construct random orthonormal sets
        test_eigenvecs = random_rotations(n=self.TEST_BATCH_SIZE, device=device)

        # Construct random eigenvalues
        test_eigenvals = torch.randn(
            (self.TEST_BATCH_SIZE, 3), device=test_eigenvecs.device
        )
        self._test_eigenvalues_and_eigenvectors(
            test_eigenvecs, test_eigenvals, atol=deg_atol, rtol=deg_rtol
        )

        # First eigenvalue is always 0.0 here: [0.0 X Y]
        test_eigenvals_with_zero = test_eigenvals.clone()
        test_eigenvals_with_zero[..., 0] = 0.0
        self._test_eigenvalues_and_eigenvectors(
            test_eigenvecs, test_eigenvals_with_zero, atol=deg_atol, rtol=deg_rtol
        )

        # First two eigenvalues are always the same here: [X X Y]
        test_eigenvals_with_multiplicity2 = test_eigenvals.clone()
        test_eigenvals_with_multiplicity2[..., 1] = test_eigenvals_with_multiplicity2[
            ..., 0
        ]
        self._test_eigenvalues_and_eigenvectors(
            test_eigenvecs,
            test_eigenvals_with_multiplicity2,
            atol=deg_atol,
            rtol=deg_rtol,
        )

        # All three eigenvalues are the same here: [X X X]
        test_eigenvals_with_multiplicity3 = test_eigenvals_with_multiplicity2.clone()
        test_eigenvals_with_multiplicity3[..., 2] = test_eigenvals_with_multiplicity2[
            ..., 0
        ]
        self._test_eigenvalues_and_eigenvectors(
            test_eigenvecs,
            test_eigenvals_with_multiplicity3,
            atol=deg_atol,
            rtol=deg_rtol,
        )

    def test_more_dimensions(self):
        """
        Tests if function supports arbitrary leading dimensions
        """
        repeat = 4

        test_input = self.create_random_sym3x3(self._cpu, n=16)
        test_input_4d = test_input[None, ...].expand((repeat,) + test_input.shape)

        eigenvalues, eigenvectors = symeig3x3(test_input, eigenvectors=True)
        eigenvalues_4d, eigenvectors_4d = symeig3x3(test_input_4d, eigenvectors=True)

        eigenvalues_4d_gt = eigenvalues[None, ...].expand((repeat,) + eigenvalues.shape)
        eigenvectors_4d_gt = eigenvectors[None, ...].expand(
            (repeat,) + eigenvectors.shape
        )

        self.assertClose(eigenvalues_4d_gt, eigenvalues_4d)
        self.assertClose(eigenvectors_4d_gt, eigenvectors_4d)
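A minimal round trip through `symeig3x3`, mirroring the reconstruction check in `_test_is_eigen` above (illustrative only, not part of the uploaded file; only the function imported by the test is assumed):

```python
import torch
from pytorch3d.common.workaround import symeig3x3

# A batch of random symmetric 3x3 matrices.
a = torch.randn(8, 3, 3)
sym = a @ a.transpose(-2, -1)

# Eigenvalues come back in ascending order (the test sorts its ground truth
# before comparing); the eigenvectors satisfy V diag(w) V^T == input.
w, v = symeig3x3(sym, eigenvectors=True)
recon = v @ w.diag_embed() @ v.transpose(-2, -1)
print(torch.allclose(recon, sym, atol=1e-4, rtol=1e-2))
```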
third_party/AnyBimanual/third_party/pytorch3d/tests/test_texturing.py
ADDED
@@ -0,0 +1,1325 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import unittest

import torch
import torch.nn.functional as F
from pytorch3d.renderer.mesh.rasterizer import Fragments
from pytorch3d.renderer.mesh.textures import (
    _list_to_padded_wrapper,
    TexturesAtlas,
    TexturesUV,
    TexturesVertex,
)
from pytorch3d.renderer.mesh.utils import (
    pack_rectangles,
    pack_unique_rectangles,
    Rectangle,
)
from pytorch3d.structures import list_to_packed, Meshes, packed_to_list

from .common_testing import TestCaseMixin
from .test_meshes import init_mesh


def tryindex(self, index, tex, meshes, source):
    tex2 = tex[index]
    meshes2 = meshes[index]
    tex_from_meshes = meshes2.textures
    for item in source:
        basic = source[item][index]
        from_texture = getattr(tex2, item + "_padded")()
        from_meshes = getattr(tex_from_meshes, item + "_padded")()
        if isinstance(index, int):
            basic = basic[None]

        if len(basic) == 0:
            self.assertEqual(len(from_texture), 0)
            self.assertEqual(len(from_meshes), 0)
        else:
            self.assertClose(basic, from_texture)
            self.assertClose(basic, from_meshes)
            self.assertEqual(from_texture.ndim, getattr(tex, item + "_padded")().ndim)
            item_list = getattr(tex_from_meshes, item + "_list")()
            self.assertEqual(basic.shape[0], len(item_list))
            for i, elem in enumerate(item_list):
                self.assertClose(elem, basic[i])


class TestTexturesVertex(TestCaseMixin, unittest.TestCase):
    def test_sample_vertex_textures(self):
        """
        This tests both interpolate_vertex_colors as well as
        interpolate_face_attributes.
        """
        verts = torch.randn((4, 3), dtype=torch.float32)
        faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
        vert_tex = torch.tensor(
            [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]], dtype=torch.float32
        )
        verts_features = vert_tex
        tex = TexturesVertex(verts_features=[verts_features])
        mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
        pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
        barycentric_coords = torch.tensor(
            [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
        ).view(1, 1, 1, 2, -1)
        expected_vals = torch.tensor(
            [[0.5, 1.0, 0.3], [0.3, 1.0, 0.9]], dtype=torch.float32
        ).view(1, 1, 1, 2, -1)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=barycentric_coords,
            zbuf=torch.ones_like(pix_to_face),
            dists=torch.ones_like(pix_to_face),
        )
        # sample_textures calls interpolate_vertex_colors
        texels = mesh.sample_textures(fragments)
        self.assertTrue(torch.allclose(texels, expected_vals[None, :]))

    def test_sample_vertex_textures_grad(self):
        verts = torch.randn((4, 3), dtype=torch.float32)
        faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
        vert_tex = torch.tensor(
            [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]],
            dtype=torch.float32,
            requires_grad=True,
        )
        verts_features = vert_tex
        tex = TexturesVertex(verts_features=[verts_features])
        mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
        pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
        barycentric_coords = torch.tensor(
            [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
        ).view(1, 1, 1, 2, -1)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=barycentric_coords,
            zbuf=torch.ones_like(pix_to_face),
            dists=torch.ones_like(pix_to_face),
        )
        grad_vert_tex = torch.tensor(
            [[0.3, 0.3, 0.3], [0.9, 0.9, 0.9], [0.5, 0.5, 0.5], [0.3, 0.3, 0.3]],
            dtype=torch.float32,
        )
        texels = mesh.sample_textures(fragments)
        texels.sum().backward()
        self.assertTrue(hasattr(vert_tex, "grad"))
        self.assertTrue(torch.allclose(vert_tex.grad, grad_vert_tex[None, :]))

    def test_textures_vertex_init_fail(self):
        # Incorrect sized tensors
        with self.assertRaisesRegex(ValueError, "verts_features"):
            TexturesVertex(verts_features=torch.rand(size=(5, 10)))

        # Not a list or a tensor
        with self.assertRaisesRegex(ValueError, "verts_features"):
            TexturesVertex(verts_features=(1, 1, 1))

    def test_faces_verts_textures(self):
        device = torch.device("cuda:0")
        verts = torch.randn((2, 4, 3), dtype=torch.float32, device=device)
        faces = torch.tensor(
            [[[2, 1, 0], [3, 1, 0]], [[1, 3, 0], [2, 1, 3]]],
            dtype=torch.int64,
            device=device,
        )

        # define TexturesVertex
        verts_texture = torch.rand(verts.shape, device=device)
        textures = TexturesVertex(verts_features=verts_texture)

        # compute packed faces
        ff = faces.unbind(0)
        faces_packed = torch.cat([ff[0], ff[1] + verts.shape[1]])

        # face verts textures
        faces_verts_texts = textures.faces_verts_textures_packed(faces_packed)

        verts_texts_packed = torch.cat(verts_texture.unbind(0))
        faces_verts_texts_packed = verts_texts_packed[faces_packed]

        self.assertClose(faces_verts_texts_packed, faces_verts_texts)

    def test_submeshes(self):
        # define TexturesVertex
        verts_features = torch.tensor(
            [
                [1, 0, 0],
                [1, 0, 0],
                [1, 0, 0],
                [1, 0, 0],
                [0, 1, 0],
                [0, 1, 0],
                [0, 1, 0],
                [0, 1, 0],
            ],
            dtype=torch.float32,
        )

        textures = TexturesVertex(
            verts_features=[verts_features, verts_features, verts_features]
        )
        subtextures = textures.submeshes(
            [
                [
                    torch.LongTensor([0, 2, 3]),
                    torch.LongTensor(list(range(8))),
                ],
                [],
                [
                    torch.LongTensor([4]),
                ],
            ],
            None,
        )

        subtextures_features = subtextures.verts_features_list()

        self.assertEqual(len(subtextures_features), 3)
        self.assertTrue(
            torch.equal(
                subtextures_features[0],
                torch.FloatTensor([[1, 0, 0], [1, 0, 0], [1, 0, 0]]),
            )
        )
        self.assertTrue(torch.equal(subtextures_features[1], verts_features))
        self.assertTrue(
            torch.equal(subtextures_features[2], torch.FloatTensor([[0, 1, 0]]))
        )

    def test_clone(self):
        tex = TexturesVertex(verts_features=torch.rand(size=(10, 100, 128)))
        tex.verts_features_list()
        tex_cloned = tex.clone()
        self.assertSeparate(
            tex._verts_features_padded, tex_cloned._verts_features_padded
        )
        self.assertClose(tex._verts_features_padded, tex_cloned._verts_features_padded)
        self.assertSeparate(tex.valid, tex_cloned.valid)
        self.assertTrue(tex.valid.eq(tex_cloned.valid).all())
        for i in range(tex._N):
            self.assertSeparate(
                tex._verts_features_list[i], tex_cloned._verts_features_list[i]
            )
            self.assertClose(
                tex._verts_features_list[i], tex_cloned._verts_features_list[i]
            )

    def test_detach(self):
        tex = TexturesVertex(
            verts_features=torch.rand(size=(10, 100, 128), requires_grad=True)
        )
        tex.verts_features_list()
        tex_detached = tex.detach()
        self.assertFalse(tex_detached._verts_features_padded.requires_grad)
        self.assertClose(
            tex_detached._verts_features_padded, tex._verts_features_padded
        )
        for i in range(tex._N):
            self.assertClose(
                tex._verts_features_list[i], tex_detached._verts_features_list[i]
            )
            self.assertFalse(tex_detached._verts_features_list[i].requires_grad)

    def test_extend(self):
        B = 10
        mesh = init_mesh(B, 30, 50)
        V = mesh._V
        tex_uv = TexturesVertex(verts_features=torch.randn((B, V, 3)))
        tex_mesh = Meshes(
            verts=mesh.verts_padded(), faces=mesh.faces_padded(), textures=tex_uv
        )
        N = 20
        new_mesh = tex_mesh.extend(N)

        self.assertEqual(len(tex_mesh) * N, len(new_mesh))

        tex_init = tex_mesh.textures
        new_tex = new_mesh.textures

        for i in range(len(tex_mesh)):
            for n in range(N):
                self.assertClose(
                    tex_init.verts_features_list()[i],
                    new_tex.verts_features_list()[i * N + n],
                )
                self.assertClose(
                    tex_init._num_faces_per_mesh[i],
                    new_tex._num_faces_per_mesh[i * N + n],
                )

        self.assertAllSeparate(
            [tex_init.verts_features_padded(), new_tex.verts_features_padded()]
        )

        with self.assertRaises(ValueError):
            tex_mesh.extend(N=-1)

    def test_padded_to_packed(self):
        # Case where each face in the mesh has 3 unique uv vertex indices
        # - i.e. even if a vertex is shared between multiple faces it will
        # have a unique uv coordinate for each face.
        num_verts_per_mesh = [9, 6]
        D = 10
        verts_features_list = [torch.rand(v, D) for v in num_verts_per_mesh]
        verts_features_packed = list_to_packed(verts_features_list)[0]
        verts_features_list = packed_to_list(verts_features_packed, num_verts_per_mesh)
        tex = TexturesVertex(verts_features=verts_features_list)

        # This is set inside Meshes when textures is passed as an input.
        # Here we set _num_faces_per_mesh and _num_verts_per_mesh explicity.
        tex1 = tex.clone()
        tex1._num_verts_per_mesh = num_verts_per_mesh
        verts_packed = tex1.verts_features_packed()
        verts_verts_list = tex1.verts_features_list()
        verts_padded = tex1.verts_features_padded()

        for f1, f2 in zip(verts_verts_list, verts_features_list):
            self.assertTrue((f1 == f2).all().item())

        self.assertTrue(verts_packed.shape == (sum(num_verts_per_mesh), D))
        self.assertTrue(verts_padded.shape == (2, 9, D))

        # Case where num_verts_per_mesh is not set and textures
        # are initialized with a padded tensor.
        tex2 = TexturesVertex(verts_features=verts_padded)
        verts_packed = tex2.verts_features_packed()
        verts_list = tex2.verts_features_list()

        # Packed is just flattened padded as num_verts_per_mesh
        # has not been provided.
        self.assertTrue(verts_packed.shape == (9 * 2, D))

        for i, (f1, f2) in enumerate(zip(verts_list, verts_features_list)):
            n = num_verts_per_mesh[i]
            self.assertTrue((f1[:n] == f2).all().item())

    def test_getitem(self):
        N = 5
        V = 20
        source = {"verts_features": torch.randn(size=(N, V, 128))}
        tex = TexturesVertex(verts_features=source["verts_features"])

        verts = torch.rand(size=(N, V, 3))
        faces = torch.randint(size=(N, 10, 3), high=V)
        meshes = Meshes(verts=verts, faces=faces, textures=tex)

        tryindex(self, 2, tex, meshes, source)
        tryindex(self, slice(0, 2, 1), tex, meshes, source)
        index = torch.tensor([1, 0, 1, 0, 0], dtype=torch.bool)
        tryindex(self, index, tex, meshes, source)
        index = torch.tensor([0, 0, 0, 0, 0], dtype=torch.bool)
        tryindex(self, index, tex, meshes, source)
        index = torch.tensor([1, 2], dtype=torch.int64)
        tryindex(self, index, tex, meshes, source)
        tryindex(self, [2, 4], tex, meshes, source)

    def test_sample_textures_error(self):
        N = 5
        V = 20
        verts = torch.rand(size=(N, V, 3))
        faces = torch.randint(size=(N, 10, 3), high=V)
        tex = TexturesVertex(verts_features=torch.randn(size=(N, 10, 128)))

        # Verts features have the wrong number of verts
        with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
            Meshes(verts=verts, faces=faces, textures=tex)

        # Verts features have the wrong batch dim
|
| 335 |
+
tex = TexturesVertex(verts_features=torch.randn(size=(1, V, 128)))
|
| 336 |
+
with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
|
| 337 |
+
Meshes(verts=verts, faces=faces, textures=tex)
|
| 338 |
+
|
| 339 |
+
meshes = Meshes(verts=verts, faces=faces)
|
| 340 |
+
meshes.textures = tex
|
| 341 |
+
|
| 342 |
+
# Cannot use the texture attribute set on meshes for sampling
|
| 343 |
+
# textures if the dimensions don't match
|
| 344 |
+
with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
|
| 345 |
+
meshes.sample_textures(None)
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
class TestTexturesAtlas(TestCaseMixin, unittest.TestCase):
|
| 349 |
+
def test_sample_texture_atlas(self):
|
| 350 |
+
N, F, R = 1, 2, 2
|
| 351 |
+
verts = torch.randn((4, 3), dtype=torch.float32)
|
| 352 |
+
faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
|
| 353 |
+
faces_atlas = torch.rand(size=(N, F, R, R, 3))
|
| 354 |
+
tex = TexturesAtlas(atlas=faces_atlas)
|
| 355 |
+
mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
|
| 356 |
+
pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
|
| 357 |
+
barycentric_coords = torch.tensor(
|
| 358 |
+
[[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
|
| 359 |
+
).view(1, 1, 1, 2, -1)
|
| 360 |
+
expected_vals = torch.tensor(
|
| 361 |
+
[[0.5, 1.0, 0.3], [0.3, 1.0, 0.9]], dtype=torch.float32
|
| 362 |
+
)
|
| 363 |
+
expected_vals = torch.zeros((1, 1, 1, 2, 3), dtype=torch.float32)
|
| 364 |
+
expected_vals[..., 0, :] = faces_atlas[0, 0, 0, 1, ...]
|
| 365 |
+
expected_vals[..., 1, :] = faces_atlas[0, 1, 1, 0, ...]
|
| 366 |
+
|
| 367 |
+
fragments = Fragments(
|
| 368 |
+
pix_to_face=pix_to_face,
|
| 369 |
+
bary_coords=barycentric_coords,
|
| 370 |
+
zbuf=torch.ones_like(pix_to_face),
|
| 371 |
+
dists=torch.ones_like(pix_to_face),
|
| 372 |
+
)
|
| 373 |
+
texels = mesh.textures.sample_textures(fragments)
|
| 374 |
+
self.assertTrue(torch.allclose(texels, expected_vals))
|
| 375 |
+
|
| 376 |
+
def test_textures_atlas_grad(self):
|
| 377 |
+
N, F, R = 1, 2, 2
|
| 378 |
+
verts = torch.randn((4, 3), dtype=torch.float32)
|
| 379 |
+
faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
|
| 380 |
+
faces_atlas = torch.rand(size=(N, F, R, R, 3), requires_grad=True)
|
| 381 |
+
tex = TexturesAtlas(atlas=faces_atlas)
|
| 382 |
+
mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
|
| 383 |
+
pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
|
| 384 |
+
barycentric_coords = torch.tensor(
|
| 385 |
+
[[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
|
| 386 |
+
).view(1, 1, 1, 2, -1)
|
| 387 |
+
fragments = Fragments(
|
| 388 |
+
pix_to_face=pix_to_face,
|
| 389 |
+
bary_coords=barycentric_coords,
|
| 390 |
+
zbuf=torch.ones_like(pix_to_face),
|
| 391 |
+
dists=torch.ones_like(pix_to_face),
|
| 392 |
+
)
|
| 393 |
+
texels = mesh.textures.sample_textures(fragments)
|
| 394 |
+
grad_tex = torch.rand_like(texels)
|
| 395 |
+
grad_expected = torch.zeros_like(faces_atlas)
|
| 396 |
+
grad_expected[0, 0, 0, 1, :] = grad_tex[..., 0:1, :]
|
| 397 |
+
grad_expected[0, 1, 1, 0, :] = grad_tex[..., 1:2, :]
|
| 398 |
+
texels.backward(grad_tex)
|
| 399 |
+
self.assertTrue(hasattr(faces_atlas, "grad"))
|
| 400 |
+
self.assertTrue(torch.allclose(faces_atlas.grad, grad_expected))
|
| 401 |
+
|
| 402 |
+
def test_textures_atlas_init_fail(self):
|
| 403 |
+
# Incorrect sized tensors
|
| 404 |
+
with self.assertRaisesRegex(ValueError, "atlas"):
|
| 405 |
+
TexturesAtlas(atlas=torch.rand(size=(5, 10, 3)))
|
| 406 |
+
|
| 407 |
+
# Not a list or a tensor
|
| 408 |
+
with self.assertRaisesRegex(ValueError, "atlas"):
|
| 409 |
+
TexturesAtlas(atlas=(1, 1, 1))
|
| 410 |
+
|
| 411 |
+
def test_faces_verts_textures(self):
|
| 412 |
+
device = torch.device("cuda:0")
|
| 413 |
+
N, F, R = 2, 2, 8
|
| 414 |
+
num_faces = torch.randint(low=1, high=F, size=(N,))
|
| 415 |
+
faces_atlas = [
|
| 416 |
+
torch.rand(size=(num_faces[i].item(), R, R, 3), device=device)
|
| 417 |
+
for i in range(N)
|
| 418 |
+
]
|
| 419 |
+
tex = TexturesAtlas(atlas=faces_atlas)
|
| 420 |
+
|
| 421 |
+
# faces_verts naive
|
| 422 |
+
faces_verts = []
|
| 423 |
+
for n in range(N):
|
| 424 |
+
ff = num_faces[n].item()
|
| 425 |
+
temp = torch.zeros(ff, 3, 3)
|
| 426 |
+
for f in range(ff):
|
| 427 |
+
t0 = faces_atlas[n][f, 0, -1] # for v0, bary = (1, 0)
|
| 428 |
+
t1 = faces_atlas[n][f, -1, 0] # for v1, bary = (0, 1)
|
| 429 |
+
t2 = faces_atlas[n][f, 0, 0] # for v2, bary = (0, 0)
|
| 430 |
+
temp[f, 0] = t0
|
| 431 |
+
temp[f, 1] = t1
|
| 432 |
+
temp[f, 2] = t2
|
| 433 |
+
faces_verts.append(temp)
|
| 434 |
+
faces_verts = torch.cat(faces_verts, 0)
|
| 435 |
+
|
| 436 |
+
self.assertClose(faces_verts, tex.faces_verts_textures_packed().cpu())
|
| 437 |
+
|
| 438 |
+
def test_clone(self):
|
| 439 |
+
tex = TexturesAtlas(atlas=torch.rand(size=(1, 10, 2, 2, 3)))
|
| 440 |
+
tex.atlas_list()
|
| 441 |
+
tex_cloned = tex.clone()
|
| 442 |
+
self.assertSeparate(tex._atlas_padded, tex_cloned._atlas_padded)
|
| 443 |
+
self.assertClose(tex._atlas_padded, tex_cloned._atlas_padded)
|
| 444 |
+
self.assertSeparate(tex.valid, tex_cloned.valid)
|
| 445 |
+
self.assertTrue(tex.valid.eq(tex_cloned.valid).all())
|
| 446 |
+
for i in range(tex._N):
|
| 447 |
+
self.assertSeparate(tex._atlas_list[i], tex_cloned._atlas_list[i])
|
| 448 |
+
self.assertClose(tex._atlas_list[i], tex_cloned._atlas_list[i])
|
| 449 |
+
|
| 450 |
+
def test_detach(self):
|
| 451 |
+
tex = TexturesAtlas(atlas=torch.rand(size=(1, 10, 2, 2, 3), requires_grad=True))
|
| 452 |
+
tex.atlas_list()
|
| 453 |
+
tex_detached = tex.detach()
|
| 454 |
+
self.assertFalse(tex_detached._atlas_padded.requires_grad)
|
| 455 |
+
self.assertClose(tex_detached._atlas_padded, tex._atlas_padded)
|
| 456 |
+
for i in range(tex._N):
|
| 457 |
+
self.assertFalse(tex_detached._atlas_list[i].requires_grad)
|
| 458 |
+
self.assertClose(tex._atlas_list[i], tex_detached._atlas_list[i])
|
| 459 |
+
|
| 460 |
+
def test_extend(self):
|
| 461 |
+
B = 10
|
| 462 |
+
mesh = init_mesh(B, 30, 50)
|
| 463 |
+
F = mesh._F
|
| 464 |
+
tex_uv = TexturesAtlas(atlas=torch.randn((B, F, 2, 2, 3)))
|
| 465 |
+
tex_mesh = Meshes(
|
| 466 |
+
verts=mesh.verts_padded(), faces=mesh.faces_padded(), textures=tex_uv
|
| 467 |
+
)
|
| 468 |
+
N = 20
|
| 469 |
+
new_mesh = tex_mesh.extend(N)
|
| 470 |
+
|
| 471 |
+
self.assertEqual(len(tex_mesh) * N, len(new_mesh))
|
| 472 |
+
|
| 473 |
+
tex_init = tex_mesh.textures
|
| 474 |
+
new_tex = new_mesh.textures
|
| 475 |
+
|
| 476 |
+
for i in range(len(tex_mesh)):
|
| 477 |
+
for n in range(N):
|
| 478 |
+
self.assertClose(
|
| 479 |
+
tex_init.atlas_list()[i], new_tex.atlas_list()[i * N + n]
|
| 480 |
+
)
|
| 481 |
+
self.assertClose(
|
| 482 |
+
tex_init._num_faces_per_mesh[i],
|
| 483 |
+
new_tex._num_faces_per_mesh[i * N + n],
|
| 484 |
+
)
|
| 485 |
+
|
| 486 |
+
self.assertAllSeparate([tex_init.atlas_padded(), new_tex.atlas_padded()])
|
| 487 |
+
|
| 488 |
+
with self.assertRaises(ValueError):
|
| 489 |
+
tex_mesh.extend(N=-1)
|
| 490 |
+
|
| 491 |
+
def test_padded_to_packed(self):
|
| 492 |
+
# Case where each face in the mesh has 3 unique uv vertex indices
|
| 493 |
+
# - i.e. even if a vertex is shared between multiple faces it will
|
| 494 |
+
# have a unique uv coordinate for each face.
|
| 495 |
+
R = 2
|
| 496 |
+
N = 20
|
| 497 |
+
num_faces_per_mesh = torch.randint(size=(N,), low=0, high=30)
|
| 498 |
+
atlas_list = [torch.rand(f, R, R, 3) for f in num_faces_per_mesh]
|
| 499 |
+
tex = TexturesAtlas(atlas=atlas_list)
|
| 500 |
+
|
| 501 |
+
# This is set inside Meshes when textures is passed as an input.
|
| 502 |
+
# Here we set _num_faces_per_mesh explicity.
|
| 503 |
+
tex1 = tex.clone()
|
| 504 |
+
tex1._num_faces_per_mesh = num_faces_per_mesh.tolist()
|
| 505 |
+
atlas_packed = tex1.atlas_packed()
|
| 506 |
+
atlas_list_new = tex1.atlas_list()
|
| 507 |
+
atlas_padded = tex1.atlas_padded()
|
| 508 |
+
|
| 509 |
+
for f1, f2 in zip(atlas_list_new, atlas_list):
|
| 510 |
+
self.assertTrue((f1 == f2).all().item())
|
| 511 |
+
|
| 512 |
+
sum_F = num_faces_per_mesh.sum()
|
| 513 |
+
max_F = num_faces_per_mesh.max().item()
|
| 514 |
+
self.assertTrue(atlas_packed.shape == (sum_F, R, R, 3))
|
| 515 |
+
self.assertTrue(atlas_padded.shape == (N, max_F, R, R, 3))
|
| 516 |
+
|
| 517 |
+
# Case where num_faces_per_mesh is not set and textures
|
| 518 |
+
# are initialized with a padded tensor.
|
| 519 |
+
atlas_list_padded = _list_to_padded_wrapper(atlas_list)
|
| 520 |
+
tex2 = TexturesAtlas(atlas=atlas_list_padded)
|
| 521 |
+
atlas_packed = tex2.atlas_packed()
|
| 522 |
+
atlas_list_new = tex2.atlas_list()
|
| 523 |
+
|
| 524 |
+
# Packed is just flattened padded as num_faces_per_mesh
|
| 525 |
+
# has not been provided.
|
| 526 |
+
self.assertTrue(atlas_packed.shape == (N * max_F, R, R, 3))
|
| 527 |
+
|
| 528 |
+
for i, (f1, f2) in enumerate(zip(atlas_list_new, atlas_list)):
|
| 529 |
+
n = num_faces_per_mesh[i]
|
| 530 |
+
self.assertTrue((f1[:n] == f2).all().item())
|
| 531 |
+
|
| 532 |
+
def test_getitem(self):
|
| 533 |
+
N = 5
|
| 534 |
+
V = 20
|
| 535 |
+
F = 10
|
| 536 |
+
source = {"atlas": torch.randn(size=(N, F, 4, 4, 3))}
|
| 537 |
+
tex = TexturesAtlas(atlas=source["atlas"])
|
| 538 |
+
|
| 539 |
+
verts = torch.rand(size=(N, V, 3))
|
| 540 |
+
faces = torch.randint(size=(N, F, 3), high=V)
|
| 541 |
+
meshes = Meshes(verts=verts, faces=faces, textures=tex)
|
| 542 |
+
|
| 543 |
+
tryindex(self, 2, tex, meshes, source)
|
| 544 |
+
tryindex(self, slice(0, 2, 1), tex, meshes, source)
|
| 545 |
+
index = torch.tensor([1, 0, 1, 0, 0], dtype=torch.bool)
|
| 546 |
+
tryindex(self, index, tex, meshes, source)
|
| 547 |
+
index = torch.tensor([0, 0, 0, 0, 0], dtype=torch.bool)
|
| 548 |
+
tryindex(self, index, tex, meshes, source)
|
| 549 |
+
index = torch.tensor([1, 2], dtype=torch.int64)
|
| 550 |
+
tryindex(self, index, tex, meshes, source)
|
| 551 |
+
tryindex(self, [2, 4], tex, meshes, source)
|
| 552 |
+
|
| 553 |
+
def test_sample_textures_error(self):
|
| 554 |
+
N = 1
|
| 555 |
+
V = 20
|
| 556 |
+
F = 10
|
| 557 |
+
verts = torch.rand(size=(5, V, 3))
|
| 558 |
+
faces = torch.randint(size=(5, F, 3), high=V)
|
| 559 |
+
meshes = Meshes(verts=verts, faces=faces)
|
| 560 |
+
|
| 561 |
+
# TexturesAtlas have the wrong batch dim
|
| 562 |
+
tex = TexturesAtlas(atlas=torch.randn(size=(1, F, 4, 4, 3)))
|
| 563 |
+
with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
|
| 564 |
+
Meshes(verts=verts, faces=faces, textures=tex)
|
| 565 |
+
|
| 566 |
+
# TexturesAtlas have the wrong number of faces
|
| 567 |
+
tex = TexturesAtlas(atlas=torch.randn(size=(N, 15, 4, 4, 3)))
|
| 568 |
+
with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
|
| 569 |
+
Meshes(verts=verts, faces=faces, textures=tex)
|
| 570 |
+
|
| 571 |
+
meshes = Meshes(verts=verts, faces=faces)
|
| 572 |
+
meshes.textures = tex
|
| 573 |
+
|
| 574 |
+
# Cannot use the texture attribute set on meshes for sampling
|
| 575 |
+
# textures if the dimensions don't match
|
| 576 |
+
with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
|
| 577 |
+
meshes.sample_textures(None)
|
| 578 |
+
|
| 579 |
+
def test_submeshes(self):
|
| 580 |
+
N = 2
|
| 581 |
+
V = 5
|
| 582 |
+
F = 5
|
| 583 |
+
tex = TexturesAtlas(
|
| 584 |
+
atlas=torch.arange(N * F * 4 * 4 * 3, dtype=torch.float32).reshape(
|
| 585 |
+
N, F, 4, 4, 3
|
| 586 |
+
)
|
| 587 |
+
)
|
| 588 |
+
|
| 589 |
+
verts = torch.rand(size=(N, V, 3))
|
| 590 |
+
faces = torch.randint(size=(N, F, 3), high=V)
|
| 591 |
+
mesh = Meshes(verts=verts, faces=faces, textures=tex)
|
| 592 |
+
|
| 593 |
+
sub_faces = [
|
| 594 |
+
[torch.tensor([0, 2]), torch.tensor([1, 2])],
|
| 595 |
+
[],
|
| 596 |
+
]
|
| 597 |
+
subtex = mesh.submeshes(sub_faces).textures
|
| 598 |
+
subtex_faces = subtex.atlas_list()
|
| 599 |
+
|
| 600 |
+
self.assertEqual(len(subtex_faces), 2)
|
| 601 |
+
self.assertClose(
|
| 602 |
+
subtex_faces[0].flatten().msort(),
|
| 603 |
+
torch.cat(
|
| 604 |
+
(
|
| 605 |
+
torch.arange(4 * 4 * 3, dtype=torch.float32),
|
| 606 |
+
torch.arange(96, 96 + 4 * 4 * 3, dtype=torch.float32),
|
| 607 |
+
),
|
| 608 |
+
0,
|
| 609 |
+
),
|
| 610 |
+
)
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
class TestTexturesUV(TestCaseMixin, unittest.TestCase):
|
| 614 |
+
def setUp(self) -> None:
|
| 615 |
+
super().setUp()
|
| 616 |
+
torch.manual_seed(42)
|
| 617 |
+
|
| 618 |
+
def test_sample_textures_uv(self):
|
| 619 |
+
barycentric_coords = torch.tensor(
|
| 620 |
+
[[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
|
| 621 |
+
).view(1, 1, 1, 2, -1)
|
| 622 |
+
dummy_verts = torch.zeros(4, 3)
|
| 623 |
+
vert_uvs = torch.tensor([[1, 0], [0, 1], [1, 1], [0, 0]], dtype=torch.float32)
|
| 624 |
+
face_uvs = torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.int64)
|
| 625 |
+
interpolated_uvs = torch.tensor(
|
| 626 |
+
[[0.5 + 0.2, 0.3 + 0.2], [0.6, 0.3 + 0.6]], dtype=torch.float32
|
| 627 |
+
)
|
| 628 |
+
|
| 629 |
+
# Create a dummy texture map
|
| 630 |
+
H = 2
|
| 631 |
+
W = 2
|
| 632 |
+
x = torch.linspace(0, 1, W).view(1, W).expand(H, W)
|
| 633 |
+
y = torch.linspace(0, 1, H).view(H, 1).expand(H, W)
|
| 634 |
+
tex_map = torch.stack([x, y], dim=2).view(1, H, W, 2)
|
| 635 |
+
pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
|
| 636 |
+
fragments = Fragments(
|
| 637 |
+
pix_to_face=pix_to_face,
|
| 638 |
+
bary_coords=barycentric_coords,
|
| 639 |
+
zbuf=pix_to_face,
|
| 640 |
+
dists=pix_to_face,
|
| 641 |
+
)
|
| 642 |
+
|
| 643 |
+
for align_corners in [True, False]:
|
| 644 |
+
tex = TexturesUV(
|
| 645 |
+
maps=tex_map,
|
| 646 |
+
faces_uvs=[face_uvs],
|
| 647 |
+
verts_uvs=[vert_uvs],
|
| 648 |
+
align_corners=align_corners,
|
| 649 |
+
)
|
| 650 |
+
meshes = Meshes(verts=[dummy_verts], faces=[face_uvs], textures=tex)
|
| 651 |
+
mesh_textures = meshes.textures
|
| 652 |
+
texels = mesh_textures.sample_textures(fragments)
|
| 653 |
+
|
| 654 |
+
# Expected output
|
| 655 |
+
pixel_uvs = interpolated_uvs * 2.0 - 1.0
|
| 656 |
+
pixel_uvs = pixel_uvs.view(2, 1, 1, 2)
|
| 657 |
+
tex_map_ = torch.flip(tex_map, [1]).permute(0, 3, 1, 2)
|
| 658 |
+
tex_map_ = torch.cat([tex_map_, tex_map_], dim=0)
|
| 659 |
+
expected_out = F.grid_sample(
|
| 660 |
+
tex_map_, pixel_uvs, align_corners=align_corners, padding_mode="border"
|
| 661 |
+
)
|
| 662 |
+
self.assertTrue(torch.allclose(texels.squeeze(), expected_out.squeeze()))
|
| 663 |
+
|
| 664 |
+
def test_textures_uv_init_fail(self):
|
| 665 |
+
# Maps has wrong shape
|
| 666 |
+
with self.assertRaisesRegex(ValueError, "maps"):
|
| 667 |
+
TexturesUV(
|
| 668 |
+
maps=torch.ones((5, 16, 16, 3, 4)),
|
| 669 |
+
faces_uvs=torch.rand(size=(5, 10, 3)),
|
| 670 |
+
verts_uvs=torch.rand(size=(5, 15, 2)),
|
| 671 |
+
)
|
| 672 |
+
|
| 673 |
+
# faces_uvs has wrong shape
|
| 674 |
+
with self.assertRaisesRegex(ValueError, "faces_uvs"):
|
| 675 |
+
TexturesUV(
|
| 676 |
+
maps=torch.ones((5, 16, 16, 3)),
|
| 677 |
+
faces_uvs=torch.rand(size=(5, 10, 3, 3)),
|
| 678 |
+
verts_uvs=torch.rand(size=(5, 15, 2)),
|
| 679 |
+
)
|
| 680 |
+
|
| 681 |
+
# verts_uvs has wrong shape
|
| 682 |
+
with self.assertRaisesRegex(ValueError, "verts_uvs"):
|
| 683 |
+
TexturesUV(
|
| 684 |
+
maps=torch.ones((5, 16, 16, 3)),
|
| 685 |
+
faces_uvs=torch.rand(size=(5, 10, 3)),
|
| 686 |
+
verts_uvs=torch.rand(size=(5, 15, 2, 3)),
|
| 687 |
+
)
|
| 688 |
+
|
| 689 |
+
# verts has different batch dim to faces
|
| 690 |
+
with self.assertRaisesRegex(ValueError, "verts_uvs"):
|
| 691 |
+
TexturesUV(
|
| 692 |
+
maps=torch.ones((5, 16, 16, 3)),
|
| 693 |
+
faces_uvs=torch.rand(size=(5, 10, 3)),
|
| 694 |
+
verts_uvs=torch.rand(size=(8, 15, 2)),
|
| 695 |
+
)
|
| 696 |
+
|
| 697 |
+
# maps has different batch dim to faces
|
| 698 |
+
with self.assertRaisesRegex(ValueError, "maps"):
|
| 699 |
+
TexturesUV(
|
| 700 |
+
maps=torch.ones((8, 16, 16, 3)),
|
| 701 |
+
faces_uvs=torch.rand(size=(5, 10, 3)),
|
| 702 |
+
verts_uvs=torch.rand(size=(5, 15, 2)),
|
| 703 |
+
)
|
| 704 |
+
|
| 705 |
+
# verts on different device to faces
|
| 706 |
+
with self.assertRaisesRegex(ValueError, "verts_uvs"):
|
| 707 |
+
TexturesUV(
|
| 708 |
+
maps=torch.ones((5, 16, 16, 3)),
|
| 709 |
+
faces_uvs=torch.rand(size=(5, 10, 3)),
|
| 710 |
+
verts_uvs=torch.rand(size=(5, 15, 2, 3), device="cuda"),
|
| 711 |
+
)
|
| 712 |
+
|
| 713 |
+
# maps on different device to faces
|
| 714 |
+
with self.assertRaisesRegex(ValueError, "map"):
|
| 715 |
+
TexturesUV(
|
| 716 |
+
maps=torch.ones((5, 16, 16, 3), device="cuda"),
|
| 717 |
+
faces_uvs=torch.rand(size=(5, 10, 3)),
|
| 718 |
+
verts_uvs=torch.rand(size=(5, 15, 2)),
|
| 719 |
+
)
|
| 720 |
+
|
| 721 |
+
# maps ids are not none but maps doesn't have multiple map indices
|
| 722 |
+
with self.assertRaisesRegex(ValueError, "map"):
|
| 723 |
+
TexturesUV(
|
| 724 |
+
maps=torch.ones((5, 16, 16, 3)),
|
| 725 |
+
faces_uvs=torch.rand(size=(5, 10, 3)),
|
| 726 |
+
verts_uvs=torch.rand(size=(5, 15, 2)),
|
| 727 |
+
maps_ids=torch.randint(0, 1, (5, 10), dtype=torch.long),
|
| 728 |
+
)
|
| 729 |
+
# maps ids is none but maps have multiple map indices
|
| 730 |
+
with self.assertRaisesRegex(ValueError, "map"):
|
| 731 |
+
TexturesUV(
|
| 732 |
+
maps=torch.ones((5, 2, 16, 16, 3)),
|
| 733 |
+
faces_uvs=torch.rand(size=(5, 10, 3)),
|
| 734 |
+
verts_uvs=torch.rand(size=(5, 15, 2)),
|
| 735 |
+
)
|
| 736 |
+
|
| 737 |
+
def test_faces_verts_textures(self):
|
| 738 |
+
device = torch.device("cuda:0")
|
| 739 |
+
N, V, F, H, W = 2, 5, 12, 8, 8
|
| 740 |
+
vert_uvs = torch.rand((N, V, 2), dtype=torch.float32, device=device)
|
| 741 |
+
face_uvs = torch.randint(
|
| 742 |
+
high=V, size=(N, F, 3), dtype=torch.int64, device=device
|
| 743 |
+
)
|
| 744 |
+
maps = torch.rand((N, H, W, 3), dtype=torch.float32, device=device)
|
| 745 |
+
|
| 746 |
+
tex = TexturesUV(maps=maps, verts_uvs=vert_uvs, faces_uvs=face_uvs)
|
| 747 |
+
|
| 748 |
+
# naive faces_verts_textures
|
| 749 |
+
faces_verts_texs = []
|
| 750 |
+
for n in range(N):
|
| 751 |
+
temp = torch.zeros((F, 3, 3), device=device, dtype=torch.float32)
|
| 752 |
+
for f in range(F):
|
| 753 |
+
uv0 = vert_uvs[n, face_uvs[n, f, 0]]
|
| 754 |
+
uv1 = vert_uvs[n, face_uvs[n, f, 1]]
|
| 755 |
+
uv2 = vert_uvs[n, face_uvs[n, f, 2]]
|
| 756 |
+
|
| 757 |
+
idx = torch.stack((uv0, uv1, uv2), dim=0).view(1, 1, 3, 2) # 1x1x3x2
|
| 758 |
+
idx = idx * 2.0 - 1.0
|
| 759 |
+
imap = maps[n].view(1, H, W, 3).permute(0, 3, 1, 2) # 1x3xHxW
|
| 760 |
+
imap = torch.flip(imap, [2])
|
| 761 |
+
|
| 762 |
+
texts = torch.nn.functional.grid_sample(
|
| 763 |
+
imap,
|
| 764 |
+
idx,
|
| 765 |
+
align_corners=tex.align_corners,
|
| 766 |
+
padding_mode=tex.padding_mode,
|
| 767 |
+
) # 1x3x1x3
|
| 768 |
+
temp[f] = texts[0, :, 0, :].permute(1, 0)
|
| 769 |
+
faces_verts_texs.append(temp)
|
| 770 |
+
faces_verts_texs = torch.cat(faces_verts_texs, 0)
|
| 771 |
+
|
| 772 |
+
self.assertClose(faces_verts_texs, tex.faces_verts_textures_packed())
|
| 773 |
+
|
| 774 |
+
def test_faces_verts_multiple_map_textures(self):
|
| 775 |
+
device = torch.device("cuda:0")
|
| 776 |
+
N, M, V, F, H, W = 2, 3, 5, 12, 8, 8
|
| 777 |
+
vert_uvs = torch.rand((N, V, 2), dtype=torch.float32, device=device)
|
| 778 |
+
face_uvs = torch.randint(
|
| 779 |
+
high=V, size=(N, F, 3), dtype=torch.int64, device=device
|
| 780 |
+
)
|
| 781 |
+
map_ids = torch.randint(0, M, (N, F), device=device)
|
| 782 |
+
maps = torch.rand((N, M, H, W, 3), dtype=torch.float32, device=device)
|
| 783 |
+
|
| 784 |
+
tex = TexturesUV(
|
| 785 |
+
maps=maps, verts_uvs=vert_uvs, faces_uvs=face_uvs, maps_ids=map_ids
|
| 786 |
+
)
|
| 787 |
+
|
| 788 |
+
# naive faces_verts_textures
|
| 789 |
+
faces_verts_texs = []
|
| 790 |
+
for n in range(N):
|
| 791 |
+
temp = torch.zeros((F, 3, 3), device=device, dtype=torch.float32)
|
| 792 |
+
for f in range(F):
|
| 793 |
+
uv0 = vert_uvs[n, face_uvs[n, f, 0]]
|
| 794 |
+
uv1 = vert_uvs[n, face_uvs[n, f, 1]]
|
| 795 |
+
uv2 = vert_uvs[n, face_uvs[n, f, 2]]
|
| 796 |
+
map_id = map_ids[n, f]
|
| 797 |
+
|
| 798 |
+
idx = torch.stack((uv0, uv1, uv2), dim=0).view(1, 1, 3, 2) # 1x1x3x2
|
| 799 |
+
idx = idx * 2.0 - 1.0
|
| 800 |
+
imap = maps[n, map_id].view(1, H, W, 3).permute(0, 3, 1, 2) # 1x3xHxW
|
| 801 |
+
imap = torch.flip(imap, [2])
|
| 802 |
+
|
| 803 |
+
texts = torch.nn.functional.grid_sample(
|
| 804 |
+
imap,
|
| 805 |
+
idx,
|
| 806 |
+
align_corners=tex.align_corners,
|
| 807 |
+
padding_mode=tex.padding_mode,
|
| 808 |
+
) # 1x3x1x3
|
| 809 |
+
temp[f] = texts[0, :, 0, :].permute(1, 0)
|
| 810 |
+
faces_verts_texs.append(temp)
|
| 811 |
+
faces_verts_texs = torch.cat(faces_verts_texs, 0)
|
| 812 |
+
|
| 813 |
+
self.assertClose(faces_verts_texs, tex.faces_verts_textures_packed())
|
| 814 |
+
|
| 815 |
+
def test_clone(self):
|
| 816 |
+
tex = TexturesUV(
|
| 817 |
+
maps=torch.ones((5, 16, 16, 3)),
|
| 818 |
+
faces_uvs=torch.rand(size=(5, 10, 3)),
|
| 819 |
+
verts_uvs=torch.rand(size=(5, 15, 2)),
|
| 820 |
+
)
|
| 821 |
+
tex.faces_uvs_list()
|
| 822 |
+
tex.verts_uvs_list()
|
| 823 |
+
tex_cloned = tex.clone()
|
| 824 |
+
self.assertSeparate(tex._faces_uvs_padded, tex_cloned._faces_uvs_padded)
|
| 825 |
+
self.assertClose(tex._faces_uvs_padded, tex_cloned._faces_uvs_padded)
|
| 826 |
+
self.assertSeparate(tex._verts_uvs_padded, tex_cloned._verts_uvs_padded)
|
| 827 |
+
self.assertClose(tex._verts_uvs_padded, tex_cloned._verts_uvs_padded)
|
| 828 |
+
self.assertSeparate(tex._maps_padded, tex_cloned._maps_padded)
|
| 829 |
+
self.assertClose(tex._maps_padded, tex_cloned._maps_padded)
|
| 830 |
+
self.assertSeparate(tex.valid, tex_cloned.valid)
|
| 831 |
+
self.assertTrue(tex.valid.eq(tex_cloned.valid).all())
|
| 832 |
+
for i in range(tex._N):
|
| 833 |
+
self.assertSeparate(tex._faces_uvs_list[i], tex_cloned._faces_uvs_list[i])
|
| 834 |
+
self.assertClose(tex._faces_uvs_list[i], tex_cloned._faces_uvs_list[i])
|
| 835 |
+
self.assertSeparate(tex._verts_uvs_list[i], tex_cloned._verts_uvs_list[i])
|
| 836 |
+
self.assertClose(tex._verts_uvs_list[i], tex_cloned._verts_uvs_list[i])
|
| 837 |
+
# tex._maps_list is not use anywhere so it's not stored. We call it explicitly
|
| 838 |
+
self.assertSeparate(tex.maps_list()[i], tex_cloned.maps_list()[i])
|
| 839 |
+
self.assertClose(tex.maps_list()[i], tex_cloned.maps_list()[i])
|
| 840 |
+
|
| 841 |
+
def test_multiple_maps_clone(self):
|
| 842 |
+
tex = TexturesUV(
|
| 843 |
+
maps=torch.ones((5, 3, 16, 16, 3)),
|
| 844 |
+
faces_uvs=torch.rand(size=(5, 10, 3)),
|
| 845 |
+
verts_uvs=torch.rand(size=(5, 15, 2)),
|
| 846 |
+
maps_ids=torch.randint(0, 3, (5, 10)),
|
| 847 |
+
)
|
| 848 |
+
tex.faces_uvs_list()
|
| 849 |
+
tex.verts_uvs_list()
|
| 850 |
+
tex_cloned = tex.clone()
|
| 851 |
+
self.assertSeparate(tex._faces_uvs_padded, tex_cloned._faces_uvs_padded)
|
| 852 |
+
self.assertClose(tex._faces_uvs_padded, tex_cloned._faces_uvs_padded)
|
| 853 |
+
self.assertSeparate(tex._verts_uvs_padded, tex_cloned._verts_uvs_padded)
|
| 854 |
+
self.assertClose(tex._verts_uvs_padded, tex_cloned._verts_uvs_padded)
|
| 855 |
+
self.assertSeparate(tex._maps_padded, tex_cloned._maps_padded)
|
| 856 |
+
self.assertClose(tex._maps_padded, tex_cloned._maps_padded)
|
| 857 |
+
self.assertSeparate(tex.valid, tex_cloned.valid)
|
| 858 |
+
self.assertTrue(tex.valid.eq(tex_cloned.valid).all())
|
| 859 |
+
self.assertSeparate(tex._maps_ids_padded, tex_cloned._maps_ids_padded)
|
| 860 |
+
self.assertClose(tex._maps_ids_padded, tex_cloned._maps_ids_padded)
|
| 861 |
+
for i in range(tex._N):
|
| 862 |
+
self.assertSeparate(tex._faces_uvs_list[i], tex_cloned._faces_uvs_list[i])
|
| 863 |
+
self.assertClose(tex._faces_uvs_list[i], tex_cloned._faces_uvs_list[i])
|
| 864 |
+
self.assertSeparate(tex._verts_uvs_list[i], tex_cloned._verts_uvs_list[i])
|
| 865 |
+
self.assertClose(tex._verts_uvs_list[i], tex_cloned._verts_uvs_list[i])
|
| 866 |
+
# tex._maps_list is not use anywhere so it's not stored. We call it explicitly
|
| 867 |
+
self.assertSeparate(tex.maps_list()[i], tex_cloned.maps_list()[i])
|
| 868 |
+
self.assertClose(tex.maps_list()[i], tex_cloned.maps_list()[i])
|
| 869 |
+
self.assertSeparate(tex.maps_ids_list()[i], tex_cloned.maps_ids_list()[i])
|
| 870 |
+
self.assertClose(tex.maps_ids_list()[i], tex_cloned.maps_ids_list()[i])
|
| 871 |
+
|
| 872 |
+
def test_detach(self):
|
| 873 |
+
tex = TexturesUV(
|
| 874 |
+
maps=torch.ones((5, 16, 16, 3), requires_grad=True),
|
| 875 |
+
faces_uvs=torch.rand(size=(5, 10, 3)),
|
| 876 |
+
verts_uvs=torch.rand(size=(5, 15, 2)),
|
| 877 |
+
)
|
| 878 |
+
tex.faces_uvs_list()
|
| 879 |
+
tex.verts_uvs_list()
|
| 880 |
+
tex_detached = tex.detach()
|
| 881 |
+
self.assertFalse(tex_detached._maps_padded.requires_grad)
|
| 882 |
+
self.assertClose(tex._maps_padded, tex_detached._maps_padded)
|
| 883 |
+
self.assertFalse(tex_detached._verts_uvs_padded.requires_grad)
|
| 884 |
+
self.assertClose(tex._verts_uvs_padded, tex_detached._verts_uvs_padded)
|
| 885 |
+
self.assertFalse(tex_detached._faces_uvs_padded.requires_grad)
|
| 886 |
+
self.assertClose(tex._faces_uvs_padded, tex_detached._faces_uvs_padded)
|
| 887 |
+
for i in range(tex._N):
|
| 888 |
+
self.assertFalse(tex_detached._verts_uvs_list[i].requires_grad)
|
| 889 |
+
self.assertClose(tex._verts_uvs_list[i], tex_detached._verts_uvs_list[i])
|
| 890 |
+
self.assertFalse(tex_detached._faces_uvs_list[i].requires_grad)
|
| 891 |
+
self.assertClose(tex._faces_uvs_list[i], tex_detached._faces_uvs_list[i])
|
| 892 |
+
# tex._maps_list is not use anywhere so it's not stored. We call it explicitly
|
| 893 |
+
self.assertFalse(tex_detached.maps_list()[i].requires_grad)
|
| 894 |
+
self.assertClose(tex.maps_list()[i], tex_detached.maps_list()[i])
|
| 895 |
+
|
| 896 |
+
def test_multiple_maps_detach(self):
|
| 897 |
+
tex = TexturesUV(
|
| 898 |
+
maps=torch.ones((5, 3, 16, 16, 3), requires_grad=True),
|
| 899 |
+
faces_uvs=torch.rand(size=(5, 10, 3)),
|
| 900 |
+
verts_uvs=torch.rand(size=(5, 15, 2)),
|
| 901 |
+
maps_ids=torch.randint(0, 3, (5, 10)),
|
| 902 |
+
)
|
| 903 |
+
tex.faces_uvs_list()
|
| 904 |
+
tex.verts_uvs_list()
|
| 905 |
+
tex_detached = tex.detach()
|
| 906 |
+
self.assertFalse(tex_detached._maps_padded.requires_grad)
|
| 907 |
+
self.assertClose(tex._maps_padded, tex_detached._maps_padded)
|
| 908 |
+
self.assertFalse(tex_detached._verts_uvs_padded.requires_grad)
|
| 909 |
+
self.assertClose(tex._verts_uvs_padded, tex_detached._verts_uvs_padded)
|
| 910 |
+
self.assertFalse(tex_detached._faces_uvs_padded.requires_grad)
|
| 911 |
+
self.assertClose(tex._faces_uvs_padded, tex_detached._faces_uvs_padded)
|
| 912 |
+
self.assertFalse(tex_detached._maps_ids_padded.requires_grad)
|
| 913 |
+
self.assertClose(tex._maps_ids_padded, tex_detached._maps_ids_padded)
|
| 914 |
+
for i in range(tex._N):
|
| 915 |
+
self.assertFalse(tex_detached._verts_uvs_list[i].requires_grad)
|
| 916 |
+
self.assertClose(tex._verts_uvs_list[i], tex_detached._verts_uvs_list[i])
|
| 917 |
+
self.assertFalse(tex_detached._faces_uvs_list[i].requires_grad)
|
| 918 |
+
self.assertClose(tex._faces_uvs_list[i], tex_detached._faces_uvs_list[i])
|
| 919 |
+
# tex._maps_list is not use anywhere so it's not stored. We call it explicitly
|
| 920 |
+
self.assertFalse(tex_detached.maps_list()[i].requires_grad)
|
| 921 |
+
self.assertClose(tex.maps_list()[i], tex_detached.maps_list()[i])
|
| 922 |
+
self.assertFalse(tex_detached.maps_ids_list()[i].requires_grad)
|
| 923 |
+
self.assertClose(tex.maps_ids_list()[i], tex_detached.maps_ids_list()[i])
|
| 924 |
+
|
| 925 |
+
def test_extend(self):
|
| 926 |
+
B = 5
|
| 927 |
+
mesh = init_mesh(B, 30, 50)
|
| 928 |
+
V = mesh._V
|
| 929 |
+
num_faces = mesh.num_faces_per_mesh()
|
| 930 |
+
num_verts = mesh.num_verts_per_mesh()
|
| 931 |
+
faces_uvs_list = [torch.randint(size=(f, 3), low=0, high=V) for f in num_faces]
|
| 932 |
+
verts_uvs_list = [torch.rand(v, 2) for v in num_verts]
|
| 933 |
+
tex_uv = TexturesUV(
|
| 934 |
+
maps=torch.ones((B, 16, 16, 3)),
|
| 935 |
+
faces_uvs=faces_uvs_list,
|
| 936 |
+
verts_uvs=verts_uvs_list,
|
| 937 |
+
)
|
| 938 |
+
tex_mesh = Meshes(
|
| 939 |
+
verts=mesh.verts_list(), faces=mesh.faces_list(), textures=tex_uv
|
| 940 |
+
)
|
| 941 |
+
N = 2
|
| 942 |
+
new_mesh = tex_mesh.extend(N)
|
| 943 |
+
|
| 944 |
+
self.assertEqual(len(tex_mesh) * N, len(new_mesh))
|
| 945 |
+
|
| 946 |
+
tex_init = tex_mesh.textures
|
| 947 |
+
new_tex = new_mesh.textures
|
| 948 |
+
|
| 949 |
+
new_tex_num_verts = new_mesh.num_verts_per_mesh()
|
| 950 |
+
for i in range(len(tex_mesh)):
|
| 951 |
+
for n in range(N):
|
| 952 |
+
tex_nv = new_tex_num_verts[i * N + n]
|
| 953 |
+
self.assertClose(
|
| 954 |
+
# The original textures were initialized using
|
| 955 |
+
# verts uvs list
|
| 956 |
+
tex_init.verts_uvs_list()[i],
|
| 957 |
+
# In the new textures, the verts_uvs are initialized
|
| 958 |
+
# from padded. The verts per mesh are not used to
|
| 959 |
+
# convert from padded to list. See TexturesUV for an
|
| 960 |
+
# explanation.
|
| 961 |
+
new_tex.verts_uvs_list()[i * N + n][:tex_nv, ...],
|
| 962 |
+
)
|
| 963 |
+
self.assertClose(
|
| 964 |
+
tex_init.faces_uvs_list()[i], new_tex.faces_uvs_list()[i * N + n]
|
| 965 |
+
)
|
| 966 |
+
self.assertClose(
|
| 967 |
+
tex_init.maps_padded()[i, ...], new_tex.maps_padded()[i * N + n]
|
| 968 |
+
)
|
| 969 |
+
self.assertClose(
|
| 970 |
+
tex_init._num_faces_per_mesh[i],
|
| 971 |
+
new_tex._num_faces_per_mesh[i * N + n],
|
| 972 |
+
)
|
| 973 |
+
|
| 974 |
+
self.assertAllSeparate(
|
| 975 |
+
[
|
| 976 |
+
tex_init.faces_uvs_padded(),
|
| 977 |
+
new_tex.faces_uvs_padded(),
|
| 978 |
+
tex_init.verts_uvs_padded(),
|
| 979 |
+
new_tex.verts_uvs_padded(),
|
| 980 |
+
tex_init.maps_padded(),
|
| 981 |
+
new_tex.maps_padded(),
|
| 982 |
+
]
|
| 983 |
+
)
|
| 984 |
+
|
| 985 |
+
with self.assertRaises(ValueError):
|
| 986 |
+
tex_mesh.extend(N=-1)
|
| 987 |
+
|
| 988 |
+
def test_padded_to_packed(self):
|
| 989 |
+
# Case where each face in the mesh has 3 unique uv vertex indices
|
| 990 |
+
# - i.e. even if a vertex is shared between multiple faces it will
|
| 991 |
+
# have a unique uv coordinate for each face.
|
| 992 |
+
N = 2
|
| 993 |
+
faces_uvs_list = [
|
| 994 |
+
torch.tensor([[0, 1, 2], [3, 5, 4], [7, 6, 8]]),
|
| 995 |
+
torch.tensor([[0, 1, 2], [3, 4, 5]]),
|
| 996 |
+
] # (N, 3, 3)
|
| 997 |
+
verts_uvs_list = [torch.ones(9, 2), torch.ones(6, 2)]
|
| 998 |
+
maps_ids_given_list = [torch.randint(0, 3, (3,)), torch.randint(0, 3, (2,))]
|
| 999 |
+
|
| 1000 |
+
num_faces_per_mesh = [f.shape[0] for f in faces_uvs_list]
|
| 1001 |
+
num_verts_per_mesh = [v.shape[0] for v in verts_uvs_list]
|
| 1002 |
+
tex = TexturesUV(
|
| 1003 |
+
maps=torch.ones((N, 3, 16, 16, 3)),
|
| 1004 |
+
faces_uvs=faces_uvs_list,
|
| 1005 |
+
verts_uvs=verts_uvs_list,
|
| 1006 |
+
maps_ids=maps_ids_given_list,
|
| 1007 |
+
)
|
| 1008 |
+
|
| 1009 |
+
# This is set inside Meshes when textures is passed as an input.
|
| 1010 |
+
# Here we set _num_faces_per_mesh and _num_verts_per_mesh explicity.
|
| 1011 |
+
tex1 = tex.clone()
|
| 1012 |
+
tex1._num_faces_per_mesh = num_faces_per_mesh
|
| 1013 |
+
tex1._num_verts_per_mesh = num_verts_per_mesh
|
| 1014 |
+
verts_list = tex1.verts_uvs_list()
|
| 1015 |
+
verts_padded = tex1.verts_uvs_padded()
|
| 1016 |
+
|
| 1017 |
+
faces_list = tex1.faces_uvs_list()
|
| 1018 |
+
faces_padded = tex1.faces_uvs_padded()
|
| 1019 |
+
|
| 1020 |
+
maps_ids_list = tex1.maps_ids_list()
|
| 1021 |
+
maps_ids_padded = tex1.maps_ids_padded()
|
| 1022 |
+
|
| 1023 |
+
for f1, f2 in zip(faces_list, faces_uvs_list):
|
| 1024 |
+
self.assertTrue((f1 == f2).all().item())
|
| 1025 |
+
|
| 1026 |
+
for f1, f2 in zip(verts_list, verts_uvs_list):
|
| 1027 |
+
self.assertTrue((f1 == f2).all().item())
|
| 1028 |
+
|
| 1029 |
+
for f1, f2 in zip(maps_ids_given_list, maps_ids_list):
|
| 1030 |
+
self.assertTrue((f1 == f2).all().item())
|
| 1031 |
+
|
| 1032 |
+
self.assertTrue(faces_padded.shape == (2, 3, 3))
|
| 1033 |
+
self.assertTrue(verts_padded.shape == (2, 9, 2))
|
| 1034 |
+
self.assertTrue(maps_ids_padded.shape == (2, 3))
|
| 1035 |
+
|
| 1036 |
+
# Case where num_faces_per_mesh is not set and faces_verts_uvs
|
| 1037 |
+
# are initialized with a padded tensor.
|
| 1038 |
+
tex2 = TexturesUV(
|
| 1039 |
+
maps=torch.ones((N, 3, 16, 16, 3)),
|
| 1040 |
+
verts_uvs=verts_padded,
|
| 1041 |
+
faces_uvs=faces_padded,
|
| 1042 |
+
maps_ids=maps_ids_padded,
|
| 1043 |
+
)
|
| 1044 |
+
faces_list = tex2.faces_uvs_list()
|
| 1045 |
+
verts_list = tex2.verts_uvs_list()
|
| 1046 |
+
maps_ids_list = tex2.maps_ids_list()
|
| 1047 |
+
|
| 1048 |
+
for i, (f1, f2) in enumerate(zip(faces_list, faces_uvs_list)):
|
| 1049 |
+
n = num_faces_per_mesh[i]
|
| 1050 |
+
self.assertTrue((f1[:n] == f2).all().item())
|
| 1051 |
+
|
| 1052 |
+
for i, (f1, f2) in enumerate(zip(verts_list, verts_uvs_list)):
|
| 1053 |
+
n = num_verts_per_mesh[i]
|
| 1054 |
+
self.assertTrue((f1[:n] == f2).all().item())
|
| 1055 |
+
|
| 1056 |
+
for i, (f1, f2) in enumerate(zip(maps_ids_list, maps_ids_given_list)):
|
| 1057 |
+
n = num_faces_per_mesh[i]
|
| 1058 |
+
self.assertTrue((f1[:n] == f2).all().item())
|
| 1059 |
+
|
| 1060 |
+
def test_to(self):
|
| 1061 |
+
tex = TexturesUV(
|
| 1062 |
+
maps=torch.ones((5, 3, 16, 16, 3)),
|
| 1063 |
+
faces_uvs=torch.randint(size=(5, 10, 3), high=15),
|
| 1064 |
+
verts_uvs=torch.rand(size=(5, 15, 2)),
|
| 1065 |
+
maps_ids=torch.randint(0, 3, (5, 10)),
|
| 1066 |
+
)
|
| 1067 |
+
device = torch.device("cuda:0")
|
| 1068 |
+
tex = tex.to(device)
|
| 1069 |
+
self.assertEqual(tex._faces_uvs_padded.device, device)
|
| 1070 |
+
self.assertEqual(tex._verts_uvs_padded.device, device)
|
| 1071 |
+
self.assertEqual(tex._maps_padded.device, device)
|
| 1072 |
+
self.assertEqual(tex._maps_ids_padded.device, device)
|
| 1073 |
+
|
| 1074 |
+
def test_mesh_to(self):
|
| 1075 |
+
tex_cpu = TexturesUV(
|
| 1076 |
+
maps=torch.ones((5, 3, 16, 16, 3)),
|
| 1077 |
+
faces_uvs=torch.randint(size=(5, 10, 3), high=15),
|
| 1078 |
+
verts_uvs=torch.rand(size=(5, 15, 2)),
|
| 1079 |
+
maps_ids=torch.randint(0, 3, (5, 10)),
|
| 1080 |
+
)
|
| 1081 |
+
verts = torch.rand(size=(5, 15, 3))
|
| 1082 |
+
faces = torch.randint(size=(5, 10, 3), high=15)
|
| 1083 |
+
mesh_cpu = Meshes(faces=faces, verts=verts, textures=tex_cpu)
|
| 1084 |
+
cpu = torch.device("cpu")
|
| 1085 |
+
device = torch.device("cuda:0")
|
| 1086 |
+
tex = mesh_cpu.to(device).textures
|
| 1087 |
+
self.assertEqual(tex._faces_uvs_padded.device, device)
|
| 1088 |
+
self.assertEqual(tex._verts_uvs_padded.device, device)
|
| 1089 |
+
self.assertEqual(tex._maps_padded.device, device)
|
| 1090 |
+
self.assertEqual(tex._maps_ids_padded.device, device)
|
| 1091 |
+
self.assertEqual(tex_cpu._verts_uvs_padded.device, cpu)
|
| 1092 |
+
self.assertEqual(tex_cpu._maps_ids_padded.device, cpu)
|
| 1093 |
+
|
| 1094 |
+
self.assertEqual(tex_cpu.device, cpu)
|
| 1095 |
+
self.assertEqual(tex.device, device)
|
| 1096 |
+
|
| 1097 |
+
def test_getitem(self):
|
| 1098 |
+
N = 5
|
| 1099 |
+
M = 3
|
| 1100 |
+
V = 20
|
| 1101 |
+
F = 10
|
| 1102 |
+
source = {
|
| 1103 |
+
"maps": torch.rand(size=(N, M, 1, 1, 3)),
|
| 1104 |
+
"faces_uvs": torch.randint(size=(N, F, 3), high=V),
|
| 1105 |
+
"verts_uvs": torch.randn(size=(N, V, 2)),
|
| 1106 |
+
"maps_ids": torch.randint(0, M, (N, F)),
|
| 1107 |
+
}
|
| 1108 |
+
tex = TexturesUV(
|
| 1109 |
+
maps=source["maps"],
|
| 1110 |
+
faces_uvs=source["faces_uvs"],
|
| 1111 |
+
verts_uvs=source["verts_uvs"],
|
| 1112 |
+
maps_ids=source["maps_ids"],
|
| 1113 |
+
)
|
| 1114 |
+
|
| 1115 |
+
verts = torch.rand(size=(N, V, 3))
|
| 1116 |
+
faces = torch.randint(size=(N, F, 3), high=V)
|
| 1117 |
+
meshes = Meshes(verts=verts, faces=faces, textures=tex)
|
| 1118 |
+
|
| 1119 |
+
tryindex(self, 2, tex, meshes, source)
|
| 1120 |
+
tryindex(self, slice(0, 2, 1), tex, meshes, source)
|
| 1121 |
+
index = torch.tensor([1, 0, 1, 0, 0], dtype=torch.bool)
|
| 1122 |
+
tryindex(self, index, tex, meshes, source)
|
| 1123 |
+
index = torch.tensor([0, 0, 0, 0, 0], dtype=torch.bool)
|
| 1124 |
+
tryindex(self, index, tex, meshes, source)
|
| 1125 |
+
index = torch.tensor([1, 2], dtype=torch.int64)
|
| 1126 |
+
tryindex(self, index, tex, meshes, source)
|
| 1127 |
+
tryindex(self, [2, 4], tex, meshes, source)
|
| 1128 |
+
|
| 1129 |
+
def test_centers_for_image(self):
|
| 1130 |
+
maps = torch.rand(size=(1, 257, 129, 3))
|
| 1131 |
+
verts_uvs = torch.FloatTensor([[[0.25, 0.125], [0.5, 0.625], [0.5, 0.5]]])
|
| 1132 |
+
faces_uvs = torch.zeros(size=(1, 0, 3), dtype=torch.int64)
|
| 1133 |
+
tex = TexturesUV(maps=maps, faces_uvs=faces_uvs, verts_uvs=verts_uvs)
|
| 1134 |
+
|
| 1135 |
+
expected = torch.FloatTensor([[32, 224], [64, 96], [64, 128]])
|
| 1136 |
+
self.assertClose(tex.centers_for_image(0), expected)
|
| 1137 |
+
|
| 1138 |
+
def test_sample_textures_error(self):
|
| 1139 |
+
N = 1
|
| 1140 |
+
V = 20
|
| 1141 |
+
F = 10
|
| 1142 |
+
maps = torch.rand(size=(N, 1, 1, 3))
|
| 1143 |
+
verts_uvs = torch.randn(size=(N, V, 2))
|
| 1144 |
+
tex = TexturesUV(
|
| 1145 |
+
maps=maps,
|
| 1146 |
+
faces_uvs=torch.randint(size=(N, 15, 3), high=V),
|
| 1147 |
+
verts_uvs=verts_uvs,
|
| 1148 |
+
)
|
| 1149 |
+
verts = torch.rand(size=(5, V, 3))
|
| 1150 |
+
faces = torch.randint(size=(5, 10, 3), high=V)
|
| 1151 |
+
meshes = Meshes(verts=verts, faces=faces)
|
| 1152 |
+
|
| 1153 |
+
# Wrong number of faces
|
| 1154 |
+
with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
|
| 1155 |
+
Meshes(verts=verts, faces=faces, textures=tex)
|
| 1156 |
+
|
| 1157 |
+
# Wrong batch dim for faces
|
| 1158 |
+
tex = TexturesUV(
|
| 1159 |
+
maps=maps,
|
| 1160 |
+
faces_uvs=torch.randint(size=(1, F, 3), high=V),
|
| 1161 |
+
verts_uvs=verts_uvs,
|
| 1162 |
+
)
|
| 1163 |
+
with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
|
| 1164 |
+
Meshes(verts=verts, faces=faces, textures=tex)
|
| 1165 |
+
|
| 1166 |
+
# Wrong batch dim for verts_uvs is not necessary to check as
|
| 1167 |
+
# there is already a check inside TexturesUV for a batch dim
|
| 1168 |
+
# mismatch with faces_uvs
|
| 1169 |
+
|
| 1170 |
+
meshes = Meshes(verts=verts, faces=faces)
|
| 1171 |
+
meshes.textures = tex
|
| 1172 |
+
|
| 1173 |
+
# Cannot use the texture attribute set on meshes for sampling
|
| 1174 |
+
# textures if the dimensions don't match
|
| 1175 |
+
with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
|
| 1176 |
+
meshes.sample_textures(None)
|
| 1177 |
+
|
| 1178 |
+
def test_submeshes(self):
|
| 1179 |
+
N = 2
|
| 1180 |
+
faces_uvs_list = [
|
| 1181 |
+
torch.LongTensor([[0, 1, 2], [3, 5, 4], [7, 6, 8]]),
|
| 1182 |
+
torch.LongTensor([[0, 1, 2], [3, 4, 5]]),
|
| 1183 |
+
]
|
| 1184 |
+
verts_uvs_list = [
|
| 1185 |
+
torch.arange(18, dtype=torch.float32).reshape(9, 2),
|
| 1186 |
+
torch.ones(6, 2),
|
| 1187 |
+
]
|
| 1188 |
+
tex = TexturesUV(
|
| 1189 |
+
maps=torch.rand((N, 16, 16, 3)),
|
| 1190 |
+
faces_uvs=faces_uvs_list,
|
| 1191 |
+
verts_uvs=verts_uvs_list,
|
| 1192 |
+
)
|
| 1193 |
+
|
| 1194 |
+
sub_faces = [
|
| 1195 |
+
[torch.tensor([0, 1]), torch.tensor([1, 2])],
|
| 1196 |
+
[],
|
| 1197 |
+
]
|
| 1198 |
+
|
| 1199 |
+
mesh = Meshes(
|
| 1200 |
+
verts=[torch.rand(9, 3), torch.rand(6, 3)],
|
| 1201 |
+
faces=faces_uvs_list,
|
| 1202 |
+
textures=tex,
|
| 1203 |
+
)
|
| 1204 |
+
subtex = mesh.submeshes(sub_faces).textures
|
| 1205 |
+
subtex_faces = subtex.faces_uvs_padded()
|
| 1206 |
+
self.assertEqual(len(subtex_faces), 2)
|
| 1207 |
+
self.assertClose(
|
| 1208 |
+
subtex_faces[0],
|
| 1209 |
+
torch.tensor([[0, 1, 2], [3, 5, 4]]),
|
| 1210 |
+
)
|
| 1211 |
+
self.assertClose(
|
| 1212 |
+
subtex.verts_uvs_list()[0][subtex.faces_uvs_list()[0].flatten()]
|
| 1213 |
+
.flatten()
|
| 1214 |
+
.msort(),
|
| 1215 |
+
torch.arange(12, dtype=torch.float32),
|
| 1216 |
+
)
|
| 1217 |
+
self.assertClose(
|
| 1218 |
+
subtex.maps_padded(), tex.maps_padded()[:1].expand(2, -1, -1, -1)
|
| 1219 |
+
)
|
| 1220 |
+
|
| 1221 |
+
|
| 1222 |
+
class TestRectanglePacking(TestCaseMixin, unittest.TestCase):
|
| 1223 |
+
def setUp(self) -> None:
|
| 1224 |
+
super().setUp()
|
| 1225 |
+
torch.manual_seed(42)
|
| 1226 |
+
|
| 1227 |
+
def wrap_pack(self, sizes):
|
| 1228 |
+
"""
|
| 1229 |
+
Call the pack_rectangles function, which we want to test,
|
| 1230 |
+
and return its outputs.
|
| 1231 |
+
Additionally makes some sanity checks on the output.
|
| 1232 |
+
"""
|
| 1233 |
+
res = pack_rectangles(sizes)
|
| 1234 |
+
total = res.total_size
|
| 1235 |
+
self.assertGreaterEqual(total[0], 0)
|
| 1236 |
+
self.assertGreaterEqual(total[1], 0)
|
| 1237 |
+
mask = torch.zeros(total, dtype=torch.bool)
|
| 1238 |
+
seen_x_bound = False
|
| 1239 |
+
seen_y_bound = False
|
| 1240 |
+
for (in_x, in_y), (out_x, out_y, flipped, is_first) in zip(
|
| 1241 |
+
sizes, res.locations
|
| 1242 |
+
):
|
| 1243 |
+
self.assertTrue(is_first)
|
| 1244 |
+
self.assertGreaterEqual(out_x, 0)
|
| 1245 |
+
self.assertGreaterEqual(out_y, 0)
|
| 1246 |
+
placed_x, placed_y = (in_y, in_x) if flipped else (in_x, in_y)
|
| 1247 |
+
upper_x = placed_x + out_x
|
| 1248 |
+
upper_y = placed_y + out_y
|
| 1249 |
+
self.assertGreaterEqual(total[0], upper_x)
|
| 1250 |
+
if total[0] == upper_x:
|
| 1251 |
+
seen_x_bound = True
|
| 1252 |
+
self.assertGreaterEqual(total[1], upper_y)
|
| 1253 |
+
if total[1] == upper_y:
|
| 1254 |
+
seen_y_bound = True
|
| 1255 |
+
already_taken = torch.sum(mask[out_x:upper_x, out_y:upper_y])
|
| 1256 |
+
self.assertEqual(already_taken, 0)
|
| 1257 |
+
mask[out_x:upper_x, out_y:upper_y] = 1
|
| 1258 |
+
self.assertTrue(seen_x_bound)
|
| 1259 |
+
self.assertTrue(seen_y_bound)
|
| 1260 |
+
|
| 1261 |
+
self.assertTrue(torch.all(torch.sum(mask, dim=0, dtype=torch.int32) > 0))
|
| 1262 |
+
self.assertTrue(torch.all(torch.sum(mask, dim=1, dtype=torch.int32) > 0))
|
| 1263 |
+
return res
|
| 1264 |
+
|
| 1265 |
+
def assert_bb(self, sizes, expected):
|
| 1266 |
+
"""
|
| 1267 |
+
Apply the pack_rectangles function to sizes and verify the
|
| 1268 |
+
bounding box dimensions are expected.
|
| 1269 |
+
"""
|
| 1270 |
+
self.assertSetEqual(set(self.wrap_pack(sizes).total_size), expected)
|
| 1271 |
+
|
| 1272 |
+
def test_simple(self):
|
| 1273 |
+
self.assert_bb([(3, 4), (4, 3)], {6, 4})
|
| 1274 |
+
self.assert_bb([(2, 2), (2, 4), (2, 2)], {4})
|
| 1275 |
+
|
| 1276 |
+
# many squares
|
| 1277 |
+
self.assert_bb([(2, 2)] * 9, {2, 18})
|
| 1278 |
+
|
| 1279 |
+
# One big square and many small ones.
|
| 1280 |
+
self.assert_bb([(3, 3)] + [(1, 1)] * 2, {3, 4})
|
| 1281 |
+
self.assert_bb([(3, 3)] + [(1, 1)] * 3, {3, 4})
|
| 1282 |
+
self.assert_bb([(3, 3)] + [(1, 1)] * 4, {3, 5})
|
| 1283 |
+
self.assert_bb([(3, 3)] + [(1, 1)] * 5, {3, 5})
|
| 1284 |
+
self.assert_bb([(1, 1)] * 6 + [(3, 3)], {3, 5})
|
| 1285 |
+
self.assert_bb([(3, 3)] + [(1, 1)] * 7, {3, 6})
|
| 1286 |
+
|
| 1287 |
+
# many identical rectangles
|
| 1288 |
+
self.assert_bb([(7, 190)] * 4 + [(190, 7)] * 4, {190, 56})
|
| 1289 |
+
|
| 1290 |
+
# require placing the flipped version of a rectangle
|
| 1291 |
+
self.assert_bb([(1, 100), (5, 96), (4, 5)], {100, 6})
|
| 1292 |
+
|
| 1293 |
+
def test_random(self):
|
| 1294 |
+
for _ in range(5):
|
| 1295 |
+
vals = torch.randint(size=(20, 2), low=1, high=18)
|
| 1296 |
+
sizes = []
|
| 1297 |
+
for j in range(vals.shape[0]):
|
| 1298 |
+
sizes.append((int(vals[j, 0]), int(vals[j, 1])))
|
| 1299 |
+
self.wrap_pack(sizes)
|
| 1300 |
+
|
| 1301 |
+
def test_all_identical(self):
|
| 1302 |
+
sizes = [Rectangle(xsize=61, ysize=82, identifier=1729)] * 3
|
| 1303 |
+
total_size, locations = pack_unique_rectangles(sizes)
|
| 1304 |
+
self.assertEqual(total_size, (61, 82))
|
| 1305 |
+
self.assertEqual(len(locations), 3)
|
| 1306 |
+
for i, (x, y, is_flipped, is_first) in enumerate(locations):
|
| 1307 |
+
self.assertEqual(x, 0)
|
| 1308 |
+
self.assertEqual(y, 0)
|
| 1309 |
+
self.assertFalse(is_flipped)
|
| 1310 |
+
self.assertEqual(is_first, i == 0)
|
| 1311 |
+
|
| 1312 |
+
def test_one_different_id(self):
|
| 1313 |
+
sizes = [Rectangle(xsize=61, ysize=82, identifier=220)] * 3
|
| 1314 |
+
sizes.extend([Rectangle(xsize=61, ysize=82, identifier=284)] * 3)
|
| 1315 |
+
total_size, locations = pack_unique_rectangles(sizes)
|
| 1316 |
+
self.assertEqual(total_size, (82, 122))
|
| 1317 |
+
self.assertEqual(len(locations), 6)
|
| 1318 |
+
for i, (x, y, is_flipped, is_first) in enumerate(locations):
|
| 1319 |
+
self.assertTrue(is_flipped)
|
| 1320 |
+
self.assertEqual(is_first, i % 3 == 0)
|
| 1321 |
+
self.assertEqual(x, 0)
|
| 1322 |
+
if i < 3:
|
| 1323 |
+
self.assertEqual(y, 61)
|
| 1324 |
+
else:
|
| 1325 |
+
self.assertEqual(y, 0)
|
third_party/AnyBimanual/third_party/pytorch3d/tests/test_transforms.py
ADDED
|
@@ -0,0 +1,1350 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
[Raw diff of third_party/AnyBimanual/third_party/pytorch3d/tests/test_transforms.py (new file, 1,350 added lines) not reproduced line-by-line here. The module adds the PyTorch3D unit tests for transform3d: TestTransform covers device/dtype handling via to(), dtype propagation to child transforms, cloning, composition, construction from custom 4x4 matrices, get_se3_log(), translate/scale/rotate effects on points and normals, the eps argument of transform_points, inversion, indexing, and stacking; TestTranslate, TestScale, and TestRotate cover scalar, tensor, and broadcast construction, gradients, inverses, and indexing of the individual transforms; TestTransformBroadcast covers broadcasting of points, normals, and composed transforms; TestRotateAxisAngle covers axis-angle rotations about X, Y, and Z in degrees and radians, their composition, and input validation.]
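As a quick orientation to what the added transform tests exercise, the following is a minimal sketch, not taken from the diff, that composes the Transform3d primitives named above and applies them to points. It uses only calls that appear in the tests (compose, transform_points, inverse, get_matrix); the batch size and tensor values are illustrative.

import torch
from pytorch3d.transforms import random_rotations
from pytorch3d.transforms.transform3d import Rotate, Scale, Transform3d, Translate

# Compose scale -> rotation -> translation into one batched transform (N = 2).
R = Rotate(random_rotations(2))
t = Transform3d().compose(Scale(0.5), R, Translate(torch.rand(2, 3)))

points = torch.rand(2, 10, 3)                  # (N, P, 3)
moved = t.transform_points(points)             # apply the composed transform
restored = t.inverse().transform_points(moved)  # undo it again

assert torch.allclose(points, restored, atol=1e-4)
print(t.get_matrix().shape)                    # torch.Size([2, 4, 4])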
third_party/AnyBimanual/third_party/pytorch3d/tests/test_vert_align.py
ADDED
@@ -0,0 +1,194 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import unittest

import torch
import torch.nn.functional as F
from pytorch3d.ops.vert_align import vert_align
from pytorch3d.structures.meshes import Meshes
from pytorch3d.structures.pointclouds import Pointclouds

from .common_testing import TestCaseMixin


class TestVertAlign(TestCaseMixin, unittest.TestCase):
    @staticmethod
    def vert_align_naive(
        feats, verts, return_packed: bool = False, align_corners: bool = True
    ):
        """
        Naive implementation of vert_align.
        """
        if torch.is_tensor(feats):
            feats = [feats]
        N = feats[0].shape[0]

        out_feats = []
        # sample every example in the batch separately
        for i in range(N):
            out_i_feats = []
            for feat in feats:
                feats_i = feat[i][None, :, :, :]  # (1, C, H, W)
                if torch.is_tensor(verts):
                    grid = verts[i][None, None, :, :2]  # (1, 1, V, 2)
                elif hasattr(verts, "verts_list"):
                    grid = verts.verts_list()[i][None, None, :, :2]  # (1, 1, V, 2)
                elif hasattr(verts, "points_list"):
                    grid = verts.points_list()[i][None, None, :, :2]  # (1, 1, V, 2)
                else:
                    raise ValueError("verts_or_meshes is invalid")
                feat_sampled_i = F.grid_sample(
                    feats_i,
                    grid,
                    mode="bilinear",
                    padding_mode="zeros",
                    align_corners=align_corners,
                )  # (1, C, 1, V)
                feat_sampled_i = feat_sampled_i.squeeze(2).squeeze(0)  # (C, V)
                feat_sampled_i = feat_sampled_i.transpose(1, 0)  # (V, C)
                out_i_feats.append(feat_sampled_i)
            out_i_feats = torch.cat(out_i_feats, 1)  # (V, sum(C))
            out_feats.append(out_i_feats)

        if return_packed:
            out_feats = torch.cat(out_feats, 0)  # (sum(V), sum(C))
        else:
            out_feats = torch.stack(out_feats, 0)  # (N, V, sum(C))
        return out_feats

    @staticmethod
    def init_meshes(
        num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000
    ) -> Meshes:
        device = torch.device("cuda:0")
        verts_list = []
        faces_list = []
        for _ in range(num_meshes):
            verts = (
                torch.rand((num_verts, 3), dtype=torch.float32, device=device) * 2.0
                - 1.0
            )  # verts in the space of [-1, 1]
            faces = torch.randint(
                num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
            )
            verts_list.append(verts)
            faces_list.append(faces)
        meshes = Meshes(verts_list, faces_list)

        return meshes

    @staticmethod
    def init_pointclouds(num_clouds: int = 10, num_points: int = 1000) -> Pointclouds:
        device = torch.device("cuda:0")
        points_list = []
        for _ in range(num_clouds):
            points = (
                torch.rand((num_points, 3), dtype=torch.float32, device=device) * 2.0
                - 1.0
            )  # points in the space of [-1, 1]
            points_list.append(points)
        pointclouds = Pointclouds(points=points_list)

        return pointclouds

    @staticmethod
    def init_feats(batch_size: int = 10, num_channels: int = 256, device: str = "cuda"):
        H, W = [14, 28], [14, 28]
        feats = []
        for h, w in zip(H, W):
            feats.append(torch.rand((batch_size, num_channels, h, w), device=device))
        return feats

    def test_vert_align_with_meshes(self):
        """
        Test vert align vs naive implementation with meshes.
        """
        meshes = TestVertAlign.init_meshes(10, 1000, 3000)
        feats = TestVertAlign.init_feats(10, 256)

        # feats in list
        out = vert_align(feats, meshes, return_packed=True)
        naive_out = TestVertAlign.vert_align_naive(feats, meshes, return_packed=True)
        self.assertClose(out, naive_out)

        # feats as tensor
        out = vert_align(feats[0], meshes, return_packed=True)
        naive_out = TestVertAlign.vert_align_naive(feats[0], meshes, return_packed=True)
        self.assertClose(out, naive_out)

    def test_vert_align_with_pointclouds(self):
        """
        Test vert align vs naive implementation with pointclouds.
        """
        pointclouds = TestVertAlign.init_pointclouds(10, 1000)
        feats = TestVertAlign.init_feats(10, 256)

        # feats in list
        out = vert_align(feats, pointclouds, return_packed=True)
        naive_out = TestVertAlign.vert_align_naive(
            feats, pointclouds, return_packed=True
        )
        self.assertClose(out, naive_out)

        # feats as tensor
        out = vert_align(feats[0], pointclouds, return_packed=True)
        naive_out = TestVertAlign.vert_align_naive(
            feats[0], pointclouds, return_packed=True
        )
        self.assertClose(out, naive_out)

    def test_vert_align_with_verts(self):
        """
        Test vert align vs naive implementation with verts as tensor.
        """
        feats = TestVertAlign.init_feats(10, 256)
        verts = (
            torch.rand((10, 100, 3), dtype=torch.float32, device=feats[0].device) * 2.0
            - 1.0
        )

        # feats in list
        out = vert_align(feats, verts, return_packed=True)
        naive_out = TestVertAlign.vert_align_naive(feats, verts, return_packed=True)
        self.assertClose(out, naive_out)

        # feats as tensor
        out = vert_align(feats[0], verts, return_packed=True)
        naive_out = TestVertAlign.vert_align_naive(feats[0], verts, return_packed=True)
        self.assertClose(out, naive_out)

        out2 = vert_align(feats[0], verts, return_packed=True, align_corners=False)
        naive_out2 = TestVertAlign.vert_align_naive(
            feats[0], verts, return_packed=True, align_corners=False
        )
        self.assertFalse(torch.allclose(out, out2))
        self.assertTrue(torch.allclose(out2, naive_out2))

    @staticmethod
    def vert_align_with_init(
        num_meshes: int, num_verts: int, num_faces: int, device: str = "cpu"
    ):
        device = torch.device(device)
        verts_list = []
        faces_list = []
        for _ in range(num_meshes):
            verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device)
            faces = torch.randint(
                num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
            )
            verts_list.append(verts)
            faces_list.append(faces)
        meshes = Meshes(verts_list, faces_list)
        feats = TestVertAlign.init_feats(num_meshes, device=device)
        torch.cuda.synchronize()

        def sample_features():
            vert_align(feats, meshes, return_packed=True)
            torch.cuda.synchronize()

        return sample_features
third_party/AnyBimanual/third_party/pytorch3d/tests/test_vis.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
import unittest
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from pytorch3d.renderer import HeterogeneousRayBundle, PerspectiveCameras, RayBundle
|
| 11 |
+
from pytorch3d.structures import Meshes, Pointclouds
|
| 12 |
+
from pytorch3d.transforms import random_rotations
|
| 13 |
+
|
| 14 |
+
# Some of these imports are only needed for testing code coverage
|
| 15 |
+
from pytorch3d.vis import ( # noqa: F401
|
| 16 |
+
get_camera_wireframe, # noqa: F401
|
| 17 |
+
plot_batch_individually, # noqa: F401
|
| 18 |
+
plot_scene,
|
| 19 |
+
texturesuv_image_PIL, # noqa: F401
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class TestPlotlyVis(unittest.TestCase):
|
| 24 |
+
def test_plot_scene(
|
| 25 |
+
self,
|
| 26 |
+
B: int = 3,
|
| 27 |
+
n_rays: int = 128,
|
| 28 |
+
n_pts_per_ray: int = 32,
|
| 29 |
+
n_verts: int = 32,
|
| 30 |
+
n_edges: int = 64,
|
| 31 |
+
n_pts: int = 256,
|
| 32 |
+
):
|
| 33 |
+
"""
|
| 34 |
+
Tests plotting of all supported structures using plot_scene.
|
| 35 |
+
"""
|
| 36 |
+
for device in ["cpu", "cuda:0"]:
|
| 37 |
+
plot_scene(
|
| 38 |
+
{
|
| 39 |
+
"scene": {
|
| 40 |
+
"ray_bundle": RayBundle(
|
| 41 |
+
origins=torch.randn(B, n_rays, 3, device=device),
|
| 42 |
+
xys=torch.randn(B, n_rays, 2, device=device),
|
| 43 |
+
directions=torch.randn(B, n_rays, 3, device=device),
|
| 44 |
+
lengths=torch.randn(
|
| 45 |
+
B, n_rays, n_pts_per_ray, device=device
|
| 46 |
+
),
|
| 47 |
+
),
|
| 48 |
+
"heterogeneous_ray_bundle": HeterogeneousRayBundle(
|
| 49 |
+
origins=torch.randn(B * n_rays, 3, device=device),
|
| 50 |
+
xys=torch.randn(B * n_rays, 2, device=device),
|
| 51 |
+
directions=torch.randn(B * n_rays, 3, device=device),
|
| 52 |
+
lengths=torch.randn(
|
| 53 |
+
B * n_rays, n_pts_per_ray, device=device
|
| 54 |
+
),
|
| 55 |
+
camera_ids=torch.randint(
|
| 56 |
+
low=0, high=B, size=(B * n_rays,), device=device
|
| 57 |
+
),
|
| 58 |
+
),
|
| 59 |
+
"camera": PerspectiveCameras(
|
| 60 |
+
R=random_rotations(B, device=device),
|
| 61 |
+
T=torch.randn(B, 3, device=device),
|
| 62 |
+
),
|
| 63 |
+
"mesh": Meshes(
|
| 64 |
+
verts=torch.randn(B, n_verts, 3, device=device),
|
| 65 |
+
faces=torch.randint(
|
| 66 |
+
low=0, high=n_verts, size=(B, n_edges, 3), device=device
|
| 67 |
+
),
|
| 68 |
+
),
|
| 69 |
+
"point_clouds": Pointclouds(
|
| 70 |
+
points=torch.randn(B, n_pts, 3, device=device),
|
| 71 |
+
),
|
| 72 |
+
}
|
| 73 |
+
}
|
| 74 |
+
)
|
third_party/AnyBimanual/third_party/pytorch3d/tests/test_volumes.py
ADDED
|
@@ -0,0 +1,987 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
import copy
|
| 8 |
+
import itertools
|
| 9 |
+
import random
|
| 10 |
+
import unittest
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
import torch
|
| 14 |
+
from pytorch3d.structures.volumes import VolumeLocator, Volumes
|
| 15 |
+
from pytorch3d.transforms import Scale
|
| 16 |
+
|
| 17 |
+
from .common_testing import TestCaseMixin
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class TestVolumes(TestCaseMixin, unittest.TestCase):
|
| 21 |
+
def setUp(self) -> None:
|
| 22 |
+
np.random.seed(42)
|
| 23 |
+
torch.manual_seed(42)
|
| 24 |
+
random.seed(42)
|
| 25 |
+
|
| 26 |
+
@staticmethod
|
| 27 |
+
def _random_volume_list(
|
| 28 |
+
num_volumes, min_size, max_size, num_channels, device, rand_sizes=None
|
| 29 |
+
):
|
| 30 |
+
"""
|
| 31 |
+
Init a list of `num_volumes` random tensors of size [num_channels, *rand_size].
|
| 32 |
+
If `rand_sizes` is None, rand_size is a 3D long vector sampled
|
| 33 |
+
from [min_size, max_size]. Otherwise, rand_size should be a list
|
| 34 |
+
[rand_size_1, rand_size_2, ..., rand_size_num_volumes] where each
|
| 35 |
+
`rand_size_i` denotes the size of the corresponding `i`-th tensor.
|
| 36 |
+
"""
|
| 37 |
+
if rand_sizes is None:
|
| 38 |
+
rand_sizes = [
|
| 39 |
+
[random.randint(min_size, vs) for vs in max_size]
|
| 40 |
+
for _ in range(num_volumes)
|
| 41 |
+
]
|
| 42 |
+
|
| 43 |
+
volume_list = [
|
| 44 |
+
torch.randn(
|
| 45 |
+
size=[num_channels, *rand_size], device=device, dtype=torch.float32
|
| 46 |
+
)
|
| 47 |
+
for rand_size in rand_sizes
|
| 48 |
+
]
|
| 49 |
+
|
| 50 |
+
return volume_list, rand_sizes
|
| 51 |
+
|
| 52 |
+
def _check_indexed_volumes(self, v, selected, indices):
|
| 53 |
+
for selectedIdx, index in indices:
|
| 54 |
+
self.assertClose(selected.densities()[selectedIdx], v.densities()[index])
|
| 55 |
+
self.assertClose(
|
| 56 |
+
v.locator._local_to_world_transform.get_matrix()[index],
|
| 57 |
+
selected.locator._local_to_world_transform.get_matrix()[selectedIdx],
|
| 58 |
+
)
|
| 59 |
+
if selected.features() is not None:
|
| 60 |
+
self.assertClose(selected.features()[selectedIdx], v.features()[index])
|
| 61 |
+
|
| 62 |
+
def test_get_item(
|
| 63 |
+
self,
|
| 64 |
+
num_volumes=5,
|
| 65 |
+
num_channels=4,
|
| 66 |
+
volume_size=(10, 13, 8),
|
| 67 |
+
dtype=torch.float32,
|
| 68 |
+
):
|
| 69 |
+
|
| 70 |
+
device = torch.device("cuda:0")
|
| 71 |
+
|
| 72 |
+
# make sure we have at least 3 volumes to prevent indexing crash
|
| 73 |
+
num_volumes = max(num_volumes, 3)
|
| 74 |
+
|
| 75 |
+
features = torch.randn(
|
| 76 |
+
size=[num_volumes, num_channels, *volume_size],
|
| 77 |
+
device=device,
|
| 78 |
+
dtype=torch.float32,
|
| 79 |
+
)
|
| 80 |
+
densities = torch.randn(
|
| 81 |
+
size=[num_volumes, 1, *volume_size], device=device, dtype=torch.float32
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
features_list, rand_sizes = TestVolumes._random_volume_list(
|
| 85 |
+
num_volumes, 3, volume_size, num_channels, device
|
| 86 |
+
)
|
| 87 |
+
densities_list, _ = TestVolumes._random_volume_list(
|
| 88 |
+
num_volumes, 3, volume_size, 1, device, rand_sizes=rand_sizes
|
| 89 |
+
)
|
| 90 |
+
|
| 91 |
+
volume_translation = -torch.randn(num_volumes, 3).type_as(features)
|
| 92 |
+
voxel_size = torch.rand(num_volumes, 1).type_as(features) + 0.5
|
| 93 |
+
|
| 94 |
+
for features_, densities_ in zip(
|
| 95 |
+
(None, features, features_list), (densities, densities, densities_list)
|
| 96 |
+
):
|
| 97 |
+
|
| 98 |
+
# init the volume structure
|
| 99 |
+
v = Volumes(
|
| 100 |
+
features=features_,
|
| 101 |
+
densities=densities_,
|
| 102 |
+
volume_translation=volume_translation,
|
| 103 |
+
voxel_size=voxel_size,
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
# int index
|
| 107 |
+
index = 1
|
| 108 |
+
v_selected = v[index]
|
| 109 |
+
self.assertEqual(len(v_selected), 1)
|
| 110 |
+
self._check_indexed_volumes(v, v_selected, [(0, 1)])
|
| 111 |
+
|
| 112 |
+
# list index
|
| 113 |
+
index = [1, 2]
|
| 114 |
+
v_selected = v[index]
|
| 115 |
+
self.assertEqual(len(v_selected), len(index))
|
| 116 |
+
self._check_indexed_volumes(v, v_selected, enumerate(index))
|
| 117 |
+
|
| 118 |
+
# slice index
|
| 119 |
+
index = slice(0, 2, 1)
|
| 120 |
+
v_selected = v[0:2]
|
| 121 |
+
self.assertEqual(len(v_selected), 2)
|
| 122 |
+
self._check_indexed_volumes(v, v_selected, [(0, 0), (1, 1)])
|
| 123 |
+
|
| 124 |
+
# bool tensor
|
| 125 |
+
index = (torch.rand(num_volumes) > 0.5).to(device)
|
| 126 |
+
index[:2] = True # make sure smth is selected
|
| 127 |
+
v_selected = v[index]
|
| 128 |
+
self.assertEqual(len(v_selected), index.sum())
|
| 129 |
+
self._check_indexed_volumes(
|
| 130 |
+
v,
|
| 131 |
+
v_selected,
|
| 132 |
+
zip(
|
| 133 |
+
torch.arange(index.sum()),
|
| 134 |
+
torch.nonzero(index, as_tuple=False).squeeze(),
|
| 135 |
+
),
|
| 136 |
+
)
|
| 137 |
+
|
| 138 |
+
# int tensor
|
| 139 |
+
index = torch.tensor([1, 2], dtype=torch.int64, device=device)
|
| 140 |
+
v_selected = v[index]
|
| 141 |
+
self.assertEqual(len(v_selected), index.numel())
|
| 142 |
+
self._check_indexed_volumes(v, v_selected, enumerate(index.tolist()))
|
| 143 |
+
|
| 144 |
+
# invalid index
|
| 145 |
+
index = torch.tensor([1, 0, 1], dtype=torch.float32, device=device)
|
| 146 |
+
with self.assertRaises(IndexError):
|
| 147 |
+
v_selected = v[index]
|
| 148 |
+
index = 1.2 # floating point index
|
| 149 |
+
with self.assertRaises(IndexError):
|
| 150 |
+
v_selected = v[index]
|
| 151 |
+
|
| 152 |
+
def test_locator_init(self, batch_size=9, resolution=(3, 5, 7)):
|
| 153 |
+
with self.subTest("VolumeLocator init with all sizes equal"):
|
| 154 |
+
grid_sizes = [resolution for _ in range(batch_size)]
|
| 155 |
+
locator_tuple = VolumeLocator(
|
| 156 |
+
batch_size=batch_size, grid_sizes=resolution, device=torch.device("cpu")
|
| 157 |
+
)
|
| 158 |
+
locator_list = VolumeLocator(
|
| 159 |
+
batch_size=batch_size, grid_sizes=grid_sizes, device=torch.device("cpu")
|
| 160 |
+
)
|
| 161 |
+
locator_tensor = VolumeLocator(
|
| 162 |
+
batch_size=batch_size,
|
| 163 |
+
grid_sizes=torch.tensor(grid_sizes),
|
| 164 |
+
device=torch.device("cpu"),
|
| 165 |
+
)
|
| 166 |
+
expected_grid_sizes = torch.tensor(grid_sizes)
|
| 167 |
+
expected_resolution = resolution
|
| 168 |
+
assert torch.allclose(expected_grid_sizes, locator_tuple._grid_sizes)
|
| 169 |
+
assert torch.allclose(expected_grid_sizes, locator_list._grid_sizes)
|
| 170 |
+
assert torch.allclose(expected_grid_sizes, locator_tensor._grid_sizes)
|
| 171 |
+
self.assertEqual(expected_resolution, locator_tuple._resolution)
|
| 172 |
+
self.assertEqual(expected_resolution, locator_list._resolution)
|
| 173 |
+
self.assertEqual(expected_resolution, locator_tensor._resolution)
|
| 174 |
+
|
| 175 |
+
with self.subTest("VolumeLocator with different sizes in different grids"):
|
| 176 |
+
grid_sizes_list = [
|
| 177 |
+
torch.randint(low=1, high=42, size=(3,)) for _ in range(batch_size)
|
| 178 |
+
]
|
| 179 |
+
grid_sizes_tensor = torch.cat([el[None] for el in grid_sizes_list])
|
| 180 |
+
locator_list = VolumeLocator(
|
| 181 |
+
batch_size=batch_size,
|
| 182 |
+
grid_sizes=grid_sizes_list,
|
| 183 |
+
device=torch.device("cpu"),
|
| 184 |
+
)
|
| 185 |
+
locator_tensor = VolumeLocator(
|
| 186 |
+
batch_size=batch_size,
|
| 187 |
+
grid_sizes=grid_sizes_tensor,
|
| 188 |
+
device=torch.device("cpu"),
|
| 189 |
+
)
|
| 190 |
+
expected_grid_sizes = grid_sizes_tensor
|
| 191 |
+
expected_resolution = tuple(torch.max(expected_grid_sizes, dim=0).values)
|
| 192 |
+
assert torch.allclose(expected_grid_sizes, locator_list._grid_sizes)
|
| 193 |
+
assert torch.allclose(expected_grid_sizes, locator_tensor._grid_sizes)
|
| 194 |
+
self.assertEqual(expected_resolution, locator_list._resolution)
|
| 195 |
+
self.assertEqual(expected_resolution, locator_tensor._resolution)
|
| 196 |
+
|
| 197 |
+
def test_coord_transforms(self, num_volumes=3, num_channels=4, dtype=torch.float32):
|
| 198 |
+
"""
|
| 199 |
+
Test the correctness of the conversion between the internal
|
| 200 |
+
Transform3D Volumes.VolumeLocator._local_to_world_transform and the initialization
|
| 201 |
+
from the translation and voxel_size.
|
| 202 |
+
"""
|
| 203 |
+
|
| 204 |
+
device = torch.device("cuda:0")
|
| 205 |
+
|
| 206 |
+
# try for 10 sets of different random sizes/centers/voxel_sizes
|
| 207 |
+
for _ in range(10):
|
| 208 |
+
|
| 209 |
+
size = torch.randint(high=10, size=(3,), low=3).tolist()
|
| 210 |
+
|
| 211 |
+
densities = torch.randn(
|
| 212 |
+
size=[num_volumes, num_channels, *size],
|
| 213 |
+
device=device,
|
| 214 |
+
dtype=torch.float32,
|
| 215 |
+
)
|
| 216 |
+
|
| 217 |
+
# init the transformation params
|
| 218 |
+
volume_translation = torch.randn(num_volumes, 3)
|
| 219 |
+
voxel_size = torch.rand(num_volumes, 3) * 3.0 + 0.5
|
| 220 |
+
|
| 221 |
+
# get the corresponding Transform3d object
|
| 222 |
+
local_offset = torch.tensor(list(size), dtype=torch.float32, device=device)[
|
| 223 |
+
[2, 1, 0]
|
| 224 |
+
][None].repeat(num_volumes, 1)
|
| 225 |
+
local_to_world_transform = (
|
| 226 |
+
Scale(0.5 * local_offset - 0.5, device=device)
|
| 227 |
+
.scale(voxel_size)
|
| 228 |
+
.translate(-volume_translation)
|
| 229 |
+
)
|
| 230 |
+
|
| 231 |
+
# init the volume structures with the scale and translation,
|
| 232 |
+
# then get the coord grid in world coords
|
| 233 |
+
v_trans_vs = Volumes(
|
| 234 |
+
densities=densities,
|
| 235 |
+
voxel_size=voxel_size,
|
| 236 |
+
volume_translation=volume_translation,
|
| 237 |
+
)
|
| 238 |
+
grid_rot_trans_vs = v_trans_vs.get_coord_grid(world_coordinates=True)
|
| 239 |
+
|
| 240 |
+
# map the default local coords to the world coords
|
| 241 |
+
# with local_to_world_transform
|
| 242 |
+
v_default = Volumes(densities=densities)
|
| 243 |
+
grid_default_local = v_default.get_coord_grid(world_coordinates=False)
|
| 244 |
+
grid_default_world = local_to_world_transform.transform_points(
|
| 245 |
+
grid_default_local.view(num_volumes, -1, 3)
|
| 246 |
+
).view(num_volumes, *size, 3)
|
| 247 |
+
|
| 248 |
+
# check that both grids are the same
|
| 249 |
+
self.assertClose(grid_rot_trans_vs, grid_default_world, atol=1e-5)
|
| 250 |
+
|
| 251 |
+
# check that the transformations are the same
|
| 252 |
+
self.assertClose(
|
| 253 |
+
v_trans_vs.get_local_to_world_coords_transform().get_matrix(),
|
| 254 |
+
local_to_world_transform.get_matrix(),
|
| 255 |
+
atol=1e-5,
|
| 256 |
+
)
|
| 257 |
+
|
| 258 |
+
def test_coord_grid_convention(
|
| 259 |
+
self, num_volumes=3, num_channels=4, dtype=torch.float32
|
| 260 |
+
):
|
| 261 |
+
"""
|
| 262 |
+
Check that for a trivial volume with spatial size DxHxW=5x7x5:
|
| 263 |
+
1) xyz_world=(0, 0, 0) lands right in the middle of the volume
|
| 264 |
+
with xyz_local=(0, 0, 0).
|
| 265 |
+
2) xyz_world=(-2, 3, 2) results in xyz_local=(-1, 1, -1).
|
| 266 |
+
3) The centeral voxel of the volume coordinate grid
|
| 267 |
+
has coords x_world=(0, 0, 0) and x_local=(0, 0, 0)
|
| 268 |
+
4) grid_sampler(world_coordinate_grid, local_coordinate_grid)
|
| 269 |
+
is the same as world_coordinate_grid itself. I.e. the local coordinate
|
| 270 |
+
grid matches the grid_sampler coordinate convention.
|
| 271 |
+
"""
|
| 272 |
+
|
| 273 |
+
device = torch.device("cuda:0")
|
| 274 |
+
|
| 275 |
+
densities = torch.randn(
|
| 276 |
+
size=[num_volumes, num_channels, 5, 7, 5],
|
| 277 |
+
device=device,
|
| 278 |
+
dtype=torch.float32,
|
| 279 |
+
)
|
| 280 |
+
v_trivial = Volumes(densities=densities)
|
| 281 |
+
|
| 282 |
+
# check the case with x_world=(0,0,0)
|
| 283 |
+
pts_world = torch.zeros(num_volumes, 1, 3, device=device, dtype=torch.float32)
|
| 284 |
+
pts_local = v_trivial.world_to_local_coords(pts_world)
|
| 285 |
+
pts_local_expected = torch.zeros_like(pts_local)
|
| 286 |
+
self.assertClose(pts_local, pts_local_expected)
|
| 287 |
+
|
| 288 |
+
# check the case with x_world=(-2, 3, -2)
|
| 289 |
+
pts_world = torch.tensor([-2, 3, -2], device=device, dtype=torch.float32)[
|
| 290 |
+
None, None
|
| 291 |
+
].repeat(num_volumes, 1, 1)
|
| 292 |
+
pts_local = v_trivial.world_to_local_coords(pts_world)
|
| 293 |
+
pts_local_expected = torch.tensor(
|
| 294 |
+
[-1, 1, -1], device=device, dtype=torch.float32
|
| 295 |
+
)[None, None].repeat(num_volumes, 1, 1)
|
| 296 |
+
self.assertClose(pts_local, pts_local_expected)
|
| 297 |
+
|
| 298 |
+
# check that the central voxel has coords x_world=(0, 0, 0) and x_local(0, 0, 0)
|
| 299 |
+
grid_world = v_trivial.get_coord_grid(world_coordinates=True)
|
| 300 |
+
grid_local = v_trivial.get_coord_grid(world_coordinates=False)
|
| 301 |
+
for grid in (grid_world, grid_local):
|
| 302 |
+
x0 = grid[0, :, :, 2, 0]
|
| 303 |
+
y0 = grid[0, :, 3, :, 1]
|
| 304 |
+
z0 = grid[0, 2, :, :, 2]
|
| 305 |
+
for coord_line in (x0, y0, z0):
|
| 306 |
+
self.assertClose(coord_line, torch.zeros_like(coord_line), atol=1e-7)
|
| 307 |
+
|
| 308 |
+
# resample grid_world using grid_sampler with local coords
|
| 309 |
+
# -> make sure the resampled version is the same as original
|
| 310 |
+
grid_world_resampled = torch.nn.functional.grid_sample(
|
| 311 |
+
grid_world.permute(0, 4, 1, 2, 3), grid_local, align_corners=True
|
| 312 |
+
).permute(0, 2, 3, 4, 1)
|
| 313 |
+
self.assertClose(grid_world_resampled, grid_world, atol=1e-7)
|
| 314 |
+
|
| 315 |
+
for align_corners in [True, False]:
|
| 316 |
+
v_trivial = Volumes(densities=densities, align_corners=align_corners)
|
| 317 |
+
|
| 318 |
+
# check the case with x_world=(0,0,0)
|
| 319 |
+
pts_world = torch.zeros(
|
| 320 |
+
num_volumes, 1, 3, device=device, dtype=torch.float32
|
| 321 |
+
)
|
| 322 |
+
pts_local = v_trivial.world_to_local_coords(pts_world)
|
| 323 |
+
pts_local_expected = torch.zeros_like(pts_local)
|
| 324 |
+
self.assertClose(pts_local, pts_local_expected)
|
| 325 |
+
|
| 326 |
+
# check the case with x_world=(-2, 3, -2)
|
| 327 |
+
pts_world_tuple = [-2, 3, -2]
|
| 328 |
+
pts_world = torch.tensor(
|
| 329 |
+
pts_world_tuple, device=device, dtype=torch.float32
|
| 330 |
+
)[None, None].repeat(num_volumes, 1, 1)
|
| 331 |
+
pts_local = v_trivial.world_to_local_coords(pts_world)
|
| 332 |
+
pts_local_expected = torch.tensor(
|
| 333 |
+
[-1, 1, -1], device=device, dtype=torch.float32
|
| 334 |
+
)[None, None].repeat(num_volumes, 1, 1)
|
| 335 |
+
self.assertClose(pts_local, pts_local_expected)
|
| 336 |
+
|
| 337 |
+
# # check that the central voxel has coords x_world=(0, 0, 0) and x_local(0, 0, 0)
|
| 338 |
+
grid_world = v_trivial.get_coord_grid(world_coordinates=True)
|
| 339 |
+
grid_local = v_trivial.get_coord_grid(world_coordinates=False)
|
| 340 |
+
for grid in (grid_world, grid_local):
|
| 341 |
+
x0 = grid[0, :, :, 2, 0]
|
| 342 |
+
y0 = grid[0, :, 3, :, 1]
|
| 343 |
+
z0 = grid[0, 2, :, :, 2]
|
| 344 |
+
for coord_line in (x0, y0, z0):
|
| 345 |
+
self.assertClose(
|
| 346 |
+
coord_line, torch.zeros_like(coord_line), atol=1e-7
|
| 347 |
+
)
|
| 348 |
+
|
| 349 |
+
# resample grid_world using grid_sampler with local coords
|
| 350 |
+
# -> make sure the resampled version is the same as original
|
| 351 |
+
grid_world_resampled = torch.nn.functional.grid_sample(
|
| 352 |
+
grid_world.permute(0, 4, 1, 2, 3),
|
| 353 |
+
grid_local,
|
| 354 |
+
align_corners=align_corners,
|
| 355 |
+
).permute(0, 2, 3, 4, 1)
|
| 356 |
+
self.assertClose(grid_world_resampled, grid_world, atol=1e-7)
|
| 357 |
+
|
| 358 |
+
def test_coord_grid_convention_heterogeneous(
|
| 359 |
+
self, num_channels=4, dtype=torch.float32
|
| 360 |
+
):
|
| 361 |
+
"""
|
| 362 |
+
Check that for a list of 2 trivial volumes with
|
| 363 |
+
spatial sizes DxHxW=(5x7x5, 3x5x5):
|
| 364 |
+
1) xyz_world=(0, 0, 0) lands right in the middle of the volume
|
| 365 |
+
with xyz_local=(0, 0, 0).
|
| 366 |
+
2) xyz_world=((-2, 3, -2), (-2, -2, 1)) results
|
| 367 |
+
in xyz_local=((-1, 1, -1), (-1, -1, 1)).
|
| 368 |
+
3) The centeral voxel of the volume coordinate grid
|
| 369 |
+
has coords x_world=(0, 0, 0) and x_local=(0, 0, 0)
|
| 370 |
+
4) grid_sampler(world_coordinate_grid, local_coordinate_grid)
|
| 371 |
+
is the same as world_coordinate_grid itself. I.e. the local coordinate
|
| 372 |
+
grid matches the grid_sampler coordinate convention.
|
| 373 |
+
"""
|
| 374 |
+
|
| 375 |
+
device = torch.device("cuda:0")
|
| 376 |
+
|
| 377 |
+
sizes = [(5, 7, 5), (3, 5, 5)]
|
| 378 |
+
|
| 379 |
+
densities_list = [
|
| 380 |
+
torch.randn(size=[num_channels, *size], device=device, dtype=torch.float32)
|
| 381 |
+
for size in sizes
|
| 382 |
+
]
|
| 383 |
+
|
| 384 |
+
# init the volume
|
| 385 |
+
v_trivial = Volumes(densities=densities_list)
|
| 386 |
+
|
| 387 |
+
# check the border point locations
|
| 388 |
+
pts_world = torch.tensor(
|
| 389 |
+
[[-2.0, 3.0, -2.0], [-2.0, -2.0, 1.0]], device=device, dtype=torch.float32
|
| 390 |
+
)[:, None]
|
| 391 |
+
pts_local = v_trivial.world_to_local_coords(pts_world)
|
| 392 |
+
pts_local_expected = torch.tensor(
|
| 393 |
+
[[-1.0, 1.0, -1.0], [-1.0, -1.0, 1.0]], device=device, dtype=torch.float32
|
| 394 |
+
)[:, None]
|
| 395 |
+
self.assertClose(pts_local, pts_local_expected)
|
| 396 |
+
|
| 397 |
+
# check that the central voxel has coords x_world=(0, 0, 0) and x_local(0, 0, 0)
|
| 398 |
+
grid_world = v_trivial.get_coord_grid(world_coordinates=True)
|
| 399 |
+
grid_local = v_trivial.get_coord_grid(world_coordinates=False)
|
| 400 |
+
for grid in (grid_world, grid_local):
|
| 401 |
+
x0 = grid[0, :, :, 2, 0]
|
| 402 |
+
y0 = grid[0, :, 3, :, 1]
|
| 403 |
+
z0 = grid[0, 2, :, :, 2]
|
| 404 |
+
for coord_line in (x0, y0, z0):
|
| 405 |
+
self.assertClose(coord_line, torch.zeros_like(coord_line), atol=1e-7)
|
| 406 |
+
x0 = grid[1, :, :, 2, 0]
|
| 407 |
+
y0 = grid[1, :, 2, :, 1]
|
| 408 |
+
z0 = grid[1, 1, :, :, 2]
|
| 409 |
+
for coord_line in (x0, y0, z0):
|
| 410 |
+
self.assertClose(coord_line, torch.zeros_like(coord_line), atol=1e-7)
|
| 411 |
+
|
| 412 |
+
# resample grid_world using grid_sampler with local coords
|
| 413 |
+
# -> make sure the resampled version is the same as original
|
| 414 |
+
for grid_world_, grid_local_, size in zip(grid_world, grid_local, sizes):
|
| 415 |
+
grid_world_crop = grid_world_[: size[0], : size[1], : size[2], :][None]
|
| 416 |
+
grid_local_crop = grid_local_[: size[0], : size[1], : size[2], :][None]
|
| 417 |
+
grid_world_crop_resampled = torch.nn.functional.grid_sample(
|
| 418 |
+
grid_world_crop.permute(0, 4, 1, 2, 3),
|
| 419 |
+
grid_local_crop,
|
| 420 |
+
align_corners=True,
|
| 421 |
+
).permute(0, 2, 3, 4, 1)
|
| 422 |
+
self.assertClose(grid_world_crop_resampled, grid_world_crop, atol=1e-7)
|
| 423 |
+
|
| 424 |
+
def test_coord_grid_transforms(
|
| 425 |
+
self, num_volumes=3, num_channels=4, dtype=torch.float32
|
| 426 |
+
):
|
| 427 |
+
"""
|
| 428 |
+
Test whether conversion between local-world coordinates of the
|
| 429 |
+
volume returns correct results.
|
| 430 |
+
"""
|
| 431 |
+
|
| 432 |
+
device = torch.device("cuda:0")
|
| 433 |
+
|
| 434 |
+
# try for 10 sets of different random sizes/centers/voxel_sizes
|
| 435 |
+
for _ in range(10):
|
| 436 |
+
|
| 437 |
+
size = torch.randint(high=10, size=(3,), low=3).tolist()
|
| 438 |
+
|
| 439 |
+
center = torch.randn(num_volumes, 3, dtype=torch.float32, device=device)
|
| 440 |
+
voxel_size = torch.rand(1, dtype=torch.float32, device=device) * 5.0 + 0.5
|
| 441 |
+
|
| 442 |
+
for densities in (
|
| 443 |
+
torch.randn(
|
| 444 |
+
size=[num_volumes, num_channels, *size],
|
| 445 |
+
device=device,
|
| 446 |
+
dtype=torch.float32,
|
| 447 |
+
),
|
| 448 |
+
TestVolumes._random_volume_list(
|
| 449 |
+
num_volumes, 3, size, num_channels, device, rand_sizes=None
|
| 450 |
+
)[0],
|
| 451 |
+
):
|
| 452 |
+
|
| 453 |
+
# init the volume structure
|
| 454 |
+
v = Volumes(
|
| 455 |
+
densities=densities,
|
| 456 |
+
voxel_size=voxel_size,
|
| 457 |
+
volume_translation=-center,
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
# get local coord grid
|
| 461 |
+
grid_local = v.get_coord_grid(world_coordinates=False)
|
| 462 |
+
|
| 463 |
+
# convert from world to local to world
|
| 464 |
+
grid_world = v.get_coord_grid(world_coordinates=True)
|
| 465 |
+
grid_local_2 = v.world_to_local_coords(grid_world)
|
| 466 |
+
grid_world_2 = v.local_to_world_coords(grid_local_2)
|
| 467 |
+
|
| 468 |
+
# assertions on shape and values of grid_world and grid_local
|
| 469 |
+
self.assertClose(grid_world, grid_world_2, atol=1e-5)
|
| 470 |
+
self.assertClose(grid_local, grid_local_2, atol=1e-5)
|
| 471 |
+
|
| 472 |
+
# check that the individual slices of the location grid have
|
| 473 |
+
# constant values along expected dimensions
|
| 474 |
+
for plane_dim in (1, 2, 3):
|
| 475 |
+
for grid_plane in grid_world.split(1, dim=plane_dim):
|
| 476 |
+
grid_coord_dim = {1: 2, 2: 1, 3: 0}[plane_dim]
|
| 477 |
+
grid_coord_plane = grid_plane.squeeze()[..., grid_coord_dim]
|
| 478 |
+
# check that all elements of grid_coord_plane are
|
| 479 |
+
# the same for each batch element
|
| 480 |
+
self.assertClose(
|
| 481 |
+
grid_coord_plane.reshape(num_volumes, -1).max(dim=1).values,
|
| 482 |
+
grid_coord_plane.reshape(num_volumes, -1).min(dim=1).values,
|
| 483 |
+
)
|
| 484 |
+
|
| 485 |
+
def test_clone(
|
| 486 |
+
self, num_volumes=3, num_channels=4, size=(6, 8, 10), dtype=torch.float32
|
| 487 |
+
):
|
| 488 |
+
"""
|
| 489 |
+
Test cloning of a `Volumes` object
|
| 490 |
+
"""
|
| 491 |
+
|
| 492 |
+
device = torch.device("cuda:0")
|
| 493 |
+
|
| 494 |
+
features = torch.randn(
|
| 495 |
+
size=[num_volumes, num_channels, *size], device=device, dtype=torch.float32
|
| 496 |
+
)
|
| 497 |
+
densities = torch.rand(
|
| 498 |
+
size=[num_volumes, 1, *size], device=device, dtype=torch.float32
|
| 499 |
+
)
|
| 500 |
+
|
| 501 |
+
for has_features in (True, False):
|
| 502 |
+
v = Volumes(
|
| 503 |
+
densities=densities, features=features if has_features else None
|
| 504 |
+
)
|
| 505 |
+
vnew = v.clone()
|
| 506 |
+
vnew._densities.data[0, 0, 0, 0, 0] += 1.0
|
| 507 |
+
self.assertNotAlmostEqual(
|
| 508 |
+
float(
|
| 509 |
+
(vnew.densities()[0, 0, 0, 0, 0] - v.densities()[0, 0, 0, 0, 0])
|
| 510 |
+
.abs()
|
| 511 |
+
.max()
|
| 512 |
+
),
|
| 513 |
+
0.0,
|
| 514 |
+
)
|
| 515 |
+
|
| 516 |
+
if has_features:
|
| 517 |
+
vnew._features.data[0, 0, 0, 0, 0] += 1.0
|
| 518 |
+
self.assertNotAlmostEqual(
|
| 519 |
+
float(
|
| 520 |
+
(vnew.features()[0, 0, 0, 0, 0] - v.features()[0, 0, 0, 0, 0])
|
| 521 |
+
.abs()
|
| 522 |
+
.max()
|
| 523 |
+
),
|
| 524 |
+
0.0,
|
| 525 |
+
)
|
| 526 |
+
|
| 527 |
+
def _check_vars_on_device(self, v, desired_device):
|
| 528 |
+
for var_name, var in vars(v).items():
|
| 529 |
+
if var_name != "device":
|
| 530 |
+
if var is not None:
|
| 531 |
+
self.assertTrue(
|
| 532 |
+
var.device.type == desired_device.type,
|
| 533 |
+
(var_name, var.device, desired_device),
|
| 534 |
+
)
|
| 535 |
+
else:
|
| 536 |
+
self.assertTrue(var.type == desired_device.type)
|
| 537 |
+
|
| 538 |
+
def test_to(
|
| 539 |
+
self, num_volumes=3, num_channels=4, size=(6, 8, 10), dtype=torch.float32
|
| 540 |
+
):
|
| 541 |
+
"""
|
| 542 |
+
Test the moving of the volumes from/to gpu and cpu
|
| 543 |
+
"""
|
| 544 |
+
|
| 545 |
+
features = torch.randn(
|
| 546 |
+
size=[num_volumes, num_channels, *size], dtype=torch.float32
|
| 547 |
+
)
|
| 548 |
+
densities = torch.rand(size=[num_volumes, 1, *size], dtype=dtype)
|
| 549 |
+
volumes = Volumes(densities=densities, features=features)
|
| 550 |
+
locator = VolumeLocator(
|
| 551 |
+
batch_size=5, grid_sizes=(3, 5, 7), device=volumes.device
|
| 552 |
+
)
|
| 553 |
+
|
| 554 |
+
for name, obj in (("VolumeLocator", locator), ("Volumes", volumes)):
|
| 555 |
+
with self.subTest(f"Moving {name} from/to gpu and cpu"):
|
| 556 |
+
# Test support for str and torch.device
|
| 557 |
+
cpu_device = torch.device("cpu")
|
| 558 |
+
|
| 559 |
+
converted_obj = obj.to("cpu")
|
| 560 |
+
self.assertEqual(cpu_device, converted_obj.device)
|
| 561 |
+
self.assertEqual(cpu_device, obj.device)
|
| 562 |
+
self.assertIs(obj, converted_obj)
|
| 563 |
+
|
| 564 |
+
converted_obj = obj.to(cpu_device)
|
| 565 |
+
self.assertEqual(cpu_device, converted_obj.device)
|
| 566 |
+
self.assertEqual(cpu_device, obj.device)
|
| 567 |
+
self.assertIs(obj, converted_obj)
|
| 568 |
+
|
| 569 |
+
cuda_device = torch.device("cuda:0")
|
| 570 |
+
|
| 571 |
+
converted_obj = obj.to("cuda:0")
|
| 572 |
+
self.assertEqual(cuda_device, converted_obj.device)
|
| 573 |
+
self.assertEqual(cpu_device, obj.device)
|
| 574 |
+
self.assertIsNot(obj, converted_obj)
|
| 575 |
+
|
| 576 |
+
converted_obj = obj.to(cuda_device)
|
| 577 |
+
self.assertEqual(cuda_device, converted_obj.device)
|
| 578 |
+
self.assertEqual(cpu_device, obj.device)
|
| 579 |
+
self.assertIsNot(obj, converted_obj)
|
| 580 |
+
|
| 581 |
+
with self.subTest("Test device placement of internal tensors of Volumes"):
|
| 582 |
+
features = features.to(cuda_device)
|
| 583 |
+
densities = features.to(cuda_device)
|
| 584 |
+
|
| 585 |
+
for features_ in (features, None):
|
| 586 |
+
volumes = Volumes(densities=densities, features=features_)
|
| 587 |
+
|
| 588 |
+
cpu_volumes = volumes.cpu()
|
| 589 |
+
cuda_volumes = cpu_volumes.cuda()
|
| 590 |
+
cuda_volumes2 = cuda_volumes.cuda()
|
| 591 |
+
cpu_volumes2 = cuda_volumes2.cpu()
|
| 592 |
+
|
| 593 |
+
for volumes1, volumes2 in itertools.combinations(
|
| 594 |
+
(volumes, cpu_volumes, cpu_volumes2, cuda_volumes, cuda_volumes2), 2
|
| 595 |
+
):
|
| 596 |
+
if volumes1 is cuda_volumes and volumes2 is cuda_volumes2:
|
| 597 |
+
# checks that we do not copy if the devices stay the same
|
| 598 |
+
assert_fun = self.assertIs
|
| 599 |
+
else:
|
| 600 |
+
assert_fun = self.assertSeparate
|
| 601 |
+
assert_fun(volumes1._densities, volumes2._densities)
|
| 602 |
+
if features_ is not None:
|
| 603 |
+
assert_fun(volumes1._features, volumes2._features)
|
| 604 |
+
for volumes_ in (volumes1, volumes2):
|
| 605 |
+
if volumes_ in (cpu_volumes, cpu_volumes2):
|
| 606 |
+
self._check_vars_on_device(volumes_, cpu_device)
|
| 607 |
+
else:
|
| 608 |
+
self._check_vars_on_device(volumes_, cuda_device)
|
| 609 |
+
|
| 610 |
+
with self.subTest("Test device placement of internal tensors of VolumeLocator"):
|
| 611 |
+
for device1, device2 in itertools.combinations(
|
| 612 |
+
(torch.device("cpu"), torch.device("cuda:0")), 2
|
| 613 |
+
):
|
| 614 |
+
locator = locator.to(device1)
|
| 615 |
+
locator = locator.to(device2)
|
| 616 |
+
self.assertEqual(locator._grid_sizes.device, device2)
|
| 617 |
+
self.assertEqual(locator._local_to_world_transform.device, device2)
|
| 618 |
+
|
| 619 |
+
def _check_padded(self, x_pad, x_list, grid_sizes):
|
| 620 |
+
"""
|
| 621 |
+
Check that padded tensors x_pad are the same as x_list tensors.
|
| 622 |
+
"""
|
| 623 |
+
num_volumes = len(x_list)
|
| 624 |
+
for i in range(num_volumes):
|
| 625 |
+
self.assertClose(
|
| 626 |
+
x_pad[i][:, : grid_sizes[i][0], : grid_sizes[i][1], : grid_sizes[i][2]],
|
| 627 |
+
x_list[i],
|
| 628 |
+
)
|
| 629 |
+
|
| 630 |
+
def test_feature_density_setters(self):
|
| 631 |
+
"""
|
| 632 |
+
Tests getters and setters for padded/list representations.
|
| 633 |
+
"""
|
| 634 |
+
|
| 635 |
+
device = torch.device("cuda:0")
|
| 636 |
+
diff_device = torch.device("cpu")
|
| 637 |
+
|
| 638 |
+
num_volumes = 30
|
| 639 |
+
num_channels = 4
|
| 640 |
+
K = 20
|
| 641 |
+
|
| 642 |
+
densities = []
|
| 643 |
+
features = []
|
| 644 |
+
grid_sizes = []
|
| 645 |
+
diff_grid_sizes = []
|
| 646 |
+
|
| 647 |
+
for _ in range(num_volumes):
|
| 648 |
+
grid_size = torch.randint(K - 1, size=(3,)).long() + 1
|
| 649 |
+
densities.append(
|
| 650 |
+
torch.rand((1, *grid_size), device=device, dtype=torch.float32)
|
| 651 |
+
)
|
| 652 |
+
features.append(
|
| 653 |
+
torch.rand(
|
| 654 |
+
(num_channels, *grid_size), device=device, dtype=torch.float32
|
| 655 |
+
)
|
| 656 |
+
)
|
| 657 |
+
grid_sizes.append(grid_size)
|
| 658 |
+
|
| 659 |
+
diff_grid_size = (
|
| 660 |
+
copy.deepcopy(grid_size) + torch.randint(2, size=(3,)).long() + 1
|
| 661 |
+
)
|
| 662 |
+
diff_grid_sizes.append(diff_grid_size)
|
| 663 |
+
grid_sizes = torch.stack(grid_sizes).to(device)
|
| 664 |
+
diff_grid_sizes = torch.stack(diff_grid_sizes).to(device)
|
| 665 |
+
|
| 666 |
+
volumes = Volumes(densities=densities, features=features)
|
| 667 |
+
self.assertClose(volumes.get_grid_sizes(), grid_sizes)
|
| 668 |
+
|
| 669 |
+
# test the getters
|
| 670 |
+
features_padded = volumes.features()
|
| 671 |
+
densities_padded = volumes.densities()
|
| 672 |
+
features_list = volumes.features_list()
|
| 673 |
+
densities_list = volumes.densities_list()
|
| 674 |
+
for x_pad, x_list in zip(
|
| 675 |
+
(densities_padded, features_padded, densities_padded, features_padded),
|
| 676 |
+
(densities_list, features_list, densities, features),
|
| 677 |
+
):
|
| 678 |
+
self._check_padded(x_pad, x_list, grid_sizes)
|
| 679 |
+
|
| 680 |
+
# test feature setters
|
| 681 |
+
features_new = [
|
| 682 |
+
torch.rand((num_channels, *grid_size), device=device, dtype=torch.float32)
|
| 683 |
+
for grid_size in grid_sizes
|
| 684 |
+
]
|
| 685 |
+
volumes._set_features(features_new)
|
| 686 |
+
features_new_list = volumes.features_list()
|
| 687 |
+
features_new_padded = volumes.features()
|
| 688 |
+
for x_pad, x_list in zip(
|
| 689 |
+
(features_new_padded, features_new_padded),
|
| 690 |
+
(features_new, features_new_list),
|
| 691 |
+
):
|
| 692 |
+
self._check_padded(x_pad, x_list, grid_sizes)
|
| 693 |
+
|
| 694 |
+
# wrong features to update
|
| 695 |
+
bad_features_new = [
|
| 696 |
+
[
|
| 697 |
+
torch.rand(
|
| 698 |
+
(num_channels, *grid_size), device=diff_device, dtype=torch.float32
|
| 699 |
+
)
|
| 700 |
+
for grid_size in diff_grid_sizes
|
| 701 |
+
],
|
| 702 |
+
torch.rand(
|
| 703 |
+
(num_volumes, num_channels, K + 1, K, K),
|
| 704 |
+
device=device,
|
| 705 |
+
dtype=torch.float32,
|
| 706 |
+
),
|
| 707 |
+
None,
|
| 708 |
+
]
|
| 709 |
+
for bad_features_new_ in bad_features_new:
|
| 710 |
+
with self.assertRaises(ValueError):
|
| 711 |
+
volumes._set_densities(bad_features_new_)
|
| 712 |
+
|
| 713 |
+
# test density setters
|
| 714 |
+
densities_new = [
|
| 715 |
+
torch.rand((1, *grid_size), device=device, dtype=torch.float32)
|
| 716 |
+
for grid_size in grid_sizes
|
| 717 |
+
]
|
| 718 |
+
volumes._set_densities(densities_new)
|
| 719 |
+
densities_new_list = volumes.densities_list()
|
| 720 |
+
densities_new_padded = volumes.densities()
|
| 721 |
+
for x_pad, x_list in zip(
|
| 722 |
+
(densities_new_padded, densities_new_padded),
|
| 723 |
+
(densities_new, densities_new_list),
|
| 724 |
+
):
|
| 725 |
+
self._check_padded(x_pad, x_list, grid_sizes)
|
| 726 |
+
|
| 727 |
+
# wrong densities to update
|
| 728 |
+
bad_densities_new = [
|
| 729 |
+
[
|
| 730 |
+
torch.rand((1, *grid_size), device=diff_device, dtype=torch.float32)
|
| 731 |
+
for grid_size in diff_grid_sizes
|
| 732 |
+
],
|
| 733 |
+
torch.rand(
|
| 734 |
+
(num_volumes, 1, K + 1, K, K), device=device, dtype=torch.float32
|
| 735 |
+
),
|
| 736 |
+
None,
|
| 737 |
+
]
|
| 738 |
+
for bad_densities_new_ in bad_densities_new:
|
| 739 |
+
with self.assertRaises(ValueError):
|
| 740 |
+
volumes._set_densities(bad_densities_new_)
|
| 741 |
+
|
| 742 |
+
# test update_padded
|
| 743 |
+
volumes = Volumes(densities=densities, features=features)
|
| 744 |
+
volumes_updated = volumes.update_padded(
|
| 745 |
+
densities_new, new_features=features_new
|
| 746 |
+
)
|
| 747 |
+
densities_new_list = volumes_updated.densities_list()
|
| 748 |
+
densities_new_padded = volumes_updated.densities()
|
| 749 |
+
features_new_list = volumes_updated.features_list()
|
| 750 |
+
features_new_padded = volumes_updated.features()
|
| 751 |
+
for x_pad, x_list in zip(
|
| 752 |
+
(
|
| 753 |
+
densities_new_padded,
|
| 754 |
+
densities_new_padded,
|
| 755 |
+
features_new_padded,
|
| 756 |
+
features_new_padded,
|
| 757 |
+
),
|
| 758 |
+
(densities_new, densities_new_list, features_new, features_new_list),
|
| 759 |
+
):
|
| 760 |
+
self._check_padded(x_pad, x_list, grid_sizes)
|
| 761 |
+
self.assertIs(volumes.get_grid_sizes(), volumes_updated.get_grid_sizes())
|
| 762 |
+
self.assertIs(
|
| 763 |
+
volumes.get_local_to_world_coords_transform(),
|
| 764 |
+
volumes_updated.get_local_to_world_coords_transform(),
|
| 765 |
+
)
|
| 766 |
+
self.assertIs(volumes.device, volumes_updated.device)
|
| 767 |
+
|
| 768 |
+
def test_constructor_for_padded_lists(self):
|
| 769 |
+
"""
|
| 770 |
+
Tests constructor for padded/list representations.
|
| 771 |
+
"""
|
| 772 |
+
|
| 773 |
+
device = torch.device("cuda:0")
|
| 774 |
+
diff_device = torch.device("cpu")
|
| 775 |
+
|
| 776 |
+
num_volumes = 3
|
| 777 |
+
num_channels = 4
|
| 778 |
+
size = (6, 8, 10)
|
| 779 |
+
diff_size = (6, 8, 11)
|
| 780 |
+
|
| 781 |
+
# good ways to define densities
|
| 782 |
+
ok_densities = [
|
| 783 |
+
torch.randn(
|
| 784 |
+
size=[num_volumes, 1, *size], device=device, dtype=torch.float32
|
| 785 |
+
).unbind(0),
|
| 786 |
+
torch.randn(
|
| 787 |
+
size=[num_volumes, 1, *size], device=device, dtype=torch.float32
|
| 788 |
+
),
|
| 789 |
+
]
|
| 790 |
+
|
| 791 |
+
# bad ways to define features
|
| 792 |
+
bad_features = [
|
| 793 |
+
torch.randn(
|
| 794 |
+
size=[num_volumes + 1, num_channels, *size],
|
| 795 |
+
device=device,
|
| 796 |
+
dtype=torch.float32,
|
| 797 |
+
).unbind(
|
| 798 |
+
0
|
| 799 |
+
), # list with diff batch size
|
| 800 |
+
torch.randn(
|
| 801 |
+
size=[num_volumes + 1, num_channels, *size],
|
| 802 |
+
device=device,
|
| 803 |
+
dtype=torch.float32,
|
| 804 |
+
), # diff batch size
|
| 805 |
+
torch.randn(
|
| 806 |
+
size=[num_volumes, num_channels, *diff_size],
|
| 807 |
+
device=device,
|
| 808 |
+
dtype=torch.float32,
|
| 809 |
+
).unbind(
|
| 810 |
+
0
|
| 811 |
+
), # list with different size
|
| 812 |
+
torch.randn(
|
| 813 |
+
size=[num_volumes, num_channels, *diff_size],
|
| 814 |
+
device=device,
|
| 815 |
+
dtype=torch.float32,
|
| 816 |
+
), # different size
|
| 817 |
+
torch.randn(
|
| 818 |
+
size=[num_volumes, num_channels, *size],
|
| 819 |
+
device=diff_device,
|
| 820 |
+
dtype=torch.float32,
|
| 821 |
+
), # different device
|
| 822 |
+
torch.randn(
|
| 823 |
+
size=[num_volumes, num_channels, *size],
|
| 824 |
+
device=diff_device,
|
| 825 |
+
dtype=torch.float32,
|
| 826 |
+
).unbind(
|
| 827 |
+
0
|
| 828 |
+
), # list with different device
|
| 829 |
+
]
|
| 830 |
+
|
| 831 |
+
# good ways to define features
|
| 832 |
+
ok_features = [
|
| 833 |
+
torch.randn(
|
| 834 |
+
size=[num_volumes, num_channels, *size],
|
| 835 |
+
device=device,
|
| 836 |
+
dtype=torch.float32,
|
| 837 |
+
).unbind(
|
| 838 |
+
0
|
| 839 |
+
), # list of features of correct size
|
| 840 |
+
torch.randn(
|
| 841 |
+
size=[num_volumes, num_channels, *size],
|
| 842 |
+
device=device,
|
| 843 |
+
dtype=torch.float32,
|
| 844 |
+
),
|
| 845 |
+
]
|
| 846 |
+
|
| 847 |
+
for densities in ok_densities:
|
| 848 |
+
for features in bad_features:
|
| 849 |
+
self.assertRaises(
|
| 850 |
+
ValueError, Volumes, densities=densities, features=features
|
| 851 |
+
)
|
| 852 |
+
for features in ok_features:
|
| 853 |
+
Volumes(densities=densities, features=features)
|
| 854 |
+
|
| 855 |
+
def test_constructor(
|
| 856 |
+
self, num_volumes=3, num_channels=4, size=(6, 8, 10), dtype=torch.float32
|
| 857 |
+
):
|
| 858 |
+
"""
|
| 859 |
+
Test different ways of calling the `Volumes` constructor
|
| 860 |
+
"""
|
| 861 |
+
|
| 862 |
+
device = torch.device("cuda:0")
|
| 863 |
+
|
| 864 |
+
# all ways to define features
|
| 865 |
+
features = [
|
| 866 |
+
torch.randn(
|
| 867 |
+
size=[num_volumes, num_channels, *size],
|
| 868 |
+
device=device,
|
| 869 |
+
dtype=torch.float32,
|
| 870 |
+
), # padded tensor
|
| 871 |
+
torch.randn(
|
| 872 |
+
size=[num_volumes, num_channels, *size],
|
| 873 |
+
device=device,
|
| 874 |
+
dtype=torch.float32,
|
| 875 |
+
).unbind(
|
| 876 |
+
0
|
| 877 |
+
), # list of features
|
| 878 |
+
None, # no features
|
| 879 |
+
]
|
| 880 |
+
|
| 881 |
+
# bad ways to define features
|
| 882 |
+
bad_features = [
|
| 883 |
+
torch.randn(
|
| 884 |
+
size=[num_volumes, num_channels, 2, *size],
|
| 885 |
+
device=device,
|
| 886 |
+
dtype=torch.float32,
|
| 887 |
+
), # 6 dims
|
| 888 |
+
torch.randn(
|
| 889 |
+
size=[num_volumes, *size], device=device, dtype=torch.float32
|
| 890 |
+
), # 4 dims
|
| 891 |
+
torch.randn(
|
| 892 |
+
size=[num_volumes, *size], device=device, dtype=torch.float32
|
| 893 |
+
).unbind(
|
| 894 |
+
0
|
| 895 |
+
), # list of 4 dim tensors
|
| 896 |
+
]
|
| 897 |
+
|
| 898 |
+
# all ways to define densities
|
| 899 |
+
densities = [
|
| 900 |
+
torch.randn(
|
| 901 |
+
size=[num_volumes, 1, *size], device=device, dtype=torch.float32
|
| 902 |
+
), # padded tensor
|
| 903 |
+
torch.randn(
|
| 904 |
+
size=[num_volumes, 1, *size], device=device, dtype=torch.float32
|
| 905 |
+
).unbind(
|
| 906 |
+
0
|
| 907 |
+
), # list of densities
|
| 908 |
+
]
|
| 909 |
+
|
| 910 |
+
# bad ways to define densities
|
| 911 |
+
bad_densities = [
|
| 912 |
+
None, # omitted
|
| 913 |
+
torch.randn(
|
| 914 |
+
size=[num_volumes, 1, 1, *size], device=device, dtype=torch.float32
|
| 915 |
+
), # 6-dim tensor
|
| 916 |
+
torch.randn(
|
| 917 |
+
size=[num_volumes, 1, 1, *size], device=device, dtype=torch.float32
|
| 918 |
+
).unbind(
|
| 919 |
+
0
|
| 920 |
+
), # list of 5-dim densities
|
| 921 |
+
]
|
| 922 |
+
|
| 923 |
+
# all possible ways to define the voxels sizes
|
| 924 |
+
vox_sizes = [
|
| 925 |
+
torch.Tensor([1.0, 1.0, 1.0]),
|
| 926 |
+
[1.0, 1.0, 1.0],
|
| 927 |
+
torch.Tensor([1.0, 1.0, 1.0])[None].repeat(num_volumes, 1),
|
| 928 |
+
torch.Tensor([1.0])[None].repeat(num_volumes, 1),
|
| 929 |
+
1.0,
|
| 930 |
+
torch.Tensor([1.0]),
|
| 931 |
+
]
|
| 932 |
+
|
| 933 |
+
# all possible ways to define the volume translations
|
| 934 |
+
vol_translations = [
|
| 935 |
+
torch.Tensor([1.0, 1.0, 1.0]),
|
| 936 |
+
[1.0, 1.0, 1.0],
|
| 937 |
+
torch.Tensor([1.0, 1.0, 1.0])[None].repeat(num_volumes, 1),
|
| 938 |
+
]
|
| 939 |
+
|
| 940 |
+
# wrong ways to define voxel sizes
|
| 941 |
+
bad_vox_sizes = [
|
| 942 |
+
torch.Tensor([1.0, 1.0, 1.0, 1.0]),
|
| 943 |
+
[1.0, 1.0, 1.0, 1.0],
|
| 944 |
+
torch.Tensor([]),
|
| 945 |
+
None,
|
| 946 |
+
]
|
| 947 |
+
|
| 948 |
+
# wrong ways to define the volume translations
|
| 949 |
+
bad_vol_translations = [
|
| 950 |
+
torch.Tensor([1.0, 1.0]),
|
| 951 |
+
[1.0, 1.0],
|
| 952 |
+
1.0,
|
| 953 |
+
torch.Tensor([1.0, 1.0, 1.0])[None].repeat(num_volumes + 1, 1),
|
| 954 |
+
]
|
| 955 |
+
|
| 956 |
+
def zip_with_ok_indicator(good, bad):
|
| 957 |
+
return zip([*good, *bad], [*([True] * len(good)), *([False] * len(bad))])
|
| 958 |
+
|
| 959 |
+
for features_, features_ok in zip_with_ok_indicator(features, bad_features):
|
| 960 |
+
for densities_, densities_ok in zip_with_ok_indicator(
|
| 961 |
+
densities, bad_densities
|
| 962 |
+
):
|
| 963 |
+
for vox_size, size_ok in zip_with_ok_indicator(
|
| 964 |
+
vox_sizes, bad_vox_sizes
|
| 965 |
+
):
|
| 966 |
+
for vol_translation, trans_ok in zip_with_ok_indicator(
|
| 967 |
+
vol_translations, bad_vol_translations
|
| 968 |
+
):
|
| 969 |
+
if (
|
| 970 |
+
size_ok and trans_ok and features_ok and densities_ok
|
| 971 |
+
): # if all entries are good we check that this doesnt throw
|
| 972 |
+
Volumes(
|
| 973 |
+
features=features_,
|
| 974 |
+
densities=densities_,
|
| 975 |
+
voxel_size=vox_size,
|
| 976 |
+
volume_translation=vol_translation,
|
| 977 |
+
)
|
| 978 |
+
|
| 979 |
+
else: # otherwise we check for ValueError
|
| 980 |
+
self.assertRaises(
|
| 981 |
+
ValueError,
|
| 982 |
+
Volumes,
|
| 983 |
+
features=features_,
|
| 984 |
+
densities=densities_,
|
| 985 |
+
voxel_size=vox_size,
|
| 986 |
+
volume_translation=vol_translation,
|
| 987 |
+
)
|
third_party/AnyBimanual/third_party/pytorch3d/website/.dockerignore
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*/node_modules
|
| 2 |
+
*.log
|
third_party/AnyBimanual/third_party/pytorch3d/website/.gitignore
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.DS_Store
|
| 2 |
+
|
| 3 |
+
node_modules
|
| 4 |
+
|
| 5 |
+
lib/core/metadata.js
|
| 6 |
+
lib/core/MetadataBlog.js
|
| 7 |
+
|
| 8 |
+
website/translated_docs
|
| 9 |
+
website/build/
|
| 10 |
+
website/yarn.lock
|
| 11 |
+
website/node_modules
|
| 12 |
+
website/i18n/*
|
| 13 |
+
website/_tutorials/*
|
third_party/AnyBimanual/third_party/pytorch3d/website/README.md
ADDED
|
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
This website was created with [Docusaurus](https://docusaurus.io/).

# Building the PyTorch3D website

## Install

1. Make sure all the dependencies for the website are installed:

```sh
# Install dependencies
$ yarn

# or

$ npm install docusaurus-init
```

2. Run your dev server:

```sh
# Start the site
$ yarn start

# or

$ ./node_modules/docusaurus/lib/start-server.js
```

## Build the tutorials

We convert the IPython notebooks to HTML using `parse_tutorials.py`, which is found in the `scripts` folder at the root of the PyTorch3D directory.

Before running this script, install the following dependencies:

```
pip install nbformat==4.4.0 nbconvert==5.3.1 ipywidgets==7.5.1 tornado==4.2 bs4
```

Install yarn:

```
brew install yarn

# or

curl -o- -L https://yarnpkg.com/install.sh | bash
```

Then run the build script:

```
bash scripts/build_website.sh
```

This will build the Docusaurus website and run a script to parse the tutorials, generating:
- `.html` files in the `website/_tutorials` folder
- `.js` files in the `website/pages/tutorials` folder
- `.py`/`.ipynb` files in the `website/static/files` folder

TODO: Add support for LaTeX in markdown in Jupyter notebooks and embedded images.

## Build and publish the website

To update for a new version, you need to build the tutorials and the website and push to the gh-pages
branch of `github.com/facebookresearch/pytorch3d`. The instructions in `scripts/publish_website.sh`
bring it all together.

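For reference, publishing relies on the `publish-gh-pages` script defined in `website/package.json`. A minimal sketch of a manual publish is shown below; this assumes you have push access, and the actual release sequence lives in `scripts/publish_website.sh`:

```sh
# Hypothetical manual publish sketch -- the real release process is
# scripts/publish_website.sh; GIT_USER is your GitHub username.
cd website
yarn build
GIT_USER=<your-github-username> yarn run publish-gh-pages
```
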
## Add a new tutorial

The tutorials to include in the website are listed in `website/tutorials.json`. If you create a new tutorial, add an entry to the list in this file. This is needed in order to generate the sidebar for the tutorials page.

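The file maps category names to lists of tutorials, and `website/core/TutorialSidebar.js` reads it to build the sidebar. For illustration, a new entry might look like the following; the category, `id`, and `title` here are hypothetical, and the `id` must match your notebook filename without its extension:

```javascript
// Hypothetical tutorials.json entry -- "my_new_tutorial" and its category are
// placeholders; the id must match the .ipynb filename of your tutorial.
{
  "3D operators": [
    {"id": "my_new_tutorial", "title": "My new tutorial"}
  ]
}
```
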
## Edit the landing page

To change the content of the landing page modify: `website/pages/en/index.js`.

## Edit the tutorials page

To change the content of the tutorials home page modify: `website/pages/tutorials/index.js`.

---------------------------------------------------------

## Docusaurus docs

- [Get Started in 5 Minutes](#get-started-in-5-minutes)
- [Directory Structure](#directory-structure)
- [Editing Content](#editing-content)
- [Adding Content](#adding-content)
- [Full Documentation](#full-documentation)

## Directory Structure

Your project file structure should look something like this

```
my-docusaurus/
  docs/
    doc-1.md
    doc-2.md
    doc-3.md
  website/
    blog/
      2016-3-11-oldest-post.md
      2017-10-24-newest-post.md
    core/
    node_modules/
    pages/
    static/
      css/
      img/
    package.json
    sidebars.json
    siteConfig.js
```

# Editing Content

## Editing an existing docs page

Edit docs by navigating to `docs/` and editing the corresponding document:

`docs/doc-to-be-edited.md`

```markdown
---
id: page-needs-edit
title: This Doc Needs To Be Edited
---

Edit me...
```

For more information about docs, click [here](https://docusaurus.io/docs/en/navigation)

## Editing an existing blog post

Edit blog posts by navigating to `website/blog` and editing the corresponding post:

`website/blog/post-to-be-edited.md`

```markdown
---
id: post-needs-edit
title: This Blog Post Needs To Be Edited
---

Edit me...
```

For more information about blog posts, click [here](https://docusaurus.io/docs/en/adding-blog)

# Adding Content

## Adding a new docs page to an existing sidebar

1. Create the doc as a new markdown file in `/docs`, example `docs/newly-created-doc.md`:

```md
---
id: newly-created-doc
title: This Doc Needs To Be Edited
---

My new content here..
```

2. Refer to that doc's ID in an existing sidebar in `website/sidebars.json`:

```javascript
// Add newly-created-doc to the Getting Started category of docs
{
  "docs": {
    "Getting Started": [
      "quick-start",
      "newly-created-doc" // new doc here
    ],
    ...
  },
  ...
}
```

For more information about adding new docs, click [here](https://docusaurus.io/docs/en/navigation)

## Adding a new blog post

1. Make sure there is a header link to your blog in `website/siteConfig.js`:

`website/siteConfig.js`

```javascript
headerLinks: [
    ...
    { blog: true, label: 'Blog' },
    ...
]
```

2. Create the blog post with the format `YYYY-MM-DD-My-Blog-Post-Title.md` in `website/blog`:

`website/blog/2018-05-21-New-Blog-Post.md`

```markdown
---
author: Frank Li
authorURL: https://twitter.com/foobarbaz
authorFBID: 503283835
title: New Blog Post
---

Lorem Ipsum...
```

For more information about blog posts, click [here](https://docusaurus.io/docs/en/adding-blog)

## Adding items to your site's top navigation bar

1. Add links to docs, custom pages or external links by editing the headerLinks field of `website/siteConfig.js`:

`website/siteConfig.js`

```javascript
{
  headerLinks: [
    ...
    /* you can add docs */
    { doc: 'my-examples', label: 'Examples' },
    /* you can add custom pages */
    { page: 'help', label: 'Help' },
    /* you can add external links */
    { href: 'https://github.com/facebook/docusaurus', label: 'GitHub' },
    ...
  ],
  ...
}
```

For more information about the navigation bar, click [here](https://docusaurus.io/docs/en/navigation)

## Adding custom pages

1. Docusaurus uses React components to build pages. The components are saved as .js files in `website/pages/en`.
2. If you want your page to show up in your navigation header, you will need to update `website/siteConfig.js` to add to the `headerLinks` element:

`website/siteConfig.js`

```javascript
{
  headerLinks: [
    ...
    { page: 'my-new-custom-page', label: 'My New Custom Page' },
    ...
  ],
  ...
}
```

For more information about custom pages, click [here](https://docusaurus.io/docs/en/custom-pages).

# Full Documentation

Full documentation can be found on the [website](https://docusaurus.io/).

third_party/AnyBimanual/third_party/pytorch3d/website/core/Footer.js
ADDED
|
@@ -0,0 +1,91 @@
|
| 1 |
+
/**
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
const PropTypes = require("prop-types");
|
| 10 |
+
const React = require('react');
|
| 11 |
+
|
| 12 |
+
function SocialFooter(props) {
|
| 13 |
+
const repoUrl = `https://github.com/${props.config.organizationName}/${props.config.projectName}`;
|
| 14 |
+
return (
|
| 15 |
+
<div className="footerSection">
|
| 16 |
+
<div className="social">
|
| 17 |
+
<a
|
| 18 |
+
className="github-button" // part of the https://buttons.github.io/buttons.js script in siteConfig.js
|
| 19 |
+
href={repoUrl}
|
| 20 |
+
data-count-href={`${repoUrl}/stargazers`}
|
| 21 |
+
data-show-count="true"
|
| 22 |
+
data-count-aria-label="# stargazers on GitHub"
|
| 23 |
+
aria-label="Star PyTorch3D on GitHub"
|
| 24 |
+
>
|
| 25 |
+
{props.config.projectName}
|
| 26 |
+
</a>
|
| 27 |
+
</div>
|
| 28 |
+
</div>
|
| 29 |
+
);
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
SocialFooter.propTypes = {
|
| 33 |
+
config: PropTypes.object
|
| 34 |
+
};
|
| 35 |
+
|
| 36 |
+
class Footer extends React.Component {
|
| 37 |
+
docUrl(doc, language) {
|
| 38 |
+
const baseUrl = this.props.config.baseUrl;
|
| 39 |
+
const docsUrl = this.props.config.docsUrl;
|
| 40 |
+
const docsPart = `${docsUrl ? `${docsUrl}/` : ''}`;
|
| 41 |
+
const langPart = `${language ? `${language}/` : ''}`;
|
| 42 |
+
return `${baseUrl}${docsPart}${langPart}${doc}`;
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
pageUrl(doc, language) {
|
| 46 |
+
const baseUrl = this.props.config.baseUrl;
|
| 47 |
+
return baseUrl + (language ? `${language}/` : '') + doc;
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
render() {
|
| 51 |
+
const repoUrl = `https://github.com/${this.props.config.organizationName}/${this.props.config.projectName}`;
|
| 52 |
+
return (
|
| 53 |
+
<footer className="nav-footer" id="footer">
|
| 54 |
+
<section className="sitemap">
|
| 55 |
+
<SocialFooter config={this.props.config} />
|
| 56 |
+
</section>
|
| 57 |
+
|
| 58 |
+
<a
|
| 59 |
+
href="https://opensource.facebook.com/"
|
| 60 |
+
target="_blank"
|
| 61 |
+
rel="noreferrer noopener"
|
| 62 |
+
className="fbOpenSource">
|
| 63 |
+
<img
|
| 64 |
+
src={`${this.props.config.baseUrl}img/oss_logo.png`}
|
| 65 |
+
alt="Facebook Open Source"
|
| 66 |
+
width="170"
|
| 67 |
+
height="45"
|
| 68 |
+
/>
|
| 69 |
+
</a>
|
| 70 |
+
<section className="copyright">{this.props.config.copyright}
|
| 71 |
+
<br/>
|
| 72 |
+
Legal:
|
| 73 |
+
<a
|
| 74 |
+
href="https://opensource.facebook.com/legal/privacy/"
|
| 75 |
+
target="_blank"
|
| 76 |
+
rel="noreferrer noopener">
|
| 77 |
+
Privacy
|
| 78 |
+
</a>
|
| 79 |
+
<a
|
| 80 |
+
href="https://opensource.facebook.com/legal/terms/"
|
| 81 |
+
target="_blank"
|
| 82 |
+
rel="noreferrer noopener">
|
| 83 |
+
Terms
|
| 84 |
+
</a>
|
| 85 |
+
</section>
|
| 86 |
+
</footer>
|
| 87 |
+
);
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
module.exports = Footer;
|
third_party/AnyBimanual/third_party/pytorch3d/website/core/Tutorial.js
ADDED
|
@@ -0,0 +1,100 @@
|
| 1 |
+
/**
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*
|
| 8 |
+
* @format
|
| 9 |
+
*/
|
| 10 |
+
|
| 11 |
+
const React = require('react');
|
| 12 |
+
|
| 13 |
+
const fs = require('fs-extra');
|
| 14 |
+
const path = require('path');
|
| 15 |
+
const CWD = process.cwd();
|
| 16 |
+
|
| 17 |
+
const CompLibrary = require(`${CWD}/node_modules/docusaurus/lib/core/CompLibrary.js`);
|
| 18 |
+
const Container = CompLibrary.Container;
|
| 19 |
+
|
| 20 |
+
const TutorialSidebar = require(`${CWD}/core/TutorialSidebar.js`);
|
| 21 |
+
|
| 22 |
+
function renderDownloadIcon() {
|
| 23 |
+
return (
|
| 24 |
+
<svg
|
| 25 |
+
aria-hidden="true"
|
| 26 |
+
focusable="false"
|
| 27 |
+
data-prefix="fas"
|
| 28 |
+
data-icon="file-download"
|
| 29 |
+
className="svg-inline--fa fa-file-download fa-w-12"
|
| 30 |
+
role="img"
|
| 31 |
+
xmlns="http://www.w3.org/2000/svg"
|
| 32 |
+
viewBox="0 0 384 512">
|
| 33 |
+
<path
|
| 34 |
+
fill="currentColor"
|
| 35 |
+
d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"
|
| 36 |
+
/>
|
| 37 |
+
</svg>
|
| 38 |
+
);
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
class Tutorial extends React.Component {
|
| 42 |
+
render() {
|
| 43 |
+
const {baseUrl, tutorialID} = this.props;
|
| 44 |
+
|
| 45 |
+
const htmlFile = `${CWD}/_tutorials/${tutorialID}.html`;
|
| 46 |
+
const normalizedHtmlFile = path.normalize(htmlFile);
|
| 47 |
+
|
| 48 |
+
return (
|
| 49 |
+
<div className="docMainWrapper wrapper">
|
| 50 |
+
<TutorialSidebar currentTutorialID={tutorialID} />
|
| 51 |
+
<Container className="mainContainer">
|
| 52 |
+
<div className="tutorialButtonsWrapper">
|
| 53 |
+
<div className="tutorialButtonWrapper buttonWrapper">
|
| 54 |
+
<a
|
| 55 |
+
className="tutorialButton button"
|
| 56 |
+
download
|
| 57 |
+
href={`https://colab.research.google.com/github/facebookresearch/pytorch3d/blob/stable/docs/tutorials/${tutorialID}.ipynb`}
|
| 58 |
+
target="_blank">
|
| 59 |
+
<img
|
| 60 |
+
className="colabButton"
|
| 61 |
+
align="left"
|
| 62 |
+
src={`${baseUrl}img/colab_icon.png`}
|
| 63 |
+
/>
|
| 64 |
+
{'Run in Google Colab'}
|
| 65 |
+
</a>
|
| 66 |
+
</div>
|
| 67 |
+
<div className="tutorialButtonWrapper buttonWrapper">
|
| 68 |
+
<a
|
| 69 |
+
className="tutorialButton button"
|
| 70 |
+
download
|
| 71 |
+
href={`${baseUrl}files/${tutorialID}.ipynb`}
|
| 72 |
+
target="_blank">
|
| 73 |
+
{renderDownloadIcon()}
|
| 74 |
+
{'Download Tutorial Jupyter Notebook'}
|
| 75 |
+
</a>
|
| 76 |
+
</div>
|
| 77 |
+
<div className="tutorialButtonWrapper buttonWrapper">
|
| 78 |
+
<a
|
| 79 |
+
className="tutorialButton button"
|
| 80 |
+
download
|
| 81 |
+
href={`${baseUrl}files/${tutorialID}.py`}
|
| 82 |
+
target="_blank">
|
| 83 |
+
{renderDownloadIcon()}
|
| 84 |
+
{'Download Tutorial Source Code'}
|
| 85 |
+
</a>
|
| 86 |
+
</div>
|
| 87 |
+
</div>
|
| 88 |
+
<div
|
| 89 |
+
className="tutorialBody"
|
| 90 |
+
dangerouslySetInnerHTML={{
|
| 91 |
+
__html: fs.readFileSync(normalizedHtmlFile, {encoding: 'utf8'}),
|
| 92 |
+
}}
|
| 93 |
+
/>
|
| 94 |
+
</Container>
|
| 95 |
+
</div>
|
| 96 |
+
);
|
| 97 |
+
}
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
module.exports = Tutorial;
|
third_party/AnyBimanual/third_party/pytorch3d/website/core/TutorialSidebar.js
ADDED
|
@@ -0,0 +1,93 @@
|
| 1 |
+
/**
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*
|
| 8 |
+
* @format
|
| 9 |
+
*/
|
| 10 |
+
|
| 11 |
+
const React = require('react');
|
| 12 |
+
const fs = require('fs-extra');
|
| 13 |
+
const path = require('path');
|
| 14 |
+
const join = path.join;
|
| 15 |
+
const CWD = process.cwd();
|
| 16 |
+
|
| 17 |
+
const CompLibrary = require(join(
|
| 18 |
+
CWD,
|
| 19 |
+
'/node_modules/docusaurus/lib/core/CompLibrary.js',
|
| 20 |
+
));
|
| 21 |
+
const SideNav = require(join(
|
| 22 |
+
CWD,
|
| 23 |
+
'/node_modules/docusaurus/lib/core/nav/SideNav.js',
|
| 24 |
+
));
|
| 25 |
+
|
| 26 |
+
const Container = CompLibrary.Container;
|
| 27 |
+
|
| 28 |
+
const OVERVIEW_ID = 'tutorial_overview';
|
| 29 |
+
|
| 30 |
+
class TutorialSidebar extends React.Component {
|
| 31 |
+
render() {
|
| 32 |
+
const {currentTutorialID} = this.props;
|
| 33 |
+
const current = {
|
| 34 |
+
id: currentTutorialID || OVERVIEW_ID,
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
const toc = [
|
| 38 |
+
{
|
| 39 |
+
type: 'CATEGORY',
|
| 40 |
+
title: 'Tutorials',
|
| 41 |
+
children: [
|
| 42 |
+
{
|
| 43 |
+
type: 'LINK',
|
| 44 |
+
item: {
|
| 45 |
+
permalink: 'tutorials/',
|
| 46 |
+
id: OVERVIEW_ID,
|
| 47 |
+
title: 'Overview',
|
| 48 |
+
},
|
| 49 |
+
},
|
| 50 |
+
],
|
| 51 |
+
},
|
| 52 |
+
];
|
| 53 |
+
|
| 54 |
+
const jsonFile = join(CWD, 'tutorials.json');
|
| 55 |
+
const normJsonFile = path.normalize(jsonFile);
|
| 56 |
+
const json = JSON.parse(fs.readFileSync(normJsonFile, {encoding: 'utf8'}));
|
| 57 |
+
|
| 58 |
+
Object.keys(json).forEach(category => {
|
| 59 |
+
const categoryItems = json[category];
|
| 60 |
+
const items = [];
|
| 61 |
+
categoryItems.map(item => {
|
| 62 |
+
items.push({
|
| 63 |
+
type: 'LINK',
|
| 64 |
+
item: {
|
| 65 |
+
permalink: `tutorials/${item.id}`,
|
| 66 |
+
id: item.id,
|
| 67 |
+
title: item.title,
|
| 68 |
+
},
|
| 69 |
+
});
|
| 70 |
+
});
|
| 71 |
+
|
| 72 |
+
toc.push({
|
| 73 |
+
type: 'CATEGORY',
|
| 74 |
+
title: category,
|
| 75 |
+
children: items,
|
| 76 |
+
});
|
| 77 |
+
});
|
| 78 |
+
|
| 79 |
+
return (
|
| 80 |
+
<Container className="docsNavContainer" id="docsNav" wrapper={false}>
|
| 81 |
+
<SideNav
|
| 82 |
+
language={'tutorials'}
|
| 83 |
+
root={'tutorials'}
|
| 84 |
+
title="Tutorials"
|
| 85 |
+
contents={toc}
|
| 86 |
+
current={current}
|
| 87 |
+
/>
|
| 88 |
+
</Container>
|
| 89 |
+
);
|
| 90 |
+
}
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
module.exports = TutorialSidebar;
|
third_party/AnyBimanual/third_party/pytorch3d/website/package.json
ADDED
|
@@ -0,0 +1,14 @@
|
{
  "scripts": {
    "examples": "docusaurus-examples",
    "start": "docusaurus-start",
    "build": "docusaurus-build",
    "publish-gh-pages": "docusaurus-publish",
    "write-translations": "docusaurus-write-translations",
    "version": "docusaurus-version",
    "rename-version": "docusaurus-rename-version"
  },
  "devDependencies": {
    "docusaurus": "^1.14.4"
  }
}
third_party/AnyBimanual/third_party/pytorch3d/website/pages/en/help.js
ADDED
|
@@ -0,0 +1,55 @@
|
| 1 |
+
/**
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
const React = require('react');
|
| 10 |
+
|
| 11 |
+
const CompLibrary = require('../../core/CompLibrary.js');
|
| 12 |
+
|
| 13 |
+
const Container = CompLibrary.Container;
|
| 14 |
+
const GridBlock = CompLibrary.GridBlock;
|
| 15 |
+
|
| 16 |
+
function Help(props) {
|
| 17 |
+
const {config: siteConfig, language = ''} = props;
|
| 18 |
+
const {baseUrl, docsUrl} = siteConfig;
|
| 19 |
+
const docsPart = `${docsUrl ? `${docsUrl}/` : ''}`;
|
| 20 |
+
const langPart = `${language ? `${language}/` : ''}`;
|
| 21 |
+
const docUrl = doc => `${baseUrl}${docsPart}${langPart}${doc}`;
|
| 22 |
+
|
| 23 |
+
const supportLinks = [
|
| 24 |
+
{
|
| 25 |
+
content: `Learn more using the [documentation on this site.](${docUrl(
|
| 26 |
+
'doc1.html',
|
| 27 |
+
)})`,
|
| 28 |
+
title: 'Browse Docs',
|
| 29 |
+
},
|
| 30 |
+
{
|
| 31 |
+
content: 'Ask questions about the documentation and project',
|
| 32 |
+
title: 'Join the community',
|
| 33 |
+
},
|
| 34 |
+
{
|
| 35 |
+
content: "Find out what's new with this project",
|
| 36 |
+
title: 'Stay up to date',
|
| 37 |
+
},
|
| 38 |
+
];
|
| 39 |
+
|
| 40 |
+
return (
|
| 41 |
+
<div className="docMainWrapper wrapper">
|
| 42 |
+
<Container className="mainContainer documentContainer postContainer">
|
| 43 |
+
<div className="post">
|
| 44 |
+
<header className="postHeader">
|
| 45 |
+
<h1>Need help?</h1>
|
| 46 |
+
</header>
|
| 47 |
+
<p>This project is maintained by a dedicated group of people.</p>
|
| 48 |
+
<GridBlock contents={supportLinks} layout="threeColumn" />
|
| 49 |
+
</div>
|
| 50 |
+
</Container>
|
| 51 |
+
</div>
|
| 52 |
+
);
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
module.exports = Help;
|
third_party/AnyBimanual/third_party/pytorch3d/website/pages/en/index.js
ADDED
|
@@ -0,0 +1,240 @@
|
| 1 |
+
/**
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
const React = require('react');
|
| 10 |
+
|
| 11 |
+
const CompLibrary = require('../../core/CompLibrary.js');
|
| 12 |
+
|
| 13 |
+
const MarkdownBlock = CompLibrary.MarkdownBlock; /* Used to read markdown */
|
| 14 |
+
const Container = CompLibrary.Container;
|
| 15 |
+
const GridBlock = CompLibrary.GridBlock;
|
| 16 |
+
const bash = (...args) => `~~~bash\n${String.raw(...args)}\n~~~`;
|
| 17 |
+
class HomeSplash extends React.Component {
|
| 18 |
+
render() {
|
| 19 |
+
const {siteConfig, language = ''} = this.props;
|
| 20 |
+
const {baseUrl, docsUrl} = siteConfig;
|
| 21 |
+
const docsPart = `${docsUrl ? `${docsUrl}/` : ''}`;
|
| 22 |
+
const langPart = `${language ? `${language}/` : ''}`;
|
| 23 |
+
const docUrl = doc => `${baseUrl}${docsPart}${langPart}${doc}`;
|
| 24 |
+
|
| 25 |
+
const SplashContainer = props => (
|
| 26 |
+
<div className="homeContainer">
|
| 27 |
+
<div className="homeSplashFade">
|
| 28 |
+
<div className="wrapper homeWrapper">{props.children}</div>
|
| 29 |
+
</div>
|
| 30 |
+
</div>
|
| 31 |
+
);
|
| 32 |
+
|
| 33 |
+
const Logo = props => (
|
| 34 |
+
<div className="splashLogo">
|
| 35 |
+
<img src={props.img_src} alt="Project Logo" />
|
| 36 |
+
</div>
|
| 37 |
+
);
|
| 38 |
+
|
| 39 |
+
const ProjectTitle = props => (
|
| 40 |
+
<h2 className="projectTitle">
|
| 41 |
+
<small>{props.tagline}</small>
|
| 42 |
+
</h2>
|
| 43 |
+
);
|
| 44 |
+
|
| 45 |
+
const PromoSection = props => (
|
| 46 |
+
<div className="section promoSection">
|
| 47 |
+
<div className="promoRow">
|
| 48 |
+
<div className="pluginRowBlock">{props.children}</div>
|
| 49 |
+
</div>
|
| 50 |
+
</div>
|
| 51 |
+
);
|
| 52 |
+
|
| 53 |
+
const Button = props => (
|
| 54 |
+
<div className="pluginWrapper buttonWrapper">
|
| 55 |
+
<a className="button" href={props.href} target={props.target}>
|
| 56 |
+
{props.children}
|
| 57 |
+
</a>
|
| 58 |
+
</div>
|
| 59 |
+
);
|
| 60 |
+
|
| 61 |
+
return (
|
| 62 |
+
<SplashContainer>
|
| 63 |
+
<Logo img_src={baseUrl + 'img/pytorch3dlogowhite.svg'} />
|
| 64 |
+
<div className="inner">
|
| 65 |
+
<ProjectTitle tagline={siteConfig.tagline} title={siteConfig.title} />
|
| 66 |
+
<PromoSection>
|
| 67 |
+
<Button href={docUrl('why_pytorch3d.html')}>Docs</Button>
|
| 68 |
+
<Button href={`${baseUrl}tutorials/`}>Tutorials</Button>
|
| 69 |
+
<Button href={'#quickstart'}>Get Started</Button>
|
| 70 |
+
</PromoSection>
|
| 71 |
+
</div>
|
| 72 |
+
</SplashContainer>
|
| 73 |
+
);
|
| 74 |
+
}
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
function SocialBanner() {
|
| 78 |
+
return (
|
| 79 |
+
<div className="socialBanner">
|
| 80 |
+
<div>
|
| 81 |
+
Support Ukraine 🇺🇦{' '}
|
| 82 |
+
<a href="https://opensource.fb.com/support-ukraine">
|
| 83 |
+
Help Provide Humanitarian Aid to Ukraine
|
| 84 |
+
</a>
|
| 85 |
+
.
|
| 86 |
+
</div>
|
| 87 |
+
</div>
|
| 88 |
+
);
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
class Index extends React.Component {
|
| 92 |
+
render() {
|
| 93 |
+
const {config: siteConfig, language = ''} = this.props;
|
| 94 |
+
const {baseUrl} = siteConfig;
|
| 95 |
+
|
| 96 |
+
const Block = props => (
|
| 97 |
+
<Container
|
| 98 |
+
padding={['bottom', 'top']}
|
| 99 |
+
id={props.id}
|
| 100 |
+
background={props.background}>
|
| 101 |
+
<GridBlock
|
| 102 |
+
align="center"
|
| 103 |
+
contents={props.children}
|
| 104 |
+
layout={props.layout}
|
| 105 |
+
/>
|
| 106 |
+
</Container>
|
| 107 |
+
);
|
| 108 |
+
|
| 109 |
+
const Description = () => (
|
| 110 |
+
<Block background="light">
|
| 111 |
+
{[
|
| 112 |
+
{
|
| 113 |
+
content:
|
| 114 |
+
'This is another description of how this project is useful',
|
| 115 |
+
image: `${baseUrl}img/docusaurus.svg`,
|
| 116 |
+
imageAlign: 'right',
|
| 117 |
+
title: 'Description',
|
| 118 |
+
},
|
| 119 |
+
]}
|
| 120 |
+
</Block>
|
| 121 |
+
);
|
| 122 |
+
|
| 123 |
+
const pre = '```';
|
| 124 |
+
|
| 125 |
+
const codeExample = `${pre}python
|
| 126 |
+
from pytorch3d.utils import ico_sphere
|
| 127 |
+
from pytorch3d.io import load_obj
|
| 128 |
+
from pytorch3d.structures import Meshes
|
| 129 |
+
from pytorch3d.ops import sample_points_from_meshes
|
| 130 |
+
from pytorch3d.loss import chamfer_distance
|
| 131 |
+
|
| 132 |
+
# Use an ico_sphere mesh and load a mesh from an .obj e.g. model.obj
|
| 133 |
+
sphere_mesh = ico_sphere(level=3)
|
| 134 |
+
verts, faces, _ = load_obj("model.obj")
|
| 135 |
+
test_mesh = Meshes(verts=[verts], faces=[faces.verts_idx])
|
| 136 |
+
|
| 137 |
+
# Differentiably sample 5k points from the surface of each mesh and then compute the loss.
|
| 138 |
+
sample_sphere = sample_points_from_meshes(sphere_mesh, 5000)
|
| 139 |
+
sample_test = sample_points_from_meshes(test_mesh, 5000)
|
| 140 |
+
loss_chamfer, _ = chamfer_distance(sample_sphere, sample_test)
|
| 141 |
+
`;
|
| 142 |
+
|
| 143 |
+
const QuickStart = () => (
|
| 144 |
+
<div
|
| 145 |
+
className="productShowcaseSection"
|
| 146 |
+
id="quickstart"
|
| 147 |
+
style={{textAlign: 'center'}}>
|
| 148 |
+
<h2>Get Started</h2>
|
| 149 |
+
<Container>
|
| 150 |
+
<ol>
|
| 151 |
+
<li>
|
| 152 |
+
<strong>Install PyTorch3D </strong> (following the instructions <a href="https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md">here</a>)
|
| 153 |
+
</li>
|
| 154 |
+
<li>
|
| 155 |
+
<strong>Try a few 3D operators </strong>
|
| 156 |
+
e.g. compute the chamfer loss between two meshes:
|
| 157 |
+
<MarkdownBlock>{codeExample}</MarkdownBlock>
|
| 158 |
+
</li>
|
| 159 |
+
</ol>
|
| 160 |
+
</Container>
|
| 161 |
+
</div>
|
| 162 |
+
);
|
| 163 |
+
|
| 164 |
+
const Features = () => (
|
| 165 |
+
<div className="productShowcaseSection" style={{textAlign: 'center'}}>
|
| 166 |
+
<Block layout="fourColumn">
|
| 167 |
+
{[
|
| 168 |
+
{
|
| 169 |
+
content:
|
| 170 |
+
'Supports batching of 3D inputs of different sizes ' +
|
| 171 |
+
'such as meshes' ,
|
| 172 |
+
image: `${baseUrl}img/batching.svg`,
|
| 173 |
+
imageAlign: 'top',
|
| 174 |
+
title: 'Heterogeneous Batching',
|
| 175 |
+
},
|
| 176 |
+
{
|
| 177 |
+
content:
|
| 178 |
+
'Supports optimized implementations of ' +
|
| 179 |
+
'several common functions for 3D data',
|
| 180 |
+
image: `${baseUrl}img/ops.png`,
|
| 181 |
+
imageAlign: 'top',
|
| 182 |
+
title: 'Fast 3D Operators',
|
| 183 |
+
},
|
| 184 |
+
{
|
| 185 |
+
content:
|
| 186 |
+
'Modular differentiable rendering API ' +
|
| 187 |
+
'with parallel implementations in ' +
|
| 188 |
+
'PyTorch, C++ and CUDA' ,
|
| 189 |
+
image: `${baseUrl}img/rendering.svg`,
|
| 190 |
+
imageAlign: 'top',
|
| 191 |
+
title: 'Differentiable Rendering',
|
| 192 |
+
},
|
| 193 |
+
]}
|
| 194 |
+
</Block>
|
| 195 |
+
</div>
|
| 196 |
+
);
|
| 197 |
+
|
| 198 |
+
const Showcase = () => {
|
| 199 |
+
if ((siteConfig.users || []).length === 0) {
|
| 200 |
+
return null;
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
const showcase = siteConfig.users
|
| 204 |
+
.filter(user => user.pinned)
|
| 205 |
+
.map(user => (
|
| 206 |
+
<a href={user.infoLink} key={user.infoLink}>
|
| 207 |
+
<img src={user.image} alt={user.caption} title={user.caption} />
|
| 208 |
+
</a>
|
| 209 |
+
));
|
| 210 |
+
|
| 211 |
+
const pageUrl = page => baseUrl + (language ? `${language}/` : '') + page;
|
| 212 |
+
|
| 213 |
+
return (
|
| 214 |
+
<div className="productShowcaseSection paddingBottom">
|
| 215 |
+
<h2>Who is Using This?</h2>
|
| 216 |
+
<p>This project is used by all these people</p>
|
| 217 |
+
<div className="logos">{showcase}</div>
|
| 218 |
+
<div className="more-users">
|
| 219 |
+
<a className="button" href={pageUrl('users.html')}>
|
| 220 |
+
More {siteConfig.title} Users
|
| 221 |
+
</a>
|
| 222 |
+
</div>
|
| 223 |
+
</div>
|
| 224 |
+
);
|
| 225 |
+
};
|
| 226 |
+
|
| 227 |
+
return (
|
| 228 |
+
<div>
|
| 229 |
+
<SocialBanner />
|
| 230 |
+
<HomeSplash siteConfig={siteConfig} language={language} />
|
| 231 |
+
<div className="landingPage mainContainer">
|
| 232 |
+
<Features />
|
| 233 |
+
<QuickStart />
|
| 234 |
+
</div>
|
| 235 |
+
</div>
|
| 236 |
+
);
|
| 237 |
+
}
|
| 238 |
+
}
|
| 239 |
+
|
| 240 |
+
module.exports = Index;
|
third_party/AnyBimanual/third_party/pytorch3d/website/pages/en/users.js
ADDED
|
@@ -0,0 +1,49 @@
|
| 1 |
+
/**
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
const React = require('react');
|
| 10 |
+
|
| 11 |
+
const CompLibrary = require('../../core/CompLibrary.js');
|
| 12 |
+
|
| 13 |
+
const Container = CompLibrary.Container;
|
| 14 |
+
|
| 15 |
+
class Users extends React.Component {
|
| 16 |
+
render() {
|
| 17 |
+
const {config: siteConfig} = this.props;
|
| 18 |
+
if ((siteConfig.users || []).length === 0) {
|
| 19 |
+
return null;
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
const editUrl = `${siteConfig.repoUrl}/edit/main/website/siteConfig.js`;
|
| 23 |
+
const showcase = siteConfig.users.map(user => (
|
| 24 |
+
<a href={user.infoLink} key={user.infoLink}>
|
| 25 |
+
<img src={user.image} alt={user.caption} title={user.caption} />
|
| 26 |
+
</a>
|
| 27 |
+
));
|
| 28 |
+
|
| 29 |
+
return (
|
| 30 |
+
<div className="mainContainer">
|
| 31 |
+
<Container padding={['bottom', 'top']}>
|
| 32 |
+
<div className="showcaseSection">
|
| 33 |
+
<div className="prose">
|
| 34 |
+
<h1>Who is Using This?</h1>
|
| 35 |
+
<p>This project is used by many folks</p>
|
| 36 |
+
</div>
|
| 37 |
+
<div className="logos">{showcase}</div>
|
| 38 |
+
<p>Are you using this project?</p>
|
| 39 |
+
<a href={editUrl} className="button">
|
| 40 |
+
Add your company
|
| 41 |
+
</a>
|
| 42 |
+
</div>
|
| 43 |
+
</Container>
|
| 44 |
+
</div>
|
| 45 |
+
);
|
| 46 |
+
}
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
module.exports = Users;
|
third_party/AnyBimanual/third_party/pytorch3d/website/pages/tutorials/index.js
ADDED
|
@@ -0,0 +1,83 @@
|
| 1 |
+
/**
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*
|
| 8 |
+
* @format
|
| 9 |
+
*/
|
| 10 |
+
|
| 11 |
+
const React = require('react');
|
| 12 |
+
|
| 13 |
+
const CWD = process.cwd();
|
| 14 |
+
|
| 15 |
+
const CompLibrary = require(`${CWD}/node_modules/docusaurus/lib/core/CompLibrary.js`);
|
| 16 |
+
const Container = CompLibrary.Container;
|
| 17 |
+
const MarkdownBlock = CompLibrary.MarkdownBlock;
|
| 18 |
+
|
| 19 |
+
const TutorialSidebar = require(`${CWD}/core/TutorialSidebar.js`);
|
| 20 |
+
const bash = (...args) => `~~~bash\n${String.raw(...args)}\n~~~`;
|
| 21 |
+
|
| 22 |
+
class TutorialHome extends React.Component {
|
| 23 |
+
render() {
|
| 24 |
+
return (
|
| 25 |
+
<div className="docMainWrapper wrapper">
|
| 26 |
+
<TutorialSidebar currentTutorialID={null} />
|
| 27 |
+
<Container className="mainContainer documentContainer postContainer">
|
| 28 |
+
<div className="post">
|
| 29 |
+
<header className="postHeader">
|
| 30 |
+
<h1 className="postHeaderTitle">
|
| 31 |
+
Welcome to the PyTorch3D Tutorials
|
| 32 |
+
</h1>
|
| 33 |
+
</header>
|
| 34 |
+
<p>
|
| 35 |
+
Here you can learn about the structure and applications of
|
| 36 |
+
PyTorch3D from examples which are in the form of ipython
|
| 37 |
+
notebooks.
|
| 38 |
+
</p>
|
| 39 |
+
<h3> Run interactively </h3>
|
| 40 |
+
<p>
|
| 41 |
+
At the top of each example you can find a button named{' '}
|
| 42 |
+
<strong>"Run in Google Colab"</strong> which will open the
|
| 43 |
+
notebook in{' '}
|
| 44 |
+
<a href="https://colab.research.google.com/notebooks/intro.ipynb">
|
| 45 |
+
{' '}
|
| 46 |
+
Google Colaboratory{' '}
|
| 47 |
+
</a>{' '}
|
| 48 |
+
where you can run the code directly in the browser with access to
|
| 49 |
+
GPU support - it looks like this:
|
| 50 |
+
</p>
|
| 51 |
+
<div className="tutorialButtonsWrapper">
|
| 52 |
+
<div className="tutorialButtonWrapper buttonWrapper">
|
| 53 |
+
<a className="tutorialButton button" target="_blank">
|
| 54 |
+
<img
|
| 55 |
+
className="colabButton"
|
| 56 |
+
align="left"
|
| 57 |
+
src="/img/colab_icon.png"
|
| 58 |
+
/>
|
| 59 |
+
{'Run in Google Colab'}
|
| 60 |
+
</a>
|
| 61 |
+
</div>
|
| 62 |
+
</div>
|
| 63 |
+
<p>
|
| 64 |
+
{' '}
|
| 65 |
+
You can modify the code and experiment with varying different
|
| 66 |
+
settings. Remember to install the latest stable version of
|
| 67 |
+
PyTorch3D and its dependencies. Code to do this with pip is
|
| 68 |
+
provided in each notebook.{' '}
|
| 69 |
+
</p>
|
| 70 |
+
<h3> Run locally </h3>
|
| 71 |
+
<p>
|
| 72 |
+
{' '}
|
| 73 |
+
There is also a button to download the notebook and source code to
|
| 74 |
+
run it locally.{' '}
|
| 75 |
+
</p>
|
| 76 |
+
</div>
|
| 77 |
+
</Container>
|
| 78 |
+
</div>
|
| 79 |
+
);
|
| 80 |
+
}
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
module.exports = TutorialHome;
|
third_party/AnyBimanual/third_party/pytorch3d/website/sidebars.json
ADDED
|
@@ -0,0 +1,9 @@
|
{
  "docs": {
    "Introduction": ["why_pytorch3d"],
    "Data": ["io", "meshes_io", "datasets", "batching"],
    "Ops": ["cubify", "iou3d"],
    "Visualization": ["visualization"],
    "Renderer": ["renderer", "renderer_getting_started", "cameras"]
  }
}
third_party/AnyBimanual/third_party/pytorch3d/website/siteConfig.js
ADDED
|
@@ -0,0 +1,91 @@
|
| 1 |
+
/**
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
// @licenselint-loose-mode
|
| 10 |
+
|
| 11 |
+
// See https://docusaurus.io/docs/site-config for all the possible
|
| 12 |
+
// site configuration options.
|
| 13 |
+
|
| 14 |
+
// List of projects/orgs using your project for the users page.
|
| 15 |
+
const users = [
|
| 16 |
+
{
|
| 17 |
+
caption: 'User1',
|
| 18 |
+
// You will need to prepend the image path with your baseUrl
|
| 19 |
+
// if it is not '/', like: '/test-site/img/image.jpg'.
|
| 20 |
+
image: '/img/undraw_open_source.svg',
|
| 21 |
+
infoLink: 'https://www.facebook.com',
|
| 22 |
+
pinned: true,
|
| 23 |
+
},
|
| 24 |
+
];
|
| 25 |
+
|
| 26 |
+
const baseUrl = '/'
|
| 27 |
+
|
| 28 |
+
const siteConfig = {
|
| 29 |
+
title: 'PyTorch3D', // Title for your website.
|
| 30 |
+
tagline: 'A library for deep learning with 3D data',
|
| 31 |
+
url: 'https://pytorch3d.org', // Your website URL
|
| 32 |
+
baseUrl: baseUrl, // Base URL for your project */
|
| 33 |
+
projectName: 'pytorch3d',
|
| 34 |
+
organizationName: 'facebookresearch',
|
| 35 |
+
customDocsPath: 'docs/notes',
|
| 36 |
+
headerLinks: [
|
| 37 |
+
{doc: 'why_pytorch3d', label: 'Docs'},
|
| 38 |
+
{page: 'tutorials', label: 'Tutorials'},
|
| 39 |
+
{href: "https://pytorch3d.readthedocs.io/", label: 'API'},
|
| 40 |
+
{href: "https://github.com/facebookresearch/pytorch3d", label: 'GitHub'},
|
| 41 |
+
],
|
| 42 |
+
|
| 43 |
+
// If you have users set above, you add it here:
|
| 44 |
+
users,
|
| 45 |
+
|
| 46 |
+
/* path to images for header/footer */
|
| 47 |
+
headerIcon: 'img/pytorch3dfavicon.png',
|
| 48 |
+
footerIcon: 'img/pytorch3dfavicon.png',
|
| 49 |
+
favicon: 'img/pytorch3dfavicon.png',
|
| 50 |
+
|
| 51 |
+
/* Colors for website */
|
| 52 |
+
colors: {
|
| 53 |
+
primaryColor: '#812CE5',
|
| 54 |
+
secondaryColor: '#FFAF00',
|
| 55 |
+
},
|
| 56 |
+
|
| 57 |
+
// This copyright info is used in /core/Footer.js and blog RSS/Atom feeds.
|
| 58 |
+
copyright: `Copyright \u{00A9} ${new Date().getFullYear()} Meta Platforms, Inc`,
|
| 59 |
+
|
| 60 |
+
highlight: {
|
| 61 |
+
// Highlight.js theme to use for syntax highlighting in code blocks.
|
| 62 |
+
theme: 'default',
|
| 63 |
+
},
|
| 64 |
+
|
| 65 |
+
// Add custom scripts here that would be placed in <script> tags.
|
| 66 |
+
scripts: ['https://buttons.github.io/buttons.js'],
|
| 67 |
+
|
| 68 |
+
// On page navigation for the current documentation page.
|
| 69 |
+
onPageNav: 'separate',
|
| 70 |
+
// No .html extensions for paths.
|
| 71 |
+
cleanUrl: true,
|
| 72 |
+
|
| 73 |
+
// Open Graph and Twitter card images.
|
| 74 |
+
ogImage: 'img/pytorch3dlogoicon.svg',
|
| 75 |
+
twitterImage: 'img/pytorch3dlogoicon.svg',
|
| 76 |
+
|
| 77 |
+
// Google analytics
|
| 78 |
+
gaTrackingId: 'UA-157376881-1',
|
| 79 |
+
|
| 80 |
+
// For sites with a sizable amount of content, set collapsible to true.
|
| 81 |
+
// Expand/collapse the links and subcategories under categories.
|
| 82 |
+
// docsSideNavCollapsible: true,
|
| 83 |
+
|
| 84 |
+
// Show documentation's last contributor's name.
|
| 85 |
+
enableUpdateBy: true,
|
| 86 |
+
|
| 87 |
+
// Show documentation's last update time.
|
| 88 |
+
// enableUpdateTime: true,
|
| 89 |
+
};
|
| 90 |
+
|
| 91 |
+
module.exports = siteConfig;
|
third_party/AnyBimanual/third_party/pytorch3d/website/static/css/custom.css
ADDED
|
@@ -0,0 +1,360 @@
|
| 1 |
+
/**
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
html body {
|
| 10 |
+
font-family: 'Montserrat', sans-serif;
|
| 11 |
+
overflow-x: hidden;
|
| 12 |
+
}
|
| 13 |
+
|
| 14 |
+
.fixedHeaderContainer {
|
| 15 |
+
background-color: #222222;
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
.fixedHeaderContainer header .headerTitleWithLogo {
|
| 20 |
+
display: block;
|
| 21 |
+
color: #ffffff;
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
.fixedHeaderContainer header .logo {
|
| 25 |
+
height: 50px;
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
.fixedHeaderContainer header a:nth-child(2) {
|
| 29 |
+
position: absolute;
|
| 30 |
+
right: 0px;
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
.fixedHeaderContainer header a:nth-child(2) h3 {
|
| 34 |
+
font-size: 14px;
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
.fixedHeaderContainer header a:nth-child(2) h3::before {
|
| 38 |
+
content: 'v: ';
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
.navigationSlider {
|
| 42 |
+
margin-right: 80px;
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
.navigationSlider .slidingNav ul {
|
| 46 |
+
background: #222222;
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
.navigationSlider .slidingNav ul li a {
|
| 50 |
+
color: #c7d4fd;
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
.navigationSlider .slidingNav ul li a:hover,
|
| 54 |
+
.navigationSlider .slidingNav ul li a:focus {
|
| 55 |
+
color: #ffffff;
|
| 56 |
+
background-color: inherit;
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
.navigationSlider .slidingNav ul li.siteNavItemActive > a,
|
| 60 |
+
.navigationSlider .slidingNav ul li.siteNavGroupActive > a {
|
| 61 |
+
background-color: inherit;
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
.homeContainer {
|
| 65 |
+
background: linear-gradient(
|
| 66 |
+
rgba(129, 44, 229, 1) 0%,
|
| 67 |
+
rgba(255, 175, 0, 1) 100%
|
| 68 |
+
);
|
| 69 |
+
padding: 25px 0px;
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
.splashLogo {
|
| 73 |
+
display: block;
|
| 74 |
+
margin: 0 auto;
|
| 75 |
+
width: 65%;
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
.projectTitle {
|
| 79 |
+
color: #ffffff;
|
| 80 |
+
font-variant: small-caps;
|
| 81 |
+
font-weight: 300;
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
.promoSection .button {
|
| 85 |
+
border: 2px solid #fff;
|
| 86 |
+
color: #ffffff;
|
| 87 |
+
font-size: 19px;
|
| 88 |
+
margin: 10px;
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
.promoSection .button:hover {
|
| 92 |
+
background: inherit;
|
| 93 |
+
border: 2px solid #ffffff;
|
| 94 |
+
color: #ffffff;
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
.landingPage {
|
| 98 |
+
padding: 0px;
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
.productShowcaseSection {
|
| 103 |
+
padding: 45px 20px 30px 20px;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
div.productShowcaseSection {
|
| 107 |
+
color: #6c6c6c;
|
| 108 |
+
padding-top: 40px;
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
#quickstart {
|
| 112 |
+
padding-top: 80px;
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
.productShowcaseSection > h2 {
|
| 116 |
+
font-variant: small-caps;
|
| 117 |
+
font-weight: 360;
|
| 118 |
+
margin: 0px;
|
| 119 |
+
padding: 0px;
|
| 120 |
+
color: #5b1861;
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
.productShowcaseSection p {
|
| 124 |
+
font-weight: 360;
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
# Subtitles for key features
|
| 128 |
+
.productShowcaseSection .blockContent > div span p {
|
| 129 |
+
font-size: 18px;
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
.productShowcaseSection div.container {
|
| 133 |
+
padding-bottom: 40px;
|
| 134 |
+
padding-top: 10px;
|
| 135 |
+
padding-left: 0px;
|
| 136 |
+
padding-right: 0px;
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
.productShowcaseSection img {
|
| 140 |
+
height: 100px;
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
.gridBlock .fourByGridBlock img {
|
| 144 |
+
max-width: 200%;
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
.productShowcaseSection li {
|
| 148 |
+
padding: 10px 0;
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
.productShowcaseSection pre {
|
| 152 |
+
margin: 10px 0;
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
.productShowcaseSection code {
|
| 156 |
+
background: #fff;
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
.container .wrapper .alignCenter h2 {
|
| 160 |
+
color: #222222;
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
div#quickstart {
|
| 164 |
+
background: #efefef;
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
div#quickstart ol {
|
| 168 |
+
margin-bottom: 0px;
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
.nav-footer {
|
| 172 |
+
background-color: #222222;
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
.nav-footer .sitemap a {
|
| 176 |
+
color: #c7d4fd;
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
.nav-footer .sitemap a:hover {
|
| 180 |
+
color: #ffffff;
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
.social {
|
| 184 |
+
text-align: center
|
| 185 |
+
}
|
| 186 |
+
|
| 187 |
+
a,
|
| 188 |
+
p a {
|
| 189 |
+
color: #4872f9;
|
| 190 |
+
}
|
| 191 |
+
|
| 192 |
+
a:hover,
|
| 193 |
+
p a:hover {
|
| 194 |
+
color: #4872f9;
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
.imageAlignTop .blockImage {
|
| 198 |
+
margin-bottom: 20px;
|
| 199 |
+
max-width: 200px;
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
/* Style tutorials */
|
| 203 |
+
.tutorialBody {
|
| 204 |
+
margin-top: -20px;
|
| 205 |
+
color: #6c6c6c;
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
.tutorialBody h1 {
|
| 209 |
+
margin: 0px;
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
.tutorialBody h1,
|
| 213 |
+
.tutorialBody h2,
|
| 214 |
+
.tutorialBody h3 {
|
| 215 |
+
color: #222222;
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
.tutorialBody pre {
|
| 219 |
+
font-family: 'IBM Plex Mono', monospace;
|
| 220 |
+
font-size: 14px;
|
| 221 |
+
margin: 0px;
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
.tutorialBody .input_prompt,
|
| 225 |
+
.tutorialBody .output_prompt {
|
| 226 |
+
color: darkred;
|
| 227 |
+
font-size: 12px;
|
| 228 |
+
}
|
| 229 |
+
|
| 230 |
+
.tutorialBody .highlight {
|
| 231 |
+
background: #f3f4f7;
|
| 232 |
+
padding: 10px 20px;
|
| 233 |
+
border: lightgray 1px solid;
|
| 234 |
+
border-radius: 3px;
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
.tutorialBody .cell {
|
| 238 |
+
margin: 20px;
|
| 239 |
+
}
|
| 240 |
+
|
| 241 |
+
.tutorialBody .output_stderr {
|
| 242 |
+
background-color: #fdede9;
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
.tutorialBody .anchor-link {
|
| 246 |
+
color: lightgray;
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
.tutorialBody iframe {
|
| 250 |
+
width: 100%;
|
| 251 |
+
height: 100vh;
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
.tutorialButtonWrapper,
|
| 255 |
+
.tutorialRuntime {
|
| 256 |
+
margin: 20px;
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
.tutorialButtonWrapper {
|
| 260 |
+
float: left;
|
| 261 |
+
margin: 5px;
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
.colabButtonWrapper {
|
| 265 |
+
float: left;
|
| 266 |
+
margin: 5px;
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
.colabButtonWrapper img {
|
| 270 |
+
padding-right: 0.25em;
|
| 271 |
+
}
|
| 272 |
+
|
| 273 |
+
.colabButton {
|
| 274 |
+
width: 24px;
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
.tutorialButtonsWrapper {
|
| 278 |
+
display: flex;
|
| 279 |
+
align-items: center;
|
| 280 |
+
padding-bottom: 15px;
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
/* .tutorialButton {
|
| 284 |
+
color: #4872f9;
|
| 285 |
+
border: 1px solid #4872f9;
|
| 286 |
+
}
|
| 287 |
+
*/
|
| 288 |
+
.tutorialButton svg {
|
| 289 |
+
height: 15px;
|
| 290 |
+
margin-right: 5px;
|
| 291 |
+
}
|
| 292 |
+
|
| 293 |
+
.tutorialButton:hover {
|
| 294 |
+
color: #4872f9;
|
| 295 |
+
background-color: inherit;
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
.wrapper {
|
| 299 |
+
max-width: 1400px;
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
@media only screen and (min-device-width: 360px) and (max-device-width: 736px) {
|
| 304 |
+
.fixedHeaderContainer header a:nth-child(2) {
|
| 305 |
+
position: absolute;
|
| 306 |
+
right: 150px;
|
| 307 |
+
}
|
| 308 |
+
.promoSection .button {
|
| 309 |
+
font-size: 12px;
|
| 310 |
+
margin: 3px;
|
| 311 |
+
}
|
| 312 |
+
.inner h2 {
|
| 313 |
+
margin-top: 0px;
|
| 314 |
+
}
|
| 315 |
+
.splashLogo {
|
| 316 |
+
width: 90%;
|
| 317 |
+
}
|
| 318 |
+
.headerTitleWithLogo {
|
| 319 |
+
display: block !important;
|
| 320 |
+
}
|
| 321 |
+
.blockContent > div span p {
|
| 322 |
+
margin-bottom: 30px
|
| 323 |
+
}
|
| 324 |
+
.productShowcaseSection div.container {
|
| 325 |
+
padding-top: 0px;
|
| 326 |
+
}
|
| 327 |
+
.productShowcaseSection > h2 {
|
| 328 |
+
padding-bottom: 20px;
|
| 329 |
+
}
|
| 330 |
+
}
|
| 331 |
+
|
| 332 |
+
@media only screen and (max-width: 1023px) {
|
| 333 |
+
.fixedHeaderContainer header a:nth-child(2) {
|
| 334 |
+
position: absolute;
|
| 335 |
+
right: 200px;
|
| 336 |
+
}
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
@media only screen and (min-width: 1024px) {
|
| 340 |
+
}
|
| 341 |
+
|
| 342 |
+
@media only screen and (min-width: 1400px) {
|
| 343 |
+
}
|
| 344 |
+
|
| 345 |
+
@media only screen and (min-width: 1500px) {
|
| 346 |
+
}
|
| 347 |
+
|
| 348 |
+
/* Social Banner */
|
| 349 |
+
.socialBanner {
|
| 350 |
+
font-weight: bold;
|
| 351 |
+
font-size: 20px;
|
| 352 |
+
padding: 20px;
|
| 353 |
+
max-width: 768px;
|
| 354 |
+
margin: 0 auto;
|
| 355 |
+
text-align: center;
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
.socialBanner a {
|
| 359 |
+
text-decoration: underline;
|
| 360 |
+
}
|
third_party/AnyBimanual/third_party/pytorch3d/website/static/css/pygments.css
ADDED
|
@@ -0,0 +1,213 @@
|
+/**
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+.highlight .hll {
+  background-color: #ffffcc;
+}
+.highlight .c {
+  color: #60a0b0;
+  font-style: italic;
+} /* Comment */
+.highlight .err {
+  border: 1px solid #ff0000;
+} /* Error */
+.highlight .k {
+  color: #007020;
+  font-weight: bold;
+} /* Keyword */
+.highlight .o {
+  color: #666666;
+} /* Operator */
+.highlight .cm {
+  color: #60a0b0;
+  font-style: italic;
+} /* Comment.Multiline */
+.highlight .cp {
+  color: #007020;
+} /* Comment.Preproc */
+.highlight .c1 {
+  color: #60a0b0;
+  font-style: italic;
+} /* Comment.Single */
+.highlight .cs {
+  color: #60a0b0;
+  background-color: #fff0f0;
+} /* Comment.Special */
+.highlight .gd {
+  color: #a00000;
+} /* Generic.Deleted */
+.highlight .ge {
+  font-style: italic;
+} /* Generic.Emph */
+.highlight .gr {
+  color: #ff0000;
+} /* Generic.Error */
+.highlight .gh {
+  color: #000080;
+  font-weight: bold;
+} /* Generic.Heading */
+.highlight .gi {
+  color: #00a000;
+} /* Generic.Inserted */
+.highlight .go {
+  color: #808080;
+} /* Generic.Output */
+.highlight .gp {
+  color: #c65d09;
+  font-weight: bold;
+} /* Generic.Prompt */
+.highlight .gs {
+  font-weight: bold;
+} /* Generic.Strong */
+.highlight .gu {
+  color: #800080;
+  font-weight: bold;
+} /* Generic.Subheading */
+.highlight .gt {
+  color: #0040d0;
+} /* Generic.Traceback */
+.highlight .kc {
+  color: #007020;
+  font-weight: bold;
+} /* Keyword.Constant */
+.highlight .kd {
+  color: #007020;
+  font-weight: bold;
+} /* Keyword.Declaration */
+.highlight .kn {
+  color: #007020;
+  font-weight: bold;
+} /* Keyword.Namespace */
+.highlight .kp {
+  color: #007020;
+} /* Keyword.Pseudo */
+.highlight .kr {
+  color: #007020;
+  font-weight: bold;
+} /* Keyword.Reserved */
+.highlight .kt {
+  color: #902000;
+} /* Keyword.Type */
+.highlight .m {
+  color: #40a070;
+} /* Literal.Number */
+.highlight .s {
+  color: #4070a0;
+} /* Literal.String */
+.highlight .na {
+  color: #4070a0;
+} /* Name.Attribute */
+.highlight .nb {
+  color: #007020;
+} /* Name.Builtin */
+.highlight .nc {
+  color: #0e84b5;
+  font-weight: bold;
+} /* Name.Class */
+.highlight .no {
+  color: #60add5;
+} /* Name.Constant */
+.highlight .nd {
+  color: #555555;
+  font-weight: bold;
+} /* Name.Decorator */
+.highlight .ni {
+  color: #d55537;
+  font-weight: bold;
+} /* Name.Entity */
+.highlight .ne {
+  color: #007020;
+} /* Name.Exception */
+.highlight .nf {
+  color: #06287e;
+} /* Name.Function */
+.highlight .nl {
+  color: #002070;
+  font-weight: bold;
+} /* Name.Label */
+.highlight .nn {
+  color: #0e84b5;
+  font-weight: bold;
+} /* Name.Namespace */
+.highlight .nt {
+  color: #062873;
+  font-weight: bold;
+} /* Name.Tag */
+.highlight .nv {
+  color: #bb60d5;
+} /* Name.Variable */
+.highlight .ow {
+  color: #007020;
+  font-weight: bold;
+} /* Operator.Word */
+.highlight .w {
+  color: #bbbbbb;
+} /* Text.Whitespace */
+.highlight .mf {
+  color: #40a070;
+} /* Literal.Number.Float */
+.highlight .mh {
+  color: #40a070;
+} /* Literal.Number.Hex */
+.highlight .mi {
+  color: #40a070;
+} /* Literal.Number.Integer */
+.highlight .mo {
+  color: #40a070;
+} /* Literal.Number.Oct */
+.highlight .sb {
+  color: #4070a0;
+} /* Literal.String.Backtick */
+.highlight .sc {
+  color: #4070a0;
+} /* Literal.String.Char */
+.highlight .sd {
+  color: #4070a0;
+  font-style: italic;
+} /* Literal.String.Doc */
+.highlight .s2 {
+  color: #4070a0;
+} /* Literal.String.Double */
+.highlight .se {
+  color: #4070a0;
+  font-weight: bold;
+} /* Literal.String.Escape */
+.highlight .sh {
+  color: #4070a0;
+} /* Literal.String.Heredoc */
+.highlight .si {
+  color: #70a0d0;
+  font-style: italic;
+} /* Literal.String.Interpol */
+.highlight .sx {
+  color: #c65d09;
+} /* Literal.String.Other */
+.highlight .sr {
+  color: #235388;
+} /* Literal.String.Regex */
+.highlight .s1 {
+  color: #4070a0;
+} /* Literal.String.Single */
+.highlight .ss {
+  color: #517918;
+} /* Literal.String.Symbol */
+.highlight .bp {
+  color: #007020;
+} /* Name.Builtin.Pseudo */
+.highlight .vc {
+  color: #bb60d5;
+} /* Name.Variable.Class */
+.highlight .vg {
+  color: #bb60d5;
+} /* Name.Variable.Global */
+.highlight .vi {
+  color: #bb60d5;
+} /* Name.Variable.Instance */
+.highlight .il {
+  color: #40a070;
+} /* Literal.Number.Integer.Long */
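Aside: the stylesheet added above is a Pygments syntax-highlighting theme scoped to the `.highlight` container used by the Docusaurus site. A minimal sketch of how such a file can be regenerated with the `pygments` Python package (the style name "default" is an assumption; this is not necessarily how this particular theme was produced):

    # Sketch (assumption): emit a .highlight-scoped stylesheet using Pygments.
    from pygments.formatters import HtmlFormatter

    # get_style_defs(".highlight") returns CSS rules prefixed with the given selector.
    css = HtmlFormatter(style="default").get_style_defs(".highlight")
    with open("pygments.css", "w", encoding="utf-8") as fh:
        fh.write(css)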
third_party/AnyBimanual/third_party/pytorch3d/website/static/img/batching.svg
ADDED
third_party/AnyBimanual/third_party/pytorch3d/website/static/img/colab_icon.png
ADDED
third_party/AnyBimanual/third_party/pytorch3d/website/static/img/favicon.ico
ADDED
third_party/AnyBimanual/third_party/pytorch3d/website/static/img/ops.png
ADDED
third_party/AnyBimanual/third_party/pytorch3d/website/static/img/ops.svg
ADDED
third_party/AnyBimanual/third_party/pytorch3d/website/static/img/oss_logo.png
ADDED
third_party/AnyBimanual/third_party/pytorch3d/website/static/img/pytorch3dfavicon.png
ADDED