diff --git a/utils3d/.gitattributes b/utils3d/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..f13e053bf0ebf99d69b8e28c0f02eb346dcfe15e --- /dev/null +++ b/utils3d/.gitattributes @@ -0,0 +1,2 @@ +# Auto detect text files and perform LF normalization +* text=auto diff --git a/utils3d/.gitignore b/utils3d/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..ee5eb3d77924f475451c78df75beff5228263f10 --- /dev/null +++ b/utils3d/.gitignore @@ -0,0 +1,118 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +.vscode +test/results_to_check +timetest.py diff --git 
a/utils3d/LICENSE b/utils3d/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e272f903dff5da72307bba07d412429306b75d87 --- /dev/null +++ b/utils3d/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 EasternJournalist + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/utils3d/README.md b/utils3d/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b4478ee29f10696eda65e9e20c86e33d46c5eb37 --- /dev/null +++ b/utils3d/README.md @@ -0,0 +1,46 @@ +# utils3d +Easy 3D python utilities for computer vision and graphics researchers. + +Supports: +* Transformation between OpenCV and OpenGL coordinate systems, **no more confusion** +* Easy rasterization, **no worries about OpenGL objects and buffers** +* Some mesh processing utilities, **all vectorized for effciency; some differentiable** +* Projection, unprojection, depth-based image warping, flow-based image warping... 
+* Easy Reading and writing .obj, .ply files +* Reading and writing Colmap format camera parameters +* NeRF/MipNeRF utilities + +For most functions, there are both numpy (indifferentiable) and pytorch implementations (differentiable). + +Pytorch is not required for using this package, but if you want to use the differentiable functions, you will need to install pytorch (and nvdiffrast if you want to use the pytorch rasterization functions). + +## Install + +Install by git + +```bash +pip install git+https://github.com/EasternJournalist/utils3d.git#egg=utils3d +``` + +or clone the repo and install with `-e` option for convenient updating and modifying. + +```bash +git clone https://github.com/EasternJournalist/utils3d.git +pip install -e ./utils3d +``` + +## Topics (TODO) + +### Camera + +### Rotations + +### Mesh + +### Rendering + +### Projection + +### Image warping + +### NeRF \ No newline at end of file diff --git a/utils3d/gen_unified_interface.py b/utils3d/gen_unified_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..c1887ff85d27bb0342427d36192bc6dce57c9fb2 --- /dev/null +++ b/utils3d/gen_unified_interface.py @@ -0,0 +1,106 @@ +import inspect +import textwrap +import re +import itertools +import numbers +import importlib +import sys +import functools +from pathlib import Path + +from utils3d._helpers import suppress_traceback + + +def _contains_tensor(obj): + if isinstance(obj, (list, tuple)): + return any(_contains_tensor(item) for item in obj) + elif isinstance(obj, dict): + return any(_contains_tensor(value) for value in obj.values()) + else: + import torch + return isinstance(obj, torch.Tensor) + +@suppress_traceback +def _call_based_on_args(fname, args, kwargs): + if 'torch' in sys.modules: + if any(_contains_tensor(arg) for arg in args) or any(_contains_tensor(v) for v in kwargs.values()): + fn = getattr(utils3d.torch, fname, None) + if fn is None: + raise NotImplementedError(f"Function {fname} has no torch 
implementation.") + return fn(*args, **kwargs) + fn = getattr(utils3d.numpy, fname, None) + if fn is None: + raise NotImplementedError(f"Function {fname} has no numpy implementation.") + return fn(*args, **kwargs) + + +def extract_signature(fn): + signature = inspect.signature(fn) + + signature_str = str(signature) + + signature_str = re.sub(r"", lambda m: m.group(0).split('\'')[1], signature_str) + signature_str = re.sub(r"(?=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "utils3d" +version = "0.0.2" +description = "A small package for 3D graphics" +readme = "README.md" +authors = [ + {name = "EasternJournalist", email = "wangrc2081cs@mail.ustc.edu.cn"} +] +license = {text = "MIT"} +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent" +] +dependencies = [ + "moderngl", + "numpy", + "plyfile", + "scipy" +] +requires-python = ">=3.7" + +[project.urls] +Homepage = "https://github.com/EasternJournalist/utils3d" + +[tool.setuptools.packages.find] +where = ["."] +include = ["utils3d*"] + +[tool.setuptools.package-data] +"utils3d.numpy.shaders" = ["*"] \ No newline at end of file diff --git a/utils3d/test/io_/write_ply.py b/utils3d/test/io_/write_ply.py new file mode 100644 index 0000000000000000000000000000000000000000..e840f24abaae42a0710089b5e71a40abcc7c4282 --- /dev/null +++ b/utils3d/test/io_/write_ply.py @@ -0,0 +1,22 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))) +import utils3d +import numpy as np + +def run(): + image_uv, image_mesh = utils3d.numpy.utils.image_mesh(128, 128) + image_mesh = image_mesh.reshape(-1, 4) + depth = np.ones((128, 128), dtype=float) * 2 + depth[32:96, 32:96] = 1 + depth = depth.reshape(-1) + intrinsics = utils3d.numpy.transforms.intrinsics_from_fov(1.0, 128, 128) + intrinsics = utils3d.numpy.transforms.normalize_intrinsics(intrinsics, 128, 128) + 
extrinsics = utils3d.numpy.transforms.extrinsics_look_at([0, 0, 1], [0, 0, 0], [0, 1, 0]) + pts = utils3d.numpy.transforms.unproject_cv(image_uv, depth, extrinsics, intrinsics) + pts = pts.reshape(-1, 3) + image_mesh = utils3d.numpy.mesh.triangulate(image_mesh, vertices=pts) + utils3d.io.write_ply(os.path.join(os.path.dirname(__file__), '..', 'results_to_check', 'write_ply.ply'), pts, image_mesh) + +if __name__ == '__main__': + run() diff --git a/utils3d/test/numpy_/mesh/compute_face_angle.py b/utils3d/test/numpy_/mesh/compute_face_angle.py new file mode 100644 index 0000000000000000000000000000000000000000..6f689bd7310bfb2f9769c37a6519c00c6bf6c28b --- /dev/null +++ b/utils3d/test/numpy_/mesh/compute_face_angle.py @@ -0,0 +1,46 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np + +def run(): + for i in range(100): + if i == 0: + spatial = [] + vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=float) + faces = np.array([[0, 1, 2]]) + expected = np.array([[np.pi/2, np.pi/4, np.pi/4]]) + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(100, 1000) + vertices = np.random.rand(*spatial, N, 3) + L = np.random.randint(1, 1000) + faces = np.random.randint(0, N, size=(*spatial, L, 3)) + faces[..., 1] = (faces[..., 0] + 1) % N + faces[..., 2] = (faces[..., 0] + 2) % N + + faces_ = faces.reshape(-1, L, 3) + vertices_ = vertices.reshape(-1, N, 3) + N = vertices_.shape[0] + expected = np.zeros((N, L, 3), dtype=float) + for i in range(3): + edge0 = vertices_[np.arange(N)[:, None], faces_[..., (i+1)%3]] - vertices_[np.arange(N)[:, None], faces_[..., i]] + edge1 = vertices_[np.arange(N)[:, None], faces_[..., (i+2)%3]] - vertices_[np.arange(N)[:, None], faces_[..., i]] + expected[..., i] = np.arccos(np.sum( + edge0 / np.linalg.norm(edge0, axis=-1, keepdims=True) * \ + edge1 / np.linalg.norm(edge1, 
axis=-1, keepdims=True), + axis=-1 + )) + expected = expected.reshape(*spatial, L, 3) + + actual = utils3d.numpy.compute_face_angle(vertices, faces) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/mesh/compute_face_normal.py b/utils3d/test/numpy_/mesh/compute_face_normal.py new file mode 100644 index 0000000000000000000000000000000000000000..9a0789f60a4c4ac886f197fde94efe47d0312e2c --- /dev/null +++ b/utils3d/test/numpy_/mesh/compute_face_normal.py @@ -0,0 +1,41 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np + +def run(): + for i in range(100): + if i == 0: + spatial = [] + vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=float) + faces = np.array([[0, 1, 2]]) + expected = np.array([[0, 0, 1]]) + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(100, 1000) + vertices = np.random.rand(*spatial, N, 3) + L = np.random.randint(1, 1000) + faces = np.random.randint(0, N, size=(*spatial, L, 3)) + faces[..., 1] = (faces[..., 0] + 1) % N + faces[..., 2] = (faces[..., 0] + 2) % N + + faces_ = faces.reshape(-1, L, 3) + vertices_ = vertices.reshape(-1, N, 3) + N = vertices_.shape[0] + expected = np.cross( + vertices_[np.arange(N)[:, None], faces_[..., 1]] - vertices_[np.arange(N)[:, None], faces_[..., 0]], + vertices_[np.arange(N)[:, None], faces_[..., 2]] - vertices_[np.arange(N)[:, None], faces_[..., 0]] + ).reshape(*spatial, L, 3) + expected = np.nan_to_num(expected / np.linalg.norm(expected, axis=-1, keepdims=True)) + + actual = utils3d.numpy.compute_face_normal(vertices, faces) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff 
--git a/utils3d/test/numpy_/mesh/compute_vertex_normal.py b/utils3d/test/numpy_/mesh/compute_vertex_normal.py new file mode 100644 index 0000000000000000000000000000000000000000..a1d213688fb2baeceb9bf3701952b0067438f96c --- /dev/null +++ b/utils3d/test/numpy_/mesh/compute_vertex_normal.py @@ -0,0 +1,43 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +from trimesh import geometry + +def run(): + for i in range(100): + if i == 0: + spatial = [] + vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=float) + faces = np.array([[0, 1, 2]]) + expected = np.array([[0, 0, 1], [0, 0, 1], [0, 0, 1]]) + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(100, 1000) + vertices = np.random.rand(*spatial, N, 3) + L = np.random.randint(1, 1000) + faces = np.random.randint(0, N, size=(*spatial, L, 3)) + faces[..., 1] = (faces[..., 0] + 1) % N + faces[..., 2] = (faces[..., 0] + 2) % N + face_normals = utils3d.numpy.compute_face_normal(vertices, faces) + + faces_ = faces.reshape(-1, L, 3) + face_normals = face_normals.reshape(-1, L, 3) + vertices_normals = [] + for face, face_normal in zip(faces_, face_normals): + vertices_normals.append( + geometry.mean_vertex_normals(N, face, face_normal) + ) + expected = np.array(vertices_normals).reshape(*spatial, N, 3) + + actual = utils3d.numpy.compute_vertex_normal(vertices, faces) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/mesh/compute_vertex_normal_weighted.py b/utils3d/test/numpy_/mesh/compute_vertex_normal_weighted.py new file mode 100644 index 0000000000000000000000000000000000000000..3fba7cde87cc9d816deb84653a31f8347f130569 --- /dev/null +++ b/utils3d/test/numpy_/mesh/compute_vertex_normal_weighted.py @@ 
-0,0 +1,45 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +from trimesh import geometry + +def run(): + for i in range(100): + if i == 0: + spatial = [] + vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=float) + faces = np.array([[0, 1, 2]]) + expected = np.array([[0, 0, 1], [0, 0, 1], [0, 0, 1]]) + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(100, 1000) + vertices = np.random.rand(*spatial, N, 3) + L = np.random.randint(1, 1000) + faces = np.random.randint(0, N, size=(*spatial, L, 3)) + faces[..., 1] = (faces[..., 0] + 1) % N + faces[..., 2] = (faces[..., 0] + 2) % N + face_normals = utils3d.numpy.compute_face_normal(vertices, faces) + face_angles = utils3d.numpy.compute_face_angle(vertices, faces) + + faces_ = faces.reshape(-1, L, 3) + face_normals = face_normals.reshape(-1, L, 3) + face_angles = face_angles.reshape(-1, L, 3) + vertices_normals = [] + for face, face_normal, face_angle in zip(faces_, face_normals, face_angles): + vertices_normals.append( + geometry.weighted_vertex_normals(N, face, face_normal, face_angle) + ) + expected = np.array(vertices_normals).reshape(*spatial, N, 3) + + actual = utils3d.numpy.compute_vertex_normal_weighted(vertices, faces) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/mesh/merge_duplicate_vertices.py b/utils3d/test/numpy_/mesh/merge_duplicate_vertices.py new file mode 100644 index 0000000000000000000000000000000000000000..b7a0130624f0fe89b21547caf11365a53b9022a3 --- /dev/null +++ b/utils3d/test/numpy_/mesh/merge_duplicate_vertices.py @@ -0,0 +1,39 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d 
+import numpy as np + +def run(): + for i in range(100): + if i == 0: + spatial = [] + vertices = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0]], dtype=float) + faces = np.array([[0, 1, 2]]) + expected_vertices = np.array([[0, 0, 0], [1, 0, 0]]) + expected_faces = np.array([[0, 1, 1]]) + expected = expected_vertices[expected_faces] + else: + N = np.random.randint(100, 1000) + vertices = np.random.rand(N, 3) + L = np.random.randint(1, 1000) + faces = np.random.randint(0, N, size=(L, 3)) + faces[..., 1] = (faces[..., 0] + 1) % N + faces[..., 2] = (faces[..., 0] + 2) % N + vertices[-(N//2):] = vertices[:N//2] + + expected_vertices = vertices[:-(N//2)].copy() + expected_faces = faces.copy() + expected_faces[expected_faces >= N - N//2] -= N - N//2 + expected = expected_vertices[expected_faces] + + actual_vertices, actual_faces = utils3d.numpy.merge_duplicate_vertices(vertices, faces) + actual = actual_vertices[actual_faces] + + assert expected_vertices.shape == actual_vertices.shape and np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/mesh/remove_corrupted_faces.py b/utils3d/test/numpy_/mesh/remove_corrupted_faces.py new file mode 100644 index 0000000000000000000000000000000000000000..e818add7c78d96b54ae243930d6e7e10c6e6fb69 --- /dev/null +++ b/utils3d/test/numpy_/mesh/remove_corrupted_faces.py @@ -0,0 +1,30 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np + +def run(): + for i in range(100): + if i == 0: + faces = np.array([[0, 1, 2], [0, 2, 2], [0, 2, 3]]) + expected = np.array([[0, 1, 2], [0, 2, 3]]) + else: + L = np.random.randint(1, 1000) + N = np.random.randint(100, 1000) + faces = np.random.randint(0, N, size=(L, 3)) + faces[..., 1] = (faces[..., 0] + 1) % N + faces[..., 2] = (faces[..., 0] + 2) % N + corrupted = 
np.random.randint(0, 2, size=L).astype(bool) + faces[corrupted, 1] = faces[corrupted, 0] + expected = faces[~corrupted] + + actual = utils3d.numpy.remove_corrupted_faces(faces) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/mesh/triangulate.py b/utils3d/test/numpy_/mesh/triangulate.py new file mode 100644 index 0000000000000000000000000000000000000000..89d25c994ae8c1dcb34255f6490d88d33ecf173d --- /dev/null +++ b/utils3d/test/numpy_/mesh/triangulate.py @@ -0,0 +1,33 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np + +def run(): + for i in range(100): + if i == 0: + spatial = [] + L = 1 + N = 5 + faces = np.array([[0, 1, 2, 3, 4]]) + expected = np.array([[0, 1, 2], [0, 2, 3], [0, 3, 4]]) + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + L = np.random.randint(1, 1000) + N = np.random.randint(3, 10) + faces = np.random.randint(0, 10000, size=(*spatial, L, N)) + + loop_indices = [[0, i, i + 1] for i in range(1, N - 1)] + expected = faces[..., loop_indices].reshape((*spatial, L * (N - 2), 3)) + + actual = utils3d.numpy.triangulate(faces) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/rasterization/warp_image_by_depth.py b/utils3d/test/numpy_/rasterization/warp_image_by_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..92e79b1f71d7ed6ece0b289c7f05d16adcbc9dc9 --- /dev/null +++ b/utils3d/test/numpy_/rasterization/warp_image_by_depth.py @@ -0,0 +1,30 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as 
np +import imageio + +def run(): + depth = np.ones((128, 128), dtype=np.float32) * 2 + depth[32:48, 32:48] = 1 + intrinsics = utils3d.numpy.transforms.intrinsics(1.0, 1.0, 0.5, 0.5).astype(np.float32) + extrinsics_src = utils3d.numpy.transforms.extrinsics_look_at([0, 0, 1], [0, 0, 0], [0, 1, 0]).astype(np.float32) + extrinsics_tgt = utils3d.numpy.transforms.extrinsics_look_at([1, 0, 1], [0, 0, 0], [0, 1, 0]).astype(np.float32) + ctx = utils3d.numpy.rasterization.RastContext( + standalone=True, + backend='egl', + device_index=0, + ) + uv, _ = utils3d.numpy.rasterization.warp_image_by_depth( + ctx, + depth, + extrinsics_src=extrinsics_src, + extrinsics_tgt=extrinsics_tgt, + intrinsics_src=intrinsics + ) + uv = (np.concatenate([uv, np.zeros((128, 128, 1), dtype=np.float32)], axis=-1) * 255).astype(np.uint8) + imageio.imwrite(os.path.join(os.path.dirname(__file__), '..', '..', 'results_to_check', 'warp_image_uv.png'), uv) + +if __name__ == '__main__': + run() diff --git a/utils3d/test/numpy_/transforms/crop_intrinsic.py b/utils3d/test/numpy_/transforms/crop_intrinsic.py new file mode 100644 index 0000000000000000000000000000000000000000..ef11cf7524de482e48cdcda4d0c5eec1b8cc8728 --- /dev/null +++ b/utils3d/test/numpy_/transforms/crop_intrinsic.py @@ -0,0 +1,53 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + width = np.random.uniform(1, 10000, spatial) + height = np.random.uniform(1, 10000, spatial) + left = np.random.uniform(0, width, spatial) + top = np.random.uniform(0, height, spatial) + crop_width = np.random.uniform(0, width - left, spatial) + crop_height = np.random.uniform(0, height - top, spatial) + + focal = 
np.maximum(width, height) / (2 * np.tan(fov / 2)) + cx = width / 2 - left + cy = height / 2 - top + expected = np.zeros((*spatial, 3, 3)) + expected[..., 0, 0] = focal + expected[..., 1, 1] = focal + expected[..., 0, 2] = cx + expected[..., 1, 2] = cy + expected[..., 2, 2] = 1 + expected = utils3d.numpy.normalize_intrinsics(expected, crop_width, crop_height) + + actual = utils3d.numpy.crop_intrinsics( + utils3d.numpy.normalize_intrinsics( + utils3d.numpy.intrinsics_from_fov(fov, width, height), + width, height + ), + width, height, left, top, crop_width, crop_height + ) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov: {fov}\n' + \ + f'\twidth: {width}\n' + \ + f'\theight: {height}\n' + \ + f'\tleft: {left}\n' + \ + f'\ttop: {top}\n' + \ + f'\tcrop_width: {crop_width}\n' + \ + f'\tcrop_height: {crop_height}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/extrinsic_look_at.py b/utils3d/test/numpy_/transforms/extrinsic_look_at.py new file mode 100644 index 0000000000000000000000000000000000000000..901aac16b287703441c3ec9b46f42a5b5171abd8 --- /dev/null +++ b/utils3d/test/numpy_/transforms/extrinsic_look_at.py @@ -0,0 +1,39 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + eye = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + lookat = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + up = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + + expected = [] + for i in range(np.prod(spatial) if len(spatial) > 0 else 1): + expected.append(utils3d.numpy.view_to_extrinsics(np.array(glm.lookAt( + glm.vec3(eye.reshape([-1, 3])[i]), + 
glm.vec3(lookat.reshape([-1, 3])[i]), + glm.vec3(up.reshape([-1, 3])[i]) + )))) + expected = np.concatenate(expected, axis=0).reshape([*spatial, 4, 4]) + + actual = utils3d.numpy.extrinsics_look_at(eye, lookat, up) + + assert np.allclose(expected, actual, 1e-5, 1e-5), '\n' + \ + 'Input:\n' + \ + f'eye: {eye}\n' + \ + f'lookat: {lookat}\n' + \ + f'up: {up}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + \ No newline at end of file diff --git a/utils3d/test/numpy_/transforms/extrinsic_to_view.py b/utils3d/test/numpy_/transforms/extrinsic_to_view.py new file mode 100644 index 0000000000000000000000000000000000000000..93481ade7bd0918cdec5418236c04e4fdd693c70 --- /dev/null +++ b/utils3d/test/numpy_/transforms/extrinsic_to_view.py @@ -0,0 +1,32 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + eye = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + lookat = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + up = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + + expected = utils3d.numpy.view_look_at(eye, lookat, up) + + actual = utils3d.numpy.view_to_extrinsics(utils3d.numpy.extrinsics_look_at(eye, lookat, up)) + + assert np.allclose(expected, actual, 1e-5, 1e-5), '\n' + \ + 'Input:\n' + \ + f'eye: {eye}\n' + \ + f'lookat: {lookat}\n' + \ + f'up: {up}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + \ No newline at end of file diff --git a/utils3d/test/numpy_/transforms/intrinsic.py b/utils3d/test/numpy_/transforms/intrinsic.py new file mode 100644 index 0000000000000000000000000000000000000000..8a61d624fa7478a4bdd959a085fcaa85be929cd5 --- /dev/null +++ 
b/utils3d/test/numpy_/transforms/intrinsic.py @@ -0,0 +1,37 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + focal_x = np.random.uniform(1, 10000, spatial) + focal_y = np.random.uniform(1, 10000, spatial) + center_x = np.random.uniform(1, 10000, spatial) + center_y = np.random.uniform(1, 10000, spatial) + + expected = np.zeros((*spatial, 3, 3)) + expected[..., 0, 0] = focal_x + expected[..., 1, 1] = focal_y + expected[..., 0, 2] = center_x + expected[..., 1, 2] = center_y + expected[..., 2, 2] = 1 + + actual = utils3d.numpy.intrinsics(focal_x, focal_y, center_x, center_y) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfocal_x: {focal_x}\n' + \ + f'\tfocal_y: {focal_y}\n' + \ + f'\tcenter_x: {center_x}\n' + \ + f'\tcenter_y: {center_y}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/intrinsic_from_fov.py b/utils3d/test/numpy_/transforms/intrinsic_from_fov.py new file mode 100644 index 0000000000000000000000000000000000000000..3bde71bc275b84a7c49fd237817768032988e45c --- /dev/null +++ b/utils3d/test/numpy_/transforms/intrinsic_from_fov.py @@ -0,0 +1,38 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + width = np.random.uniform(1, 10000, spatial) + height = np.random.uniform(1, 10000, spatial) + + focal = np.maximum(width, height) / (2 * np.tan(fov / 2)) + cx = 
width / 2 + cy = height / 2 + expected = np.zeros((*spatial, 3, 3)) + expected[..., 0, 0] = focal + expected[..., 1, 1] = focal + expected[..., 0, 2] = cx + expected[..., 1, 2] = cy + expected[..., 2, 2] = 1 + + actual = utils3d.numpy.intrinsics_from_fov(fov, width, height) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov: {fov}\n' + \ + f'\twidth: {width}\n' + \ + f'\theight: {height}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/intrinsic_from_fov_xy.py b/utils3d/test/numpy_/transforms/intrinsic_from_fov_xy.py new file mode 100644 index 0000000000000000000000000000000000000000..4d8a7a2f5b17846bf6ffb638e29bee9e271e3140 --- /dev/null +++ b/utils3d/test/numpy_/transforms/intrinsic_from_fov_xy.py @@ -0,0 +1,36 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov_x = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + fov_y = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + + focal_x = 0.5 / np.tan(fov_x / 2) + focal_y = 0.5 / np.tan(fov_y / 2) + cx = cy = 0.5 + expected = np.zeros((*spatial, 3, 3)) + expected[..., 0, 0] = focal_x + expected[..., 1, 1] = focal_y + expected[..., 0, 2] = cx + expected[..., 1, 2] = cy + expected[..., 2, 2] = 1 + + actual = utils3d.numpy.intrinsics_from_fov_xy(fov_x, fov_y) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov_x: {fov_x}\n' + \ + f'\tfov_y: {fov_y}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/intrinsic_to_perspective.py b/utils3d/test/numpy_/transforms/intrinsic_to_perspective.py new file mode 100644 index 
0000000000000000000000000000000000000000..36f04648919435f48bb01ef118496d10693e1226 --- /dev/null +++ b/utils3d/test/numpy_/transforms/intrinsic_to_perspective.py @@ -0,0 +1,38 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov_x = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + fov_y = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + + expected = utils3d.numpy.perspective_from_fov_xy(fov_x, fov_y, near, far) + + actual = utils3d.numpy.intrinsics_to_perspective( + utils3d.numpy.intrinsics_from_fov_xy(fov_x, fov_y), + near, + far + ) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov_x: {fov_x}\n' + \ + f'\tfov_y: {fov_y}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + \ No newline at end of file diff --git a/utils3d/test/numpy_/transforms/linearize_depth.py b/utils3d/test/numpy_/transforms/linearize_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..5387053ee7b139e364d9008a7cd2a5f4e22ec868 --- /dev/null +++ b/utils3d/test/numpy_/transforms/linearize_depth.py @@ -0,0 +1,34 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + depth = np.random.uniform(near, 
far, spatial) + + expected = depth + + actual = utils3d.numpy.depth_buffer_to_linear( + utils3d.numpy.project_depth(depth, near, far), + near, far + ) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tdepth: {depth}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/normalize_intrinsic.py b/utils3d/test/numpy_/transforms/normalize_intrinsic.py new file mode 100644 index 0000000000000000000000000000000000000000..2fc372bfc46ab0f06baa5548bd020ecbf05a17da --- /dev/null +++ b/utils3d/test/numpy_/transforms/normalize_intrinsic.py @@ -0,0 +1,32 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + width = np.random.uniform(1, 10000, spatial) + height = np.random.uniform(1, 10000, spatial) + fov_x = np.where(width >= height, fov, 2 * np.arctan(np.tan(fov / 2) * width / height)) + fov_y = np.where(width >= height, 2 * np.arctan(np.tan(fov / 2) * height / width), fov) + + expected = utils3d.numpy.intrinsics_from_fov_xy(fov_x, fov_y) + + actual = utils3d.numpy.normalize_intrinsics(utils3d.numpy.intrinsics_from_fov(fov, width, height), width, height) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov: {fov}\n' + \ + f'\twidth: {width}\n' + \ + f'\theight: {height}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/perspective.py b/utils3d/test/numpy_/transforms/perspective.py new file mode 100644 index 0000000000000000000000000000000000000000..072bdb56312184dce14527c29c9a979074ffba9d --- /dev/null +++ 
b/utils3d/test/numpy_/transforms/perspective.py @@ -0,0 +1,36 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fovy = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + aspect = np.random.uniform(0.01, 100, spatial) + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + + expected = [] + for i in range(np.prod(spatial) if len(spatial) > 0 else 1): + expected.append(glm.perspective(fovy.flat[i], aspect.flat[i], near.flat[i], far.flat[i])) + expected = np.concatenate(expected, axis=0).reshape(*spatial, 4, 4) + + actual = utils3d.numpy.perspective(fovy, aspect, near, far) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfovy: {fovy}\n' + \ + f'\taspect: {aspect}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/perspective_from_fov.py b/utils3d/test/numpy_/transforms/perspective_from_fov.py new file mode 100644 index 0000000000000000000000000000000000000000..fc0d4f8914b3661742ea13ab0866ffc93b23557b --- /dev/null +++ b/utils3d/test/numpy_/transforms/perspective_from_fov.py @@ -0,0 +1,40 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + width = np.random.uniform(1, 10000, spatial) + height = np.random.uniform(1, 10000, spatial) + near = 
np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + + fov_y = 2 * np.arctan(np.tan(fov / 2) * height / np.maximum(width, height)) + expected = [] + for i in range(np.prod(spatial) if len(spatial) > 0 else 1): + expected.append(glm.perspective(fov_y.flat[i], width.flat[i] / height.flat[i], near.flat[i], far.flat[i])) + expected = np.concatenate(expected, axis=0).reshape(*spatial, 4, 4) + + actual = utils3d.numpy.perspective_from_fov(fov, width, height, near, far) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov: {fov}\n' + \ + f'\twidth: {width}\n' + \ + f'\theight: {height}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + \ No newline at end of file diff --git a/utils3d/test/numpy_/transforms/perspective_from_fov_xy.py b/utils3d/test/numpy_/transforms/perspective_from_fov_xy.py new file mode 100644 index 0000000000000000000000000000000000000000..1967138d4e71a2772cbfb91b5026d6d52b2a39d8 --- /dev/null +++ b/utils3d/test/numpy_/transforms/perspective_from_fov_xy.py @@ -0,0 +1,37 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov_x = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + fov_y = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + + aspect = np.tan(fov_x / 2) / np.tan(fov_y / 2) + expected = [] + for i in range(np.prod(spatial) if len(spatial) > 0 else 1): + expected.append(glm.perspective(fov_y.flat[i], aspect.flat[i], near.flat[i], far.flat[i])) + expected = np.concatenate(expected, axis=0).reshape(*spatial, 
4, 4) + + actual = utils3d.numpy.perspective_from_fov_xy(fov_x, fov_y, near, far) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov_x: {fov_x}\n' + \ + f'\tfov_y: {fov_y}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/perspective_to_intrinsic.py b/utils3d/test/numpy_/transforms/perspective_to_intrinsic.py new file mode 100644 index 0000000000000000000000000000000000000000..dc72af80d9920cba7ee4421133132498bbb6d4eb --- /dev/null +++ b/utils3d/test/numpy_/transforms/perspective_to_intrinsic.py @@ -0,0 +1,36 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov_x = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + fov_y = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + near = np.random.uniform(0.1, 100) + far = np.random.uniform(near*2, 1000) + + expected = utils3d.numpy.intrinsics_from_fov_xy(fov_x, fov_y) + + actual = utils3d.numpy.perspective_to_intrinsics( + utils3d.numpy.perspective_from_fov_xy(fov_x, fov_y, near, far) + ) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov_x: {fov_x}\n' + \ + f'\tfov_y: {fov_y}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + \ No newline at end of file diff --git a/utils3d/test/numpy_/transforms/pixel_to_ndc.py b/utils3d/test/numpy_/transforms/pixel_to_ndc.py new file mode 100644 index 0000000000000000000000000000000000000000..ffde3a349968bfc712e1afc891a5f74d6de89b4d --- /dev/null +++ b/utils3d/test/numpy_/transforms/pixel_to_ndc.py @@ -0,0 +1,32 @@ +import 
os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np + +def run(): + for i in range(100): + H = np.random.randint(1, 1000) + W = np.random.randint(1, 1000) + x, y = np.meshgrid(np.arange(W), np.arange(H), indexing='xy') + pixel = np.stack([x, y], axis=-1) + + expected = np.stack( + np.meshgrid( + np.linspace(-1 + 1 / W, 1 - 1 / W, W), + np.linspace(1 - 1 / H, -1 + 1 / H, H), + indexing='xy' + ), + axis=-1 + ) + + actual = utils3d.numpy.pixel_to_ndc(pixel, W, H) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tH: {H}\n' + \ + f'\tW: {W}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/pixel_to_uv.py b/utils3d/test/numpy_/transforms/pixel_to_uv.py new file mode 100644 index 0000000000000000000000000000000000000000..d5dde96cdde9f8a3eb687068692fd0abb522bc90 --- /dev/null +++ b/utils3d/test/numpy_/transforms/pixel_to_uv.py @@ -0,0 +1,32 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np + +def run(): + for i in range(100): + H = np.random.randint(1, 1000) + W = np.random.randint(1, 1000) + x, y = np.meshgrid(np.arange(W), np.arange(H), indexing='xy') + pixel = np.stack([x, y], axis=-1) + + expected = np.stack( + np.meshgrid( + np.linspace(0.5 / W, 1 - 0.5 / W, W), + np.linspace(0.5 / H, 1 - 0.5 / H, H), + indexing='xy' + ), + axis=-1 + ) + + actual = utils3d.numpy.pixel_to_uv(pixel, W, H) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tH: {H}\n' + \ + f'\tW: {W}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/project_cv.py b/utils3d/test/numpy_/transforms/project_cv.py new file mode 100644 index 
0000000000000000000000000000000000000000..81dd5cc7ff93a6d61c11a96fba3d836f8735f72c --- /dev/null +++ b/utils3d/test/numpy_/transforms/project_cv.py @@ -0,0 +1,57 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + N = 1 + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(1, 10) + focal_x = np.random.uniform(0, 10, spatial) + focal_y = np.random.uniform(0, 10, spatial) + center_x = np.random.uniform(0, 1, spatial) + center_y = np.random.uniform(0, 1, spatial) + eye = np.random.uniform(-10, 10, [*spatial, 3]) + lookat = np.random.uniform(-10, 10, [*spatial, 3]) + up = np.random.uniform(-10, 10, [*spatial, 3]) + points = np.random.uniform(-10, 10, [*spatial, N, 3]) + + pts = points - eye[..., None, :] + z_axis = lookat - eye + x_axis = np.cross(-up, z_axis) + y_axis = np.cross(z_axis, x_axis) + x_axis = x_axis / np.linalg.norm(x_axis, axis=-1, keepdims=True) + y_axis = y_axis / np.linalg.norm(y_axis, axis=-1, keepdims=True) + z_axis = z_axis / np.linalg.norm(z_axis, axis=-1, keepdims=True) + z = (pts * z_axis[..., None, :]).sum(axis=-1) + x = (pts * x_axis[..., None, :]).sum(axis=-1) + y = (pts * y_axis[..., None, :]).sum(axis=-1) + x = (x / z * focal_x[..., None] + center_x[..., None]) + y = (y / z * focal_y[..., None] + center_y[..., None]) + expected = np.stack([x, y], axis=-1) + + actual, _ = utils3d.numpy.transforms.project_cv(points, + utils3d.numpy.extrinsics_look_at(eye, lookat, up), + utils3d.numpy.intrinsics(focal_x, focal_y, center_x, center_y)) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfocal_x: {focal_x}\n' + \ + f'\tfocal_y: {focal_y}\n' + \ + f'\tcenter_x: {center_x}\n' + \ + f'\tcenter_y: {center_y}\n' + \ + f'\teye: {eye}\n' + \ + f'\tlookat: {lookat}\n' + \ + f'\tup: 
{up}\n' + \ + f'\tpoints: {points}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/project_depth.py b/utils3d/test/numpy_/transforms/project_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..90de600211bd284f8ec51304288ee0c32e45f7ce --- /dev/null +++ b/utils3d/test/numpy_/transforms/project_depth.py @@ -0,0 +1,32 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + depth = np.random.uniform(near, far, spatial) + + proj = utils3d.numpy.perspective(1.0, 1.0, near, far)[..., 2, 2:4] + expected = ((proj[..., 0] * -depth + proj[..., 1]) / depth) * 0.5 + 0.5 + + actual = utils3d.numpy.project_depth(depth, near, far) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tdepth: {depth}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/project_gl.py b/utils3d/test/numpy_/transforms/project_gl.py new file mode 100644 index 0000000000000000000000000000000000000000..a6cdbd05b35b445605fc9e3bafa1d1f3baf077ce --- /dev/null +++ b/utils3d/test/numpy_/transforms/project_gl.py @@ -0,0 +1,58 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + N = 1 + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(1, 10) + fovy = 
np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + aspect = np.random.uniform(0.01, 100, spatial) + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + eye = np.random.uniform(-10, 10, [*spatial, 3]) + lookat = np.random.uniform(-10, 10, [*spatial, 3]) + up = np.random.uniform(-10, 10, [*spatial, 3]) + points = np.random.uniform(-10, 10, [*spatial, N, 3]) + + pts = points - eye[..., None, :] + z_axis = (eye - lookat) + x_axis = np.cross(up, z_axis) + y_axis = np.cross(z_axis, x_axis) + x_axis = x_axis / np.linalg.norm(x_axis, axis=-1, keepdims=True) + y_axis = y_axis / np.linalg.norm(y_axis, axis=-1, keepdims=True) + z_axis = z_axis / np.linalg.norm(z_axis, axis=-1, keepdims=True) + z = (pts * z_axis[..., None, :]).sum(axis=-1) + x = (pts * x_axis[..., None, :]).sum(axis=-1) + y = (pts * y_axis[..., None, :]).sum(axis=-1) + x = (x / -z / np.tan(fovy[..., None] / 2) / aspect[..., None]) * 0.5 + 0.5 + y = (y / -z / np.tan(fovy[..., None] / 2)) * 0.5 + 0.5 + z = utils3d.numpy.project_depth(-z, near[..., None], far[..., None]) + expected = np.stack([x, y, z], axis=-1) + + actual, _ = utils3d.numpy.transforms.project_gl(points, None, + utils3d.numpy.view_look_at(eye, lookat, up), + utils3d.numpy.perspective(fovy, aspect, near, far)) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfovy: {fovy}\n' + \ + f'\taspect: {aspect}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + f'\teye: {eye}\n' + \ + f'\tlookat: {lookat}\n' + \ + f'\tup: {up}\n' + \ + f'\tpoints: {points}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/project_gl_cv.py b/utils3d/test/numpy_/transforms/project_gl_cv.py new file mode 100644 index 0000000000000000000000000000000000000000..9247b1feb0b7e499e031b2d54f45acb2f17e73ab --- /dev/null +++ b/utils3d/test/numpy_/transforms/project_gl_cv.py @@ -0,0 +1,52 @@ +import os +import sys 
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + N = 1 + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(1, 10) + fovy = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + aspect = np.random.uniform(0.01, 100, spatial) + focal_x = 0.5 / (np.tan(fovy / 2) * aspect) + focal_y = 0.5 / np.tan(fovy / 2) + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + eye = np.random.uniform(-10, 10, [*spatial, 3]) + lookat = np.random.uniform(-10, 10, [*spatial, 3]) + up = np.random.uniform(-10, 10, [*spatial, 3]) + points = np.random.uniform(-10, 10, [*spatial, N, 3]) + + gl = utils3d.numpy.transforms.project_gl(points, None, + utils3d.numpy.view_look_at(eye, lookat, up), + utils3d.numpy.perspective(fovy, aspect, near, far)) + gl_uv = gl[0][..., :2] + gl_uv[..., 1] = 1 - gl_uv[..., 1] + gl_depth = gl[1] + + cv = utils3d.numpy.transforms.project_cv(points, + utils3d.numpy.extrinsics_look_at(eye, lookat, up), + utils3d.numpy.intrinsics(focal_x, focal_y, 0.5, 0.5)) + cv_uv = cv[0][..., :2] + cv_depth = cv[1] + + assert np.allclose(gl_uv, cv_uv) and np.allclose(gl_depth, cv_depth), '\n' + \ + 'Input:\n' + \ + f'\tfovy: {fovy}\n' + \ + f'\taspect: {aspect}\n' + \ + f'\teye: {eye}\n' + \ + f'\tlookat: {lookat}\n' + \ + f'\tup: {up}\n' + \ + f'\tpoints: {points}\n' + \ + 'GL:\n' + \ + f'{gl}\n' + \ + 'CV:\n' + \ + f'{cv}' diff --git a/utils3d/test/numpy_/transforms/unproject_cv.py b/utils3d/test/numpy_/transforms/unproject_cv.py new file mode 100644 index 0000000000000000000000000000000000000000..fd4991ca95836a60bc30ccce3dc235473ef9b8d9 --- /dev/null +++ b/utils3d/test/numpy_/transforms/unproject_cv.py @@ -0,0 +1,49 @@ +import os +import sys +sys.path.insert(0, 
os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + N = 1 + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(1, 10) + focal_x = np.random.uniform(0, 10, spatial) + focal_y = np.random.uniform(0, 10, spatial) + center_x = np.random.uniform(0, 1, spatial) + center_y = np.random.uniform(0, 1, spatial) + eye = np.random.uniform(-10, 10, [*spatial, 3]) + lookat = np.random.uniform(-10, 10, [*spatial, 3]) + up = np.random.uniform(-10, 10, [*spatial, 3]) + points = np.random.uniform(-10, 10, [*spatial, N, 3]) + + expected = points + + actual = utils3d.numpy.transforms.unproject_cv( + *utils3d.numpy.transforms.project_cv(points, + utils3d.numpy.transforms.extrinsics_look_at(eye, lookat, up), + utils3d.numpy.transforms.intrinsics(focal_x, focal_y, center_x, center_y)), + utils3d.numpy.transforms.extrinsics_look_at(eye, lookat, up), + utils3d.numpy.transforms.intrinsics(focal_x, focal_y, center_x, center_y) + ) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfocal_x: {focal_x}\n' + \ + f'\tfocal_y: {focal_y}\n' + \ + f'\tcenter_x: {center_x}\n' + \ + f'\tcenter_y: {center_y}\n' + \ + f'\teye: {eye}\n' + \ + f'\tlookat: {lookat}\n' + \ + f'\tup: {up}\n' + \ + f'\tpoints: {points}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/unproject_gl.py b/utils3d/test/numpy_/transforms/unproject_gl.py new file mode 100644 index 0000000000000000000000000000000000000000..710c0c411290ae58520b47503e92a9a130e34c71 --- /dev/null +++ b/utils3d/test/numpy_/transforms/unproject_gl.py @@ -0,0 +1,50 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in 
range(100): + if i == 0: + spatial = [] + N = 1 + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(1, 10) + fovy = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + aspect = np.random.uniform(0.01, 100, spatial) + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + eye = np.random.uniform(-10, 10, [*spatial, 3]) + lookat = np.random.uniform(-10, 10, [*spatial, 3]) + up = np.random.uniform(-10, 10, [*spatial, 3]) + points = np.random.uniform(-10, 10, [*spatial, N, 3]) + + expected = points + + actual = utils3d.numpy.transforms.unproject_gl( + utils3d.numpy.transforms.project_gl(points, None, + utils3d.numpy.view_look_at(eye, lookat, up), + utils3d.numpy.perspective(fovy, aspect, near, far))[0], + None, + utils3d.numpy.view_look_at(eye, lookat, up), + utils3d.numpy.perspective(fovy, aspect, near, far) + ) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfovy: {fovy}\n' + \ + f'\taspect: {aspect}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + f'\teye: {eye}\n' + \ + f'\tlookat: {lookat}\n' + \ + f'\tup: {up}\n' + \ + f'\tpoints: {points}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/numpy_/transforms/view_look_at.py b/utils3d/test/numpy_/transforms/view_look_at.py new file mode 100644 index 0000000000000000000000000000000000000000..d5462fb61d15dec95a6cc42ffa90ef57e90e8eb0 --- /dev/null +++ b/utils3d/test/numpy_/transforms/view_look_at.py @@ -0,0 +1,39 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + eye = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + 
lookat = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + up = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + + expected = [] + for i in range(np.prod(spatial) if len(spatial) > 0 else 1): + expected.append(np.array(glm.lookAt( + glm.vec3(eye.reshape([-1, 3])[i]), + glm.vec3(lookat.reshape([-1, 3])[i]), + glm.vec3(up.reshape([-1, 3])[i]) + ))) + expected = np.concatenate(expected, axis=0).reshape([*spatial, 4, 4]) + + actual = utils3d.numpy.view_look_at(eye, lookat, up) + + assert np.allclose(expected, actual, 1e-5, 1e-5), '\n' + \ + 'Input:\n' + \ + f'eye: {eye}\n' + \ + f'lookat: {lookat}\n' + \ + f'up: {up}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + \ No newline at end of file diff --git a/utils3d/test/numpy_/transforms/view_to_extrinsic.py b/utils3d/test/numpy_/transforms/view_to_extrinsic.py new file mode 100644 index 0000000000000000000000000000000000000000..548bcc2febd354cd45a6cca584b377eaa42e9b99 --- /dev/null +++ b/utils3d/test/numpy_/transforms/view_to_extrinsic.py @@ -0,0 +1,32 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import glm + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + eye = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + lookat = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + up = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + + expected = utils3d.numpy.extrinsics_look_at(eye, lookat, up) + + actual = utils3d.numpy.view_to_extrinsics(utils3d.numpy.view_look_at(eye, lookat, up)) + + assert np.allclose(expected, actual, 1e-5, 1e-5), '\n' + \ + 'Input:\n' + \ + f'eye: {eye}\n' + \ + f'lookat: {lookat}\n' + \ + f'up: {up}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + \ 
No newline at end of file diff --git a/utils3d/test/numpy_/utils/image_mesh.py b/utils3d/test/numpy_/utils/image_mesh.py new file mode 100644 index 0000000000000000000000000000000000000000..30bab4956740b3dd7c0285362b75d62c3e6efeb6 --- /dev/null +++ b/utils3d/test/numpy_/utils/image_mesh.py @@ -0,0 +1,32 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np + +def run(): + args = [ + {'W':2, 'H':2, 'backslash': np.array([False])}, + {'W':2, 'H':2, 'backslash': np.array([True])}, + {'H':2, 'W':3, 'backslash': np.array([True, False])}, + ] + + expected = [ + np.array([[0, 2, 1], [1, 2, 3]]), + np.array([[0, 2, 3], [0, 3, 1]]), + np.array([[0, 3, 4], [0, 4, 1], [1, 4, 2], [2, 4, 5]]), + ] + + for args, expected in zip(args, expected): + actual = utils3d.numpy.triangulate( + utils3d.numpy.image_mesh(args['H'], args['W'])[1], + backslash=args.get('backslash', None), + ) + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{args}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/rasterization_/gl/basic.py b/utils3d/test/rasterization_/gl/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..3d0d989b856156bff28fbd34b2808569d6216ccf --- /dev/null +++ b/utils3d/test/rasterization_/gl/basic.py @@ -0,0 +1,70 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import moderngl +import numpy as np +from PIL import Image +from pyrr import Matrix44 + +# ------------------- +# CREATE CONTEXT HERE +# ------------------- + +import moderngl + +def run(): + ctx = moderngl.create_context( + standalone=True, + backend='egl', + # These are OPTIONAL if you want to load a specific version + libgl='libGL.so.1', + libegl='libEGL.so.1', + ) + + prog = ctx.program(vertex_shader=""" + #version 330 + uniform 
mat4 model; + in vec2 in_vert; + in vec3 in_color; + out vec3 color; + void main() { + gl_Position = model * vec4(in_vert, 0.0, 1.0); + color = in_color; + } + """, + fragment_shader=""" + #version 330 + in vec3 color; + out vec4 fragColor; + void main() { + fragColor = vec4(color, 1.0); + } + """) + + vertices = np.array([ + -0.6, -0.6, + 1.0, 0.0, 0.0, + 0.6, -0.6, + 0.0, 1.0, 0.0, + 0.0, 0.6, + 0.0, 0.0, 1.0, + ], dtype='f4') + + vbo = ctx.buffer(vertices) + vao = ctx.simple_vertex_array(prog, vbo, 'in_vert', 'in_color') + fbo = ctx.framebuffer(color_attachments=[ctx.texture((512, 512), 4)]) + + fbo.use() + ctx.clear() + prog['model'].write(Matrix44.from_eulers((0.0, 0.1, 0.0), dtype='f4')) + vao.render(moderngl.TRIANGLES) + + data = fbo.read(components=3) + image = Image.frombytes('RGB', fbo.size, data) + image = image.transpose(Image.FLIP_TOP_BOTTOM) + image.save(os.path.join(os.path.dirname(__file__), '..', '..', 'results_to_check', 'output.png')) + + +if __name__ == '__main__': + run() diff --git a/utils3d/test/rasterization_/gl/rasterize_uv.py b/utils3d/test/rasterization_/gl/rasterize_uv.py new file mode 100644 index 0000000000000000000000000000000000000000..abf331102080ac2f39d0fa2b8748bbf4d042da6f --- /dev/null +++ b/utils3d/test/rasterization_/gl/rasterize_uv.py @@ -0,0 +1,42 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import imageio + +def run(): + image_uv, image_mesh = utils3d.numpy.utils.image_mesh(128, 128) + image_mesh = image_mesh.reshape(-1, 4) + depth = np.ones((128, 128), dtype=np.float32) * 2 + depth[32:96, 32:96] = 1 + depth = depth.reshape(-1) + intrinsics = utils3d.numpy.transforms.intrinsics_from_fov(1.0, 128, 128).astype(np.float32) + intrinsics = utils3d.numpy.transforms.normalize_intrinsics(intrinsics, 128, 128) + extrinsics = utils3d.numpy.transforms.extrinsics_look_at([0, 0, 1], [0, 0, 0], [0, 1, 0]).astype(np.float32) + 
pts = utils3d.numpy.transforms.unproject_cv(image_uv, depth, extrinsics, intrinsics) + pts = pts.reshape(-1, 3) + image_mesh = utils3d.numpy.mesh.triangulate(image_mesh, vertices=pts) + + perspective = utils3d.numpy.transforms.perspective(1.0, 1.0, 0.1, 10) + view = utils3d.numpy.transforms.view_look_at([1, 0, 1], [0, 0, 0], [0, 1, 0]) + mvp = np.matmul(perspective, view) + ctx = utils3d.numpy.rasterization.RastContext( + standalone=True, + backend='egl', + device_index=0, + ) + uv = utils3d.numpy.rasterization.rasterize_triangle_faces( + ctx, + pts, + image_mesh, + image_uv, + width=128, + height=128, + mvp=mvp, + )[0] + uv = (np.concatenate([uv, np.zeros((128, 128, 1), dtype=np.float32)], axis=-1) * 255).astype(np.uint8) + imageio.imwrite(os.path.join(os.path.dirname(__file__), '..', '..', 'results_to_check', 'rasterize_uv.png'), uv) + +if __name__ == '__main__': + run() diff --git a/utils3d/test/test.py b/utils3d/test/test.py new file mode 100644 index 0000000000000000000000000000000000000000..f97f3490608e5492cb04b8ec29e9f4217078b534 --- /dev/null +++ b/utils3d/test/test.py @@ -0,0 +1,50 @@ +import importlib +import os +import torch +import traceback + +CRED = '\033[91m' +CGREEN = '\033[92m' +CEND = '\033[0m' + +if __name__ == '__main__': + # list all tests + tests = [] + for root, dirs, files in os.walk('test'): + if root == 'test': + continue + for file in files: + if file.endswith('.py'): + root = root.replace('test/', '').replace('test\\', '') + test = os.path.join(root, file) + test = test.replace('/', '.').replace('\\', '.').replace('.py', '') + tests.append(test) + tests.sort() + print(f'Found {len(tests)} tests:') + for test in tests: + print(f' {test}') + print() + + # disable torch optimizations + torch.backends.cudnn.enabled = False + torch.backends.cuda.matmul.allow_tf32 = False + + # import and run + passed = 0 + for test in tests: + print(f'Running test: {test}... 
', end='') + test = importlib.import_module(test, '.'.join(test.split('.')[:-1])) + try: + test.run() + except Exception as e: + print(CRED, end='') + print('Failed') + traceback.print_exc() + else: + print(CGREEN, end='') + print('Passed') + passed += 1 + print(CEND, end='') + + print(f'Passed {passed}/{len(tests)} tests') + \ No newline at end of file diff --git a/utils3d/test/torch_/mesh/compute_face_angle.py b/utils3d/test/torch_/mesh/compute_face_angle.py new file mode 100644 index 0000000000000000000000000000000000000000..5701544c08ae29812cd63e464d63c41b5a32ce44 --- /dev/null +++ b/utils3d/test/torch_/mesh/compute_face_angle.py @@ -0,0 +1,39 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=float) + faces = np.array([[0, 1, 2]]) + expected = np.array([[np.pi/2, np.pi/4, np.pi/4]]) + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(100, 1000) + vertices = np.random.rand(*spatial, N, 3) + L = np.random.randint(1, 1000) + faces = np.random.randint(0, N, size=(*spatial, L, 3)) + faces[..., 1] = (faces[..., 0] + 1) % N + faces[..., 2] = (faces[..., 0] + 2) % N + + expected = utils3d.numpy.compute_face_angle(vertices, faces) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + vertices = torch.tensor(vertices, device=device) + faces = torch.tensor(faces, device=device) + + actual = utils3d.torch.compute_face_angle(vertices, faces).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/mesh/compute_face_normal.py b/utils3d/test/torch_/mesh/compute_face_normal.py new file mode 
100644 index 0000000000000000000000000000000000000000..abac81a9d33fd7969fc447d983e2f1905c023047 --- /dev/null +++ b/utils3d/test/torch_/mesh/compute_face_normal.py @@ -0,0 +1,39 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=float) + faces = np.array([[0, 1, 2]]) + expected = np.array([[0, 0, 1]]) + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(100, 1000) + vertices = np.random.rand(*spatial, N, 3) + L = np.random.randint(1, 1000) + faces = np.random.randint(0, N, size=(*spatial, L, 3)) + faces[..., 1] = (faces[..., 0] + 1) % N + faces[..., 2] = (faces[..., 0] + 2) % N + + expected = utils3d.numpy.compute_face_normal(vertices, faces) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + vertices = torch.tensor(vertices, device=device) + faces = torch.tensor(faces, device=device) + + actual = utils3d.torch.compute_face_normal(vertices, faces).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/mesh/compute_vertex_normal.py b/utils3d/test/torch_/mesh/compute_vertex_normal.py new file mode 100644 index 0000000000000000000000000000000000000000..4ad7aebc2bb0f1ddcdad72f28dc2357584a4741b --- /dev/null +++ b/utils3d/test/torch_/mesh/compute_vertex_normal.py @@ -0,0 +1,39 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=float) + 
faces = np.array([[0, 1, 2]]) + expected = np.array([[0, 0, 1], [0, 0, 1], [0, 0, 1]]) + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(100, 1000) + vertices = np.random.rand(*spatial, N, 3) + L = np.random.randint(1, 1000) + faces = np.random.randint(0, N, size=(*spatial, L, 3)) + faces[..., 1] = (faces[..., 0] + 1) % N + faces[..., 2] = (faces[..., 0] + 2) % N + + expected = utils3d.numpy.compute_vertex_normal(vertices, faces) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + vertices = torch.tensor(vertices, device=device) + faces = torch.tensor(faces, device=device) + + actual = utils3d.torch.compute_vertex_normal(vertices, faces).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/mesh/compute_vertex_normal_weighted.py b/utils3d/test/torch_/mesh/compute_vertex_normal_weighted.py new file mode 100644 index 0000000000000000000000000000000000000000..801cfdbeddbaca0f8b1de8975d849b68a73ebd23 --- /dev/null +++ b/utils3d/test/torch_/mesh/compute_vertex_normal_weighted.py @@ -0,0 +1,39 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=float) + faces = np.array([[0, 1, 2]]) + expected = np.array([[0, 0, 1], [0, 0, 1], [0, 0, 1]]) + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(100, 1000) + vertices = np.random.rand(*spatial, N, 3) + L = np.random.randint(1, 1000) + faces = np.random.randint(0, N, size=(*spatial, L, 3)) + faces[..., 1] = (faces[..., 0] + 1) % N + faces[..., 2] = (faces[..., 0] + 2) % 
N + + expected = utils3d.numpy.compute_vertex_normal_weighted(vertices, faces) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + vertices = torch.tensor(vertices, device=device) + faces = torch.tensor(faces, device=device) + + actual = utils3d.torch.compute_vertex_normal_weighted(vertices, faces).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/mesh/merge_duplicate_vertices.py b/utils3d/test/torch_/mesh/merge_duplicate_vertices.py new file mode 100644 index 0000000000000000000000000000000000000000..ee293c4e71e396e55ac02379ffa565f333eeedc8 --- /dev/null +++ b/utils3d/test/torch_/mesh/merge_duplicate_vertices.py @@ -0,0 +1,44 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + vertices = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0]], dtype=float) + faces = np.array([[0, 1, 2]]) + expected_vertices = np.array([[0, 0, 0], [1, 0, 0]]) + expected_faces = np.array([[0, 1, 1]]) + expected = expected_vertices[expected_faces] + else: + N = np.random.randint(100, 1000) + vertices = np.random.rand(N, 3) + L = np.random.randint(1, 1000) + faces = np.random.randint(0, N, size=(L, 3)) + faces[..., 1] = (faces[..., 0] + 1) % N + faces[..., 2] = (faces[..., 0] + 2) % N + vertices[-(N//2):] = vertices[:N//2] + + expected_vertices, expected_faces = utils3d.numpy.merge_duplicate_vertices(vertices, faces) + expected = expected_vertices[expected_faces] + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + vertices = torch.tensor(vertices, device=device) + faces = torch.tensor(faces, device=device) + + actual_vertices, actual_faces = utils3d.torch.merge_duplicate_vertices(vertices, faces) + 
actual_vertices = actual_vertices.cpu().numpy() + actual_faces = actual_faces.cpu().numpy() + actual = actual_vertices[actual_faces] + + assert expected_vertices.shape == actual_vertices.shape and np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/mesh/remove_corrupted_faces.py b/utils3d/test/torch_/mesh/remove_corrupted_faces.py new file mode 100644 index 0000000000000000000000000000000000000000..d6af13ccdb1e834284e0839af0a01855e7e7b8ec --- /dev/null +++ b/utils3d/test/torch_/mesh/remove_corrupted_faces.py @@ -0,0 +1,33 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + faces = np.array([[0, 1, 2], [0, 2, 2], [0, 2, 3]]) + expected = np.array([[0, 1, 2], [0, 2, 3]]) + else: + L = np.random.randint(1, 1000) + N = np.random.randint(100, 1000) + faces = np.random.randint(0, N, size=(L, 3)) + faces[..., 1] = (faces[..., 0] + 1) % N + faces[..., 2] = (faces[..., 0] + 2) % N + + expected = utils3d.numpy.remove_corrupted_faces(faces) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + faces = torch.tensor(faces, device=device) + + actual = utils3d.torch.remove_corrupted_faces(faces).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/mesh/triangulate.py b/utils3d/test/torch_/mesh/triangulate.py new file mode 100644 index 0000000000000000000000000000000000000000..a3fbda6ab6659566cbac2cefa3921873a7367ad5 --- /dev/null +++ b/utils3d/test/torch_/mesh/triangulate.py @@ -0,0 +1,36 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 
'..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + L = 1 + N = 5 + faces = np.array([[0, 1, 2, 3, 4]]) + expected = np.array([[0, 1, 2], [0, 2, 3], [0, 3, 4]]) + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + L = np.random.randint(1, 1000) + N = np.random.randint(3, 10) + faces = np.random.randint(0, 10000, size=(*spatial, L, N)) + + expected = utils3d.numpy.triangulate(faces) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + faces = torch.tensor(faces, device=device) + + actual = utils3d.torch.triangulate(faces).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'{faces}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/rasterization/warp_image_by_depth.py b/utils3d/test/torch_/rasterization/warp_image_by_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..b8fef7415126213f1f07cde46ef186e0bdc8c5a9 --- /dev/null +++ b/utils3d/test/torch_/rasterization/warp_image_by_depth.py @@ -0,0 +1,30 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch +import imageio + +def run(): + depth = torch.ones((1, 128, 128), dtype=torch.float32, device='cuda') * 2 + depth[:, 32:48, 32:48] = 1 + intrinsics = utils3d.torch.transforms.intrinsics(1.0, 1.0, 0.5, 0.5).to(depth) + extrinsics_src = utils3d.torch.transforms.extrinsics_look_at([0., 0., 1.], [0., 0., 0.], [0., 1., 0.]).to(depth) + extrinsics_tgt = utils3d.torch.transforms.extrinsics_look_at([1., 0., 1.], [0., 0., 0.], [0., 1., 0.]).to(depth) + ctx = utils3d.torch.rasterization.RastContext(backend='gl', device='cuda') + uv, _ = utils3d.torch.rasterization.warp_image_by_depth( + ctx, + depth, + extrinsics_src=extrinsics_src, + 
extrinsics_tgt=extrinsics_tgt, + intrinsics_src=intrinsics, + antialiasing=False, + ) + uv = torch.cat([uv, torch.zeros((1, 1, 128, 128)).to(uv)], dim=1) * 255 + uv = uv.permute(0, 2, 3, 1).squeeze().cpu().numpy().astype(np.uint8) + + imageio.imwrite(os.path.join(os.path.dirname(__file__), '..', '..', 'results_to_check', 'torch_warp_image_uv.png'), uv) + +if __name__ == '__main__': + run() diff --git a/utils3d/test/torch_/transforms/crop_intrinsic.py b/utils3d/test/torch_/transforms/crop_intrinsic.py new file mode 100644 index 0000000000000000000000000000000000000000..b1253aed673fa06d9fdb162de70cb0f897d0e141 --- /dev/null +++ b/utils3d/test/torch_/transforms/crop_intrinsic.py @@ -0,0 +1,60 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + width = np.random.uniform(1, 10000, spatial) + height = np.random.uniform(1, 10000, spatial) + left = np.random.uniform(0, width, spatial) + top = np.random.uniform(0, height, spatial) + crop_width = np.random.uniform(0, width - left, spatial) + crop_height = np.random.uniform(0, height - top, spatial) + + expected = utils3d.numpy.crop_intrinsics( + utils3d.numpy.normalize_intrinsics( + utils3d.numpy.intrinsics_from_fov(fov, width, height), + width, height + ), + width, height, left, top, crop_width, crop_height + ) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + fov = torch.tensor(fov, device=device) + width = torch.tensor(width, device=device) + height = torch.tensor(height, device=device) + left = torch.tensor(left, device=device) + top = torch.tensor(top, device=device) + crop_width = torch.tensor(crop_width, device=device) + crop_height = 
torch.tensor(crop_height, device=device) + + actual = utils3d.torch.crop_intrinsics( + utils3d.torch.normalize_intrinsics( + utils3d.torch.intrinsics_from_fov(fov, width, height), + width, height + ), + width, height, left, top, crop_width, crop_height + ).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov: {fov}\n' + \ + f'\twidth: {width}\n' + \ + f'\theight: {height}\n' + \ + f'\tleft: {left}\n' + \ + f'\ttop: {top}\n' + \ + f'\tcrop_width: {crop_width}\n' + \ + f'\tcrop_height: {crop_height}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/transforms/extrinsic_look_at.py b/utils3d/test/torch_/transforms/extrinsic_look_at.py new file mode 100644 index 0000000000000000000000000000000000000000..ab8bdf631b18cb0b0dfa0a95061982abc946ab27 --- /dev/null +++ b/utils3d/test/torch_/transforms/extrinsic_look_at.py @@ -0,0 +1,37 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + eye = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + lookat = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + up = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + + expected = utils3d.numpy.extrinsics_look_at(eye, lookat, up) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + eye = torch.tensor(eye, device=device) + lookat = torch.tensor(lookat, device=device) + up = torch.tensor(up, device=device) + + actual = utils3d.torch.extrinsics_look_at(eye, lookat, up).cpu().numpy() + + assert np.allclose(expected, actual, 1e-5, 1e-5), '\n' + \ + 'Input:\n' + \ + f'eye: {eye}\n' + \ + f'lookat: {lookat}\n' + \ + f'up: {up}\n' + \ + 'Actual:\n' + \ + 
f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + \ No newline at end of file diff --git a/utils3d/test/torch_/transforms/extrinsic_to_view.py b/utils3d/test/torch_/transforms/extrinsic_to_view.py new file mode 100644 index 0000000000000000000000000000000000000000..be4705991d50336cc52c758136f4679789c4f9d3 --- /dev/null +++ b/utils3d/test/torch_/transforms/extrinsic_to_view.py @@ -0,0 +1,37 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + eye = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + lookat = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + up = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32) + + expected = utils3d.numpy.view_to_extrinsics(utils3d.numpy.extrinsics_look_at(eye, lookat, up)) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + eye = torch.tensor(eye, device=device) + lookat = torch.tensor(lookat, device=device) + up = torch.tensor(up, device=device) + + actual = utils3d.torch.view_to_extrinsics(utils3d.torch.extrinsics_look_at(eye, lookat, up)).cpu().numpy() + + assert np.allclose(expected, actual, 1e-5, 1e-5), '\n' + \ + 'Input:\n' + \ + f'eye: {eye}\n' + \ + f'lookat: {lookat}\n' + \ + f'up: {up}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + \ No newline at end of file diff --git a/utils3d/test/torch_/transforms/intrinsic.py b/utils3d/test/torch_/transforms/intrinsic.py new file mode 100644 index 0000000000000000000000000000000000000000..d3c08e529a036efaf829dd8ba9af52846ab78b37 --- /dev/null +++ b/utils3d/test/torch_/transforms/intrinsic.py @@ -0,0 +1,39 @@ +import os +import sys +sys.path.insert(0, 
os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + focal_x = np.random.uniform(1, 10000, spatial) + focal_y = np.random.uniform(1, 10000, spatial) + center_x = np.random.uniform(1, 10000, spatial) + center_y = np.random.uniform(1, 10000, spatial) + + expected = utils3d.numpy.intrinsics(focal_x, focal_y, center_x, center_y) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + focal_x = torch.tensor(focal_x, device=device) + focal_y = torch.tensor(focal_y, device=device) + center_x = torch.tensor(center_x, device=device) + center_y = torch.tensor(center_y, device=device) + + actual = utils3d.torch.intrinsics(focal_x, focal_y, center_x, center_y).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfocal_x: {focal_x}\n' + \ + f'\tfocal_y: {focal_y}\n' + \ + f'\tcenter_x: {center_x}\n' + \ + f'\tcenter_y: {center_y}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/transforms/intrinsic_from_fov.py b/utils3d/test/torch_/transforms/intrinsic_from_fov.py new file mode 100644 index 0000000000000000000000000000000000000000..e9a8ac75d1a14f0e969de0bf3b717c06c3d54a55 --- /dev/null +++ b/utils3d/test/torch_/transforms/intrinsic_from_fov.py @@ -0,0 +1,36 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + width = np.random.uniform(1, 10000, spatial) + height = 
np.random.uniform(1, 10000, spatial) + + expected = utils3d.numpy.intrinsics_from_fov(fov, width, height) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + fov = torch.tensor(fov, device=device) + width = torch.tensor(width, device=device) + height = torch.tensor(height, device=device) + + actual = utils3d.torch.intrinsics_from_fov(fov, width, height).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov: {fov}\n' + \ + f'\twidth: {width}\n' + \ + f'\theight: {height}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/transforms/intrinsic_from_fov_xy.py b/utils3d/test/torch_/transforms/intrinsic_from_fov_xy.py new file mode 100644 index 0000000000000000000000000000000000000000..fa44e6a2db7d2c665f89cb9bc931eb1e86b562fb --- /dev/null +++ b/utils3d/test/torch_/transforms/intrinsic_from_fov_xy.py @@ -0,0 +1,33 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov_x = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + fov_y = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + + expected = utils3d.numpy.intrinsics_from_fov_xy(fov_x, fov_y) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + fov_x = torch.tensor(fov_x, device=device) + fov_y = torch.tensor(fov_y, device=device) + + actual = utils3d.torch.intrinsics_from_fov_xy(fov_x, fov_y).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov_x: {fov_x}\n' + \ + f'\tfov_y: {fov_y}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git 
a/utils3d/test/torch_/transforms/intrinsic_to_perspective.py b/utils3d/test/torch_/transforms/intrinsic_to_perspective.py new file mode 100644 index 0000000000000000000000000000000000000000..96d7b1e030a03a1b99d7b46b12e454746652a6ec --- /dev/null +++ b/utils3d/test/torch_/transforms/intrinsic_to_perspective.py @@ -0,0 +1,48 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov_x = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + fov_y = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + + expected = utils3d.numpy.intrinsics_to_perspective( + utils3d.numpy.intrinsics_from_fov_xy(fov_x, fov_y), + near, + far + ) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + fov_x = torch.tensor(fov_x, device=device) + fov_y = torch.tensor(fov_y, device=device) + near = torch.tensor(near, device=device) + far = torch.tensor(far, device=device) + + actual = utils3d.torch.intrinsics_to_perspective( + utils3d.torch.intrinsics_from_fov_xy(fov_x, fov_y), + near, + far + ).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov_x: {fov_x}\n' + \ + f'\tfov_y: {fov_y}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + \ No newline at end of file diff --git a/utils3d/test/torch_/transforms/linearize_depth.py b/utils3d/test/torch_/transforms/linearize_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..906190b1d2d56f5ade6233505d7c6765373e3649 --- /dev/null +++ b/utils3d/test/torch_/transforms/linearize_depth.py @@ 
-0,0 +1,39 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + depth = np.random.uniform(near, far, spatial) + + expected = depth + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + near = torch.tensor(near, device=device) + far = torch.tensor(far, device=device) + depth = torch.tensor(depth, device=device) + + actual = utils3d.torch.depth_buffer_to_linear( + utils3d.torch.project_depth(depth, near, far), + near, far + ).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tdepth: {depth}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/transforms/matrix_to_quaternion.py b/utils3d/test/torch_/transforms/matrix_to_quaternion.py new file mode 100644 index 0000000000000000000000000000000000000000..63bb5bf8c3efaed176e1087627ff53d1af6c1cdd --- /dev/null +++ b/utils3d/test/torch_/transforms/matrix_to_quaternion.py @@ -0,0 +1,41 @@ +import numpy as np +import torch +from scipy.spatial.transform import Rotation as R +import utils3d + + +def run(): + for i in range(10): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + angle = np.random.uniform(-np.pi, np.pi, spatial) + axis = np.random.uniform(-1, 1, spatial + [3]) + axis = axis / np.linalg.norm(axis, axis=-1, keepdims=True) + axis_angle = angle[..., None] * axis + matrix = R.from_rotvec(axis_angle.reshape((-1, 3))).as_matrix().reshape(spatial + [3, 3]) + # matrix = np.array([ + # [1, 0, 0], + # [0, 0, 
-1], + # [0, 1, 0] + # ]).astype(np.float32) + # dim = 0 + # spatial = [] + expected = R.from_matrix(matrix.reshape(-1, 3, 3)).as_quat().reshape(spatial + [4])[..., [3, 0, 1, 2]] + actual = utils3d.torch.matrix_to_quaternion( + torch.from_numpy(matrix) + ).cpu().numpy() + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tangle: {angle}\n' + \ + f'\taxis: {axis}\n' + \ + f'\tmatrix: {matrix}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + +if __name__ == '__main__': + run() \ No newline at end of file diff --git a/utils3d/test/torch_/transforms/normalize_intrinsic.py b/utils3d/test/torch_/transforms/normalize_intrinsic.py new file mode 100644 index 0000000000000000000000000000000000000000..8b1494c0711130128cb2917d30e3d62d7e0cb11c --- /dev/null +++ b/utils3d/test/torch_/transforms/normalize_intrinsic.py @@ -0,0 +1,38 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + width = np.random.uniform(1, 10000, spatial) + height = np.random.uniform(1, 10000, spatial) + fov_x = np.where(width >= height, fov, 2 * np.arctan(np.tan(fov / 2) * width / height)) + fov_y = np.where(width >= height, 2 * np.arctan(np.tan(fov / 2) * height / width), fov) + + expected = utils3d.numpy.normalize_intrinsics(utils3d.numpy.intrinsics_from_fov(fov, width, height), width, height) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + fov = torch.tensor(fov, device=device) + width = torch.tensor(width, device=device) + height = torch.tensor(height, device=device) + + actual = utils3d.torch.normalize_intrinsics(utils3d.torch.intrinsics_from_fov(fov, width, height), 
width, height).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov: {fov}\n' + \ + f'\twidth: {width}\n' + \ + f'\theight: {height}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/transforms/perspective.py b/utils3d/test/torch_/transforms/perspective.py new file mode 100644 index 0000000000000000000000000000000000000000..6661bc16706aa02e1db4d492b8adad84ab9d5d19 --- /dev/null +++ b/utils3d/test/torch_/transforms/perspective.py @@ -0,0 +1,39 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fovy = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + aspect = np.random.uniform(0.01, 100, spatial) + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + + expected = utils3d.numpy.perspective(fovy, aspect, near, far) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + fovy = torch.tensor(fovy, device=device) + aspect = torch.tensor(aspect, device=device) + near = torch.tensor(near, device=device) + far = torch.tensor(far, device=device) + + actual = utils3d.torch.perspective(fovy, aspect, near, far).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfovy: {fovy}\n' + \ + f'\taspect: {aspect}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/transforms/perspective_from_fov.py b/utils3d/test/torch_/transforms/perspective_from_fov.py new file mode 100644 index 0000000000000000000000000000000000000000..25c8248334191dca69cbe7aca39f7b86ccae80c2 --- /dev/null +++ 
b/utils3d/test/torch_/transforms/perspective_from_fov.py @@ -0,0 +1,43 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + width = np.random.uniform(1, 10000, spatial) + height = np.random.uniform(1, 10000, spatial) + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + + expected = utils3d.numpy.perspective_from_fov(fov, width, height, near, far) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + fov = torch.tensor(fov, device=device) + width = torch.tensor(width, device=device) + height = torch.tensor(height, device=device) + near = torch.tensor(near, device=device) + far = torch.tensor(far, device=device) + + actual = utils3d.torch.perspective_from_fov(fov, width, height, near, far).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov: {fov}\n' + \ + f'\twidth: {width}\n' + \ + f'\theight: {height}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + \ No newline at end of file diff --git a/utils3d/test/torch_/transforms/perspective_from_fov_xy.py b/utils3d/test/torch_/transforms/perspective_from_fov_xy.py new file mode 100644 index 0000000000000000000000000000000000000000..414ec61db485d341db19b358e5f6791a9ebb812b --- /dev/null +++ b/utils3d/test/torch_/transforms/perspective_from_fov_xy.py @@ -0,0 +1,39 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + 
spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov_x = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + fov_y = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + + expected = utils3d.numpy.perspective_from_fov_xy(fov_x, fov_y, near, far) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + fov_x = torch.tensor(fov_x, device=device) + fov_y = torch.tensor(fov_y, device=device) + near = torch.tensor(near, device=device) + far = torch.tensor(far, device=device) + + actual = utils3d.torch.perspective_from_fov_xy(fov_x, fov_y, near, far).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov_x: {fov_x}\n' + \ + f'\tfov_y: {fov_y}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/transforms/perspective_to_intrinsic.py b/utils3d/test/torch_/transforms/perspective_to_intrinsic.py new file mode 100644 index 0000000000000000000000000000000000000000..700c90c6d621c005e780e61a83982a00f82284f4 --- /dev/null +++ b/utils3d/test/torch_/transforms/perspective_to_intrinsic.py @@ -0,0 +1,44 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + fov_x = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + fov_y = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial) + near = np.random.uniform(0.1, 100, spatial) + far = np.random.uniform(near*2, 1000, spatial) + + expected = utils3d.numpy.perspective_to_intrinsics( + 
utils3d.numpy.perspective_from_fov_xy(fov_x, fov_y, near, far) + ) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + fov_x = torch.tensor(fov_x, device=device) + fov_y = torch.tensor(fov_y, device=device) + near = torch.tensor(near, device=device) + far = torch.tensor(far, device=device) + + actual = utils3d.torch.perspective_to_intrinsics( + utils3d.torch.perspective_from_fov_xy(fov_x, fov_y, near, far) + ).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tfov_x: {fov_x}\n' + \ + f'\tfov_y: {fov_y}\n' + \ + f'\tnear: {near}\n' + \ + f'\tfar: {far}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' + \ No newline at end of file diff --git a/utils3d/test/torch_/transforms/pixel_to_ndc.py b/utils3d/test/torch_/transforms/pixel_to_ndc.py new file mode 100644 index 0000000000000000000000000000000000000000..b20fd41699f5d0743b21d0b729f24327bd083709 --- /dev/null +++ b/utils3d/test/torch_/transforms/pixel_to_ndc.py @@ -0,0 +1,31 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + H = np.random.randint(1, 1000) + W = np.random.randint(1, 1000) + x, y = np.meshgrid(np.arange(W), np.arange(H), indexing='xy') + pixel = np.stack([x, y], axis=-1) + + expected = utils3d.numpy.pixel_to_ndc(pixel, W, H) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + pixel = torch.tensor(pixel, device=device) + W = torch.tensor(W, device=device) + H = torch.tensor(H, device=device) + + actual = utils3d.torch.pixel_to_ndc(pixel, W, H).cpu().numpy() + + assert np.allclose(expected, actual, atol=1e-6), '\n' + \ + 'Input:\n' + \ + f'\tH: {H}\n' + \ + f'\tW: {W}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/transforms/pixel_to_uv.py 
b/utils3d/test/torch_/transforms/pixel_to_uv.py new file mode 100644 index 0000000000000000000000000000000000000000..1243b269b5f4c4acea042a01096bb292520e8afa --- /dev/null +++ b/utils3d/test/torch_/transforms/pixel_to_uv.py @@ -0,0 +1,31 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + H = np.random.randint(1, 1000) + W = np.random.randint(1, 1000) + x, y = np.meshgrid(np.arange(W), np.arange(H), indexing='xy') + pixel = np.stack([x, y], axis=-1) + + expected = utils3d.numpy.pixel_to_uv(pixel, W, H) + + device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)] + pixel = torch.tensor(pixel, device=device) + W = torch.tensor(W, device=device) + H = torch.tensor(H, device=device) + + actual = utils3d.torch.pixel_to_uv(pixel, W, H).cpu().numpy() + + assert np.allclose(expected, actual), '\n' + \ + 'Input:\n' + \ + f'\tH: {H}\n' + \ + f'\tW: {W}\n' + \ + 'Actual:\n' + \ + f'{actual}\n' + \ + 'Expected:\n' + \ + f'{expected}' diff --git a/utils3d/test/torch_/transforms/project_cv.py b/utils3d/test/torch_/transforms/project_cv.py new file mode 100644 index 0000000000000000000000000000000000000000..e66d63ad1b60d7077bca9e70f6f7f6ff570e58dd --- /dev/null +++ b/utils3d/test/torch_/transforms/project_cv.py @@ -0,0 +1,59 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))) +import utils3d +import numpy as np +import torch + +def run(): + for i in range(100): + if i == 0: + spatial = [] + N = 1 + else: + dim = np.random.randint(4) + spatial = [np.random.randint(1, 10) for _ in range(dim)] + N = np.random.randint(1, 10) + focal_x = np.random.uniform(0, 10, spatial) + focal_y = np.random.uniform(0, 10, spatial) + center_x = np.random.uniform(0, 1, spatial) + center_y = np.random.uniform(0, 1, spatial) + eye = np.random.uniform(-10, 10, 
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
import utils3d
import numpy as np
import torch


def run():
    """Check that torch project_depth agrees with the numpy reference on random shapes."""
    for trial in range(100):
        # First trial uses scalar (0-d) inputs; later trials use random batch shapes.
        spatial = [] if trial == 0 else [np.random.randint(1, 10) for _ in range(np.random.randint(4))]
        near = np.random.uniform(0.1, 100, spatial)
        far = np.random.uniform(near * 2, 1000, spatial)
        depth = np.random.uniform(near, far, spatial)

        expected = utils3d.numpy.project_depth(depth, near, far)

        # Exercise both devices at random.
        device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)]
        near = torch.tensor(near, device=device)
        far = torch.tensor(far, device=device)
        depth = torch.tensor(depth, device=device)

        actual = utils3d.torch.project_depth(depth, near, far).cpu().numpy()

        assert np.allclose(expected, actual), (
            f'\nInput:\n\tdepth: {depth}\n\tnear: {near}\n\tfar: {far}\n'
            f'Actual:\n{actual}\n'
            f'Expected:\n{expected}'
        )
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
import utils3d
import numpy as np
import torch


def run():
    """Cross-check: GL-style and CV-style projections of the same camera must agree."""
    for trial in range(100):
        # First trial: unbatched scalars and a single point; later trials: random batch shapes.
        if trial == 0:
            spatial, n_points = [], 1
        else:
            spatial = [np.random.randint(1, 10) for _ in range(np.random.randint(4))]
            n_points = np.random.randint(1, 10)
        fovy = np.random.uniform(5 / 180 * np.pi, 175 / 180 * np.pi, spatial)
        aspect = np.random.uniform(0.01, 100, spatial)
        # Intrinsics equivalent to the GL perspective frustum (principal point at image center).
        focal_x = 0.5 / (np.tan(fovy / 2) * aspect)
        focal_y = 0.5 / np.tan(fovy / 2)
        near = np.random.uniform(0.1, 100, spatial)
        far = np.random.uniform(near * 2, 1000, spatial)
        eye = np.random.uniform(-10, 10, [*spatial, 3])
        lookat = np.random.uniform(-10, 10, [*spatial, 3])
        up = np.random.uniform(-10, 10, [*spatial, 3])
        points = np.random.uniform(-10, 10, [*spatial, n_points, 3])

        device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)]
        fovy, aspect = torch.tensor(fovy, device=device), torch.tensor(aspect, device=device)
        focal_x, focal_y = torch.tensor(focal_x, device=device), torch.tensor(focal_y, device=device)
        near, far = torch.tensor(near, device=device), torch.tensor(far, device=device)
        eye, lookat, up = (torch.tensor(t, device=device) for t in (eye, lookat, up))
        points = torch.tensor(points, device=device)

        gl = utils3d.torch.project_gl(
            points, None,
            utils3d.torch.view_look_at(eye, lookat, up),
            utils3d.torch.perspective(fovy, aspect, near, far))
        gl_uv = gl[0][..., :2].cpu().numpy()
        gl_uv[..., 1] = 1 - gl_uv[..., 1]  # GL v axis points up; flip to CV convention
        gl_depth = gl[1].cpu().numpy()

        cv = utils3d.torch.project_cv(
            points,
            utils3d.torch.extrinsics_look_at(eye, lookat, up),
            utils3d.torch.intrinsics(focal_x, focal_y, 0.5, 0.5))
        cv_uv = cv[0][..., :2].cpu().numpy()
        cv_depth = cv[1].cpu().numpy()

        assert np.allclose(gl_uv, cv_uv) and np.allclose(gl_depth, cv_depth), (
            f'\nInput:\n\tfovy: {fovy}\n\taspect: {aspect}\n\teye: {eye}\n'
            f'\tlookat: {lookat}\n\tup: {up}\n\tpoints: {points}\n'
            f'GL:\n{gl}\n'
            f'CV:\n{cv}'
        )
from scipy.spatial.transform import Rotation, Slerp
import numpy as np
import torch
import utils3d


def run():
    """Compare utils3d.torch.slerp against scipy's Slerp on random rotation pairs.

    Bug fix: the interpolation parameter used to be hard-coded to 0, so the
    test only ever verified the trivial endpoint (result == first rotation)
    and never exercised interpolation itself. A random t in [0, 1) now covers
    the full interpolation range (t == 0 remains reachable).
    """
    for i in range(100):
        quat_1 = np.random.rand(4)  # [w, x, y, z]
        quat_2 = np.random.rand(4)
        # Random interpolation parameter (previously fixed at 0).
        t = np.array(np.random.rand())
        # scipy uses [x, y, z, w] quaternion order, hence the re-indexing.
        expected = Slerp([0, 1], Rotation.from_quat([quat_1[[1, 2, 3, 0]], quat_2[[1, 2, 3, 0]]]))(t).as_matrix()
        matrix_1 = Rotation.from_quat(quat_1[[1, 2, 3, 0]]).as_matrix()
        matrix_2 = Rotation.from_quat(quat_2[[1, 2, 3, 0]]).as_matrix()
        actual = utils3d.torch.slerp(
            torch.from_numpy(matrix_1),
            torch.from_numpy(matrix_2),
            torch.from_numpy(t)
        ).numpy()
        assert np.allclose(actual, expected), (
            f'\nt: {t}\nActual:\n{actual}\nExpected:\n{expected}'
        )

if __name__ == '__main__':
    run()
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
import utils3d
import numpy as np
import torch


def run():
    """Round-trip test: unproject_cv(project_cv(points)) must reproduce the points, numpy vs torch."""
    for trial in range(100):
        if trial == 0:
            spatial, n_points = [], 1
        else:
            spatial = [np.random.randint(1, 10) for _ in range(np.random.randint(4))]
            n_points = np.random.randint(1, 10)
        focal_x = np.random.uniform(0, 10, spatial)
        focal_y = np.random.uniform(0, 10, spatial)
        center_x = np.random.uniform(0, 1, spatial)
        center_y = np.random.uniform(0, 1, spatial)
        eye = np.random.uniform(-10, 10, [*spatial, 3])
        lookat = np.random.uniform(-10, 10, [*spatial, 3])
        up = np.random.uniform(-10, 10, [*spatial, 3])
        points = np.random.uniform(-10, 10, [*spatial, n_points, 3])

        # Reference round trip with the numpy implementation.
        extrinsics = utils3d.numpy.extrinsics_look_at(eye, lookat, up)
        intrinsics = utils3d.numpy.intrinsics(focal_x, focal_y, center_x, center_y)
        expected = utils3d.numpy.transforms.unproject_cv(
            *utils3d.numpy.transforms.project_cv(points, extrinsics, intrinsics),
            extrinsics, intrinsics)

        device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)]
        focal_x, focal_y = torch.tensor(focal_x, device=device), torch.tensor(focal_y, device=device)
        center_x, center_y = torch.tensor(center_x, device=device), torch.tensor(center_y, device=device)
        eye, lookat, up = (torch.tensor(t, device=device) for t in (eye, lookat, up))
        points = torch.tensor(points, device=device)

        # Same round trip with the torch implementation.
        extrinsics = utils3d.torch.extrinsics_look_at(eye, lookat, up)
        intrinsics = utils3d.torch.intrinsics(focal_x, focal_y, center_x, center_y)
        actual = utils3d.torch.unproject_cv(
            *utils3d.torch.project_cv(points, extrinsics, intrinsics),
            extrinsics, intrinsics).cpu().numpy()

        assert np.allclose(expected, actual), (
            f'\nInput:\n\tfocal_x: {focal_x}\n\tfocal_y: {focal_y}\n'
            f'\tcenter_x: {center_x}\n\tcenter_y: {center_y}\n'
            f'\teye: {eye}\n\tlookat: {lookat}\n\tup: {up}\n\tpoints: {points}\n'
            f'Actual:\n{actual}\n'
            f'Expected:\n{expected}'
        )
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
import utils3d
import numpy as np
import torch


def run():
    """Check that torch view_look_at matches the numpy reference on random cameras."""
    for trial in range(100):
        # First trial is unbatched; later trials use random batch shapes.
        spatial = [] if trial == 0 else [np.random.randint(1, 10) for _ in range(np.random.randint(4))]
        eye = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32)
        lookat = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32)
        up = np.random.uniform(-10, 10, [*spatial, 3]).astype(np.float32)

        expected = utils3d.numpy.view_look_at(eye, lookat, up)

        device = [torch.device('cpu'), torch.device('cuda')][np.random.randint(2)]
        eye, lookat, up = (torch.tensor(t, device=device) for t in (eye, lookat, up))

        actual = utils3d.torch.view_look_at(eye, lookat, up).cpu().numpy()

        # float32 inputs: compare with loosened tolerances.
        assert np.allclose(expected, actual, 1e-5, 1e-5), (
            f'\nInput:\neye: {eye}\nlookat: {lookat}\nup: {up}\n'
            f'Actual:\n{actual}\n'
            f'Expected:\n{expected}'
        )
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
import utils3d
import numpy as np
import torch


def run():
    """Fixed-case test of triangulate(image_mesh(...)) with per-quad backslash flags."""
    cases = [
        {'W': 2, 'H': 2, 'backslash': torch.tensor([False])},
        {'W': 2, 'H': 2, 'backslash': torch.tensor([True])},
        {'H': 2, 'W': 3, 'backslash': torch.tensor([True, False])},
    ]
    expected_faces = [
        np.array([[0, 2, 1], [1, 2, 3]]),
        np.array([[0, 2, 3], [0, 3, 1]]),
        np.array([[0, 3, 4], [0, 4, 1], [1, 4, 2], [2, 4, 5]]),
    ]
    # Distinct loop names: the original shadowed the `args`/`expected` lists.
    for case, expected in zip(cases, expected_faces):
        quads = utils3d.torch.image_mesh(case['H'], case['W'])[1]
        actual = utils3d.torch.triangulate(
            quads,
            backslash=case.get('backslash', None),
        ).cpu().numpy()

        assert np.allclose(expected, actual), (
            f'\nInput:\n{case}\n'
            f'Actual:\n{actual}\n'
            f'Expected:\n{expected}'
        )
from functools import wraps
import warnings


def suppress_traceback(fn):
    """Decorator that hides this wrapper's own frames from reported tracebacks.

    On an exception, up to two leading traceback frames (the wrapper frame and
    the dispatch frame it calls) are dropped so the traceback starts at the
    user's code. The previous version did ``tb_next.tb_next`` unconditionally,
    which itself raised ``AttributeError`` whenever the traceback had fewer
    than two extra frames (e.g. when the wrapped call failed to start at all),
    masking the real exception; this version walks the chain defensively.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            tb = e.__traceback__
            # Skip at most two frames, stopping early on short tracebacks.
            for _ in range(2):
                if tb is None or tb.tb_next is None:
                    break
                tb = tb.tb_next
            e.__traceback__ = tb
            raise
    return wrapper


class no_warnings:
    """Suppress (or re-filter) warnings, usable as a decorator or a context manager.

    Parameters
    ----------
    action : str
        Action passed to ``warnings.simplefilter`` (default ``'ignore'``).
    **kwargs
        Extra keyword arguments forwarded to ``warnings.simplefilter``
        (e.g. ``category=DeprecationWarning``).
    """

    def __init__(self, action: str = 'ignore', **kwargs):
        self.action = action
        self.filter_kwargs = kwargs

    def __call__(self, fn):
        # Decorator form: run fn under a temporary warnings filter.
        @wraps(fn)
        def wrapper(*args, **kwargs):
            with warnings.catch_warnings():
                warnings.simplefilter(self.action, **self.filter_kwargs)
                return fn(*args, **kwargs)
        return wrapper

    def __enter__(self):
        # Context-manager form: install the filter until __exit__ restores state.
        self.warnings_manager = warnings.catch_warnings()
        self.warnings_manager.__enter__()
        warnings.simplefilter(self.action, **self.filter_kwargs)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.warnings_manager.__exit__(exc_type, exc_val, exc_tb)
+"calc_quad_smoothness", +"sovle_quad", +"sovle_quad_qp", +"tri_to_quad", +"sliding_window_1d", +"sliding_window_nd", +"sliding_window_2d", +"max_pool_1d", +"max_pool_2d", +"max_pool_nd", +"depth_edge", +"normals_edge", +"depth_aliasing", +"interpolate", +"image_scrcoord", +"image_uv", +"image_pixel_center", +"image_pixel", +"image_mesh", +"image_mesh_from_depth", +"depth_to_normals", +"points_to_normals", +"depth_to_points", +"chessboard", +"cube", +"icosahedron", +"square", +"camera_frustum", +"perspective", +"perspective_from_fov", +"perspective_from_fov_xy", +"intrinsics_from_focal_center", +"intrinsics_from_fov", +"fov_to_focal", +"focal_to_fov", +"intrinsics_to_fov", +"view_look_at", +"extrinsics_look_at", +"perspective_to_intrinsics", +"perspective_to_near_far", +"intrinsics_to_perspective", +"extrinsics_to_view", +"view_to_extrinsics", +"normalize_intrinsics", +"crop_intrinsics", +"pixel_to_uv", +"pixel_to_ndc", +"uv_to_pixel", +"project_depth", +"depth_buffer_to_linear", +"unproject_cv", +"unproject_gl", +"project_cv", +"project_gl", +"quaternion_to_matrix", +"axis_angle_to_matrix", +"matrix_to_quaternion", +"extrinsics_to_essential", +"euler_axis_angle_rotation", +"euler_angles_to_matrix", +"skew_symmetric", +"rotation_matrix_from_vectors", +"ray_intersection", +"se3_matrix", +"slerp_quaternion", +"slerp_vector", +"lerp", +"lerp_se3_matrix", +"piecewise_lerp", +"piecewise_lerp_se3_matrix", +"apply_transform", +"linear_spline_interpolate", +"RastContext", +"rasterize_triangle_faces", +"rasterize_edges", +"texture", +"warp_image_by_depth", +"test_rasterization", +"compute_face_angles", +"compute_edges", +"compute_connected_components", +"compute_edge_connected_components", +"compute_boundarys", +"compute_dual_graph", +"remove_isolated_pieces", +"compute_face_tbn", +"compute_vertex_tbn", +"laplacian", +"laplacian_smooth_mesh", +"taubin_smooth_mesh", +"laplacian_hc_smooth_mesh", +"get_rays", +"get_image_rays", +"get_mipnerf_cones", +"volume_rendering", 
+"bin_sample", +"importance_sample", +"nerf_render_rays", +"mipnerf_render_rays", +"nerf_render_view", +"mipnerf_render_view", +"InstantNGP", +"masked_min", +"masked_max", +"bounding_rect", +"intrinsics_from_fov_xy", +"matrix_to_euler_angles", +"matrix_to_axis_angle", +"axis_angle_to_quaternion", +"quaternion_to_axis_angle", +"slerp", +"interpolate_extrinsics", +"interpolate_view", +"to4x4", +"rotation_matrix_2d", +"rotate_2d", +"translate_2d", +"scale_2d", +"apply_2d", +"warp_image_by_forward_flow"] + +def _contains_tensor(obj): + if isinstance(obj, (list, tuple)): + return any(_contains_tensor(item) for item in obj) + elif isinstance(obj, dict): + return any(_contains_tensor(value) for value in obj.values()) + else: + import torch + return isinstance(obj, torch.Tensor) + + +@suppress_traceback +def _call_based_on_args(fname, args, kwargs): + if 'torch' in sys.modules: + if any(_contains_tensor(arg) for arg in args) or any(_contains_tensor(v) for v in kwargs.values()): + fn = getattr(utils3d.torch, fname, None) + if fn is None: + raise NotImplementedError(f"Function {fname} has no torch implementation.") + return fn(*args, **kwargs) + fn = getattr(utils3d.numpy, fname, None) + if fn is None: + raise NotImplementedError(f"Function {fname} has no numpy implementation.") + return fn(*args, **kwargs) + + +@suppress_traceback +def triangulate(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.triangulate, utils3d.torch.triangulate + return _call_based_on_args('triangulate', args, kwargs) + +@suppress_traceback +def compute_face_normal(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.compute_face_normal, utils3d.torch.compute_face_normal + return _call_based_on_args('compute_face_normal', args, kwargs) + +@suppress_traceback +def compute_face_angle(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.compute_face_angle, None + return _call_based_on_args('compute_face_angle', args, kwargs) + +@suppress_traceback 
+def compute_vertex_normal(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.compute_vertex_normal, utils3d.torch.compute_vertex_normal + return _call_based_on_args('compute_vertex_normal', args, kwargs) + +@suppress_traceback +def compute_vertex_normal_weighted(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.compute_vertex_normal_weighted, utils3d.torch.compute_vertex_normal_weighted + return _call_based_on_args('compute_vertex_normal_weighted', args, kwargs) + +@suppress_traceback +def remove_corrupted_faces(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.remove_corrupted_faces, utils3d.torch.remove_corrupted_faces + return _call_based_on_args('remove_corrupted_faces', args, kwargs) + +@suppress_traceback +def merge_duplicate_vertices(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.merge_duplicate_vertices, utils3d.torch.merge_duplicate_vertices + return _call_based_on_args('merge_duplicate_vertices', args, kwargs) + +@suppress_traceback +def remove_unreferenced_vertices(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.remove_unreferenced_vertices, utils3d.torch.remove_unreferenced_vertices + return _call_based_on_args('remove_unreferenced_vertices', args, kwargs) + +@suppress_traceback +def subdivide_mesh_simple(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.subdivide_mesh_simple, utils3d.torch.subdivide_mesh_simple + return _call_based_on_args('subdivide_mesh_simple', args, kwargs) + +@suppress_traceback +def mesh_relations(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.mesh_relations, None + return _call_based_on_args('mesh_relations', args, kwargs) + +@suppress_traceback +def flatten_mesh_indices(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.flatten_mesh_indices, None + return _call_based_on_args('flatten_mesh_indices', args, kwargs) + +@suppress_traceback +def 
calc_quad_candidates(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.calc_quad_candidates, None + return _call_based_on_args('calc_quad_candidates', args, kwargs) + +@suppress_traceback +def calc_quad_distortion(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.calc_quad_distortion, None + return _call_based_on_args('calc_quad_distortion', args, kwargs) + +@suppress_traceback +def calc_quad_direction(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.calc_quad_direction, None + return _call_based_on_args('calc_quad_direction', args, kwargs) + +@suppress_traceback +def calc_quad_smoothness(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.calc_quad_smoothness, None + return _call_based_on_args('calc_quad_smoothness', args, kwargs) + +@suppress_traceback +def sovle_quad(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.sovle_quad, None + return _call_based_on_args('sovle_quad', args, kwargs) + +@suppress_traceback +def sovle_quad_qp(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.sovle_quad_qp, None + return _call_based_on_args('sovle_quad_qp', args, kwargs) + +@suppress_traceback +def tri_to_quad(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.tri_to_quad, None + return _call_based_on_args('tri_to_quad', args, kwargs) + +@suppress_traceback +def sliding_window_1d(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.sliding_window_1d, utils3d.torch.sliding_window_1d + return _call_based_on_args('sliding_window_1d', args, kwargs) + +@suppress_traceback +def sliding_window_nd(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.sliding_window_nd, utils3d.torch.sliding_window_nd + return _call_based_on_args('sliding_window_nd', args, kwargs) + +@suppress_traceback +def sliding_window_2d(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.sliding_window_2d, 
utils3d.torch.sliding_window_2d + return _call_based_on_args('sliding_window_2d', args, kwargs) + +@suppress_traceback +def max_pool_1d(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.max_pool_1d, None + return _call_based_on_args('max_pool_1d', args, kwargs) + +@suppress_traceback +def max_pool_2d(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.max_pool_2d, None + return _call_based_on_args('max_pool_2d', args, kwargs) + +@suppress_traceback +def max_pool_nd(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.max_pool_nd, None + return _call_based_on_args('max_pool_nd', args, kwargs) + +@suppress_traceback +def depth_edge(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.depth_edge, utils3d.torch.depth_edge + return _call_based_on_args('depth_edge', args, kwargs) + +@suppress_traceback +def normals_edge(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.normals_edge, None + return _call_based_on_args('normals_edge', args, kwargs) + +@suppress_traceback +def depth_aliasing(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.depth_aliasing, utils3d.torch.depth_aliasing + return _call_based_on_args('depth_aliasing', args, kwargs) + +@suppress_traceback +def interpolate(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.interpolate, None + return _call_based_on_args('interpolate', args, kwargs) + +@suppress_traceback +def image_scrcoord(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.image_scrcoord, None + return _call_based_on_args('image_scrcoord', args, kwargs) + +@suppress_traceback +def image_uv(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.image_uv, utils3d.torch.image_uv + return _call_based_on_args('image_uv', args, kwargs) + +@suppress_traceback +def image_pixel_center(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.image_pixel_center, 
utils3d.torch.image_pixel_center + return _call_based_on_args('image_pixel_center', args, kwargs) + +@suppress_traceback +def image_pixel(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.image_pixel, None + return _call_based_on_args('image_pixel', args, kwargs) + +@suppress_traceback +def image_mesh(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.image_mesh, utils3d.torch.image_mesh + return _call_based_on_args('image_mesh', args, kwargs) + +@suppress_traceback +def image_mesh_from_depth(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.image_mesh_from_depth, utils3d.torch.image_mesh_from_depth + return _call_based_on_args('image_mesh_from_depth', args, kwargs) + +@suppress_traceback +def depth_to_normals(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.depth_to_normals, utils3d.torch.depth_to_normals + return _call_based_on_args('depth_to_normals', args, kwargs) + +@suppress_traceback +def points_to_normals(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.points_to_normals, utils3d.torch.points_to_normals + return _call_based_on_args('points_to_normals', args, kwargs) + +@suppress_traceback +def depth_to_points(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.depth_to_points, utils3d.torch.depth_to_points + return _call_based_on_args('depth_to_points', args, kwargs) + +@suppress_traceback +def chessboard(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.chessboard, utils3d.torch.chessboard + return _call_based_on_args('chessboard', args, kwargs) + +@suppress_traceback +def cube(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.cube, None + return _call_based_on_args('cube', args, kwargs) + +@suppress_traceback +def icosahedron(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.icosahedron, None + return _call_based_on_args('icosahedron', args, kwargs) + 
+@suppress_traceback +def square(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.square, None + return _call_based_on_args('square', args, kwargs) + +@suppress_traceback +def camera_frustum(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.camera_frustum, None + return _call_based_on_args('camera_frustum', args, kwargs) + +@suppress_traceback +def perspective(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.perspective, utils3d.torch.perspective + return _call_based_on_args('perspective', args, kwargs) + +@suppress_traceback +def perspective_from_fov(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.perspective_from_fov, utils3d.torch.perspective_from_fov + return _call_based_on_args('perspective_from_fov', args, kwargs) + +@suppress_traceback +def perspective_from_fov_xy(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.perspective_from_fov_xy, utils3d.torch.perspective_from_fov_xy + return _call_based_on_args('perspective_from_fov_xy', args, kwargs) + +@suppress_traceback +def intrinsics_from_focal_center(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.intrinsics_from_focal_center, utils3d.torch.intrinsics_from_focal_center + return _call_based_on_args('intrinsics_from_focal_center', args, kwargs) + +@suppress_traceback +def intrinsics_from_fov(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.intrinsics_from_fov, utils3d.torch.intrinsics_from_fov + return _call_based_on_args('intrinsics_from_fov', args, kwargs) + +@suppress_traceback +def fov_to_focal(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.fov_to_focal, utils3d.torch.fov_to_focal + return _call_based_on_args('fov_to_focal', args, kwargs) + +@suppress_traceback +def focal_to_fov(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.focal_to_fov, utils3d.torch.focal_to_fov + return _call_based_on_args('focal_to_fov', 
args, kwargs) + +@suppress_traceback +def intrinsics_to_fov(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.intrinsics_to_fov, utils3d.torch.intrinsics_to_fov + return _call_based_on_args('intrinsics_to_fov', args, kwargs) + +@suppress_traceback +def view_look_at(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.view_look_at, utils3d.torch.view_look_at + return _call_based_on_args('view_look_at', args, kwargs) + +@suppress_traceback +def extrinsics_look_at(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.extrinsics_look_at, utils3d.torch.extrinsics_look_at + return _call_based_on_args('extrinsics_look_at', args, kwargs) + +@suppress_traceback +def perspective_to_intrinsics(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.perspective_to_intrinsics, utils3d.torch.perspective_to_intrinsics + return _call_based_on_args('perspective_to_intrinsics', args, kwargs) + +@suppress_traceback +def perspective_to_near_far(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.perspective_to_near_far, None + return _call_based_on_args('perspective_to_near_far', args, kwargs) + +@suppress_traceback +def intrinsics_to_perspective(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.intrinsics_to_perspective, utils3d.torch.intrinsics_to_perspective + return _call_based_on_args('intrinsics_to_perspective', args, kwargs) + +@suppress_traceback +def extrinsics_to_view(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.extrinsics_to_view, utils3d.torch.extrinsics_to_view + return _call_based_on_args('extrinsics_to_view', args, kwargs) + +@suppress_traceback +def view_to_extrinsics(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.view_to_extrinsics, utils3d.torch.view_to_extrinsics + return _call_based_on_args('view_to_extrinsics', args, kwargs) + +@suppress_traceback +def normalize_intrinsics(*args, **kwargs): + if 
TYPE_CHECKING: # redirected to: + utils3d.numpy.normalize_intrinsics, utils3d.torch.normalize_intrinsics + return _call_based_on_args('normalize_intrinsics', args, kwargs) + +@suppress_traceback +def crop_intrinsics(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.crop_intrinsics, utils3d.torch.crop_intrinsics + return _call_based_on_args('crop_intrinsics', args, kwargs) + +@suppress_traceback +def pixel_to_uv(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.pixel_to_uv, utils3d.torch.pixel_to_uv + return _call_based_on_args('pixel_to_uv', args, kwargs) + +@suppress_traceback +def pixel_to_ndc(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.pixel_to_ndc, utils3d.torch.pixel_to_ndc + return _call_based_on_args('pixel_to_ndc', args, kwargs) + +@suppress_traceback +def uv_to_pixel(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.uv_to_pixel, utils3d.torch.uv_to_pixel + return _call_based_on_args('uv_to_pixel', args, kwargs) + +@suppress_traceback +def project_depth(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.project_depth, utils3d.torch.project_depth + return _call_based_on_args('project_depth', args, kwargs) + +@suppress_traceback +def depth_buffer_to_linear(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.depth_buffer_to_linear, utils3d.torch.depth_buffer_to_linear + return _call_based_on_args('depth_buffer_to_linear', args, kwargs) + +@suppress_traceback +def unproject_cv(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.unproject_cv, utils3d.torch.unproject_cv + return _call_based_on_args('unproject_cv', args, kwargs) + +@suppress_traceback +def unproject_gl(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.unproject_gl, utils3d.torch.unproject_gl + return _call_based_on_args('unproject_gl', args, kwargs) + +@suppress_traceback +def project_cv(*args, **kwargs): + if TYPE_CHECKING: # 
redirected to: + utils3d.numpy.project_cv, utils3d.torch.project_cv + return _call_based_on_args('project_cv', args, kwargs) + +@suppress_traceback +def project_gl(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.project_gl, utils3d.torch.project_gl + return _call_based_on_args('project_gl', args, kwargs) + +@suppress_traceback +def quaternion_to_matrix(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.quaternion_to_matrix, utils3d.torch.quaternion_to_matrix + return _call_based_on_args('quaternion_to_matrix', args, kwargs) + +@suppress_traceback +def axis_angle_to_matrix(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.axis_angle_to_matrix, utils3d.torch.axis_angle_to_matrix + return _call_based_on_args('axis_angle_to_matrix', args, kwargs) + +@suppress_traceback +def matrix_to_quaternion(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.matrix_to_quaternion, utils3d.torch.matrix_to_quaternion + return _call_based_on_args('matrix_to_quaternion', args, kwargs) + +@suppress_traceback +def extrinsics_to_essential(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.extrinsics_to_essential, utils3d.torch.extrinsics_to_essential + return _call_based_on_args('extrinsics_to_essential', args, kwargs) + +@suppress_traceback +def euler_axis_angle_rotation(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.euler_axis_angle_rotation, utils3d.torch.euler_axis_angle_rotation + return _call_based_on_args('euler_axis_angle_rotation', args, kwargs) + +@suppress_traceback +def euler_angles_to_matrix(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.euler_angles_to_matrix, utils3d.torch.euler_angles_to_matrix + return _call_based_on_args('euler_angles_to_matrix', args, kwargs) + +@suppress_traceback +def skew_symmetric(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.skew_symmetric, utils3d.torch.skew_symmetric + 
return _call_based_on_args('skew_symmetric', args, kwargs) + +@suppress_traceback +def rotation_matrix_from_vectors(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.rotation_matrix_from_vectors, utils3d.torch.rotation_matrix_from_vectors + return _call_based_on_args('rotation_matrix_from_vectors', args, kwargs) + +@suppress_traceback +def ray_intersection(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.ray_intersection, None + return _call_based_on_args('ray_intersection', args, kwargs) + +@suppress_traceback +def se3_matrix(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.se3_matrix, None + return _call_based_on_args('se3_matrix', args, kwargs) + +@suppress_traceback +def slerp_quaternion(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.slerp_quaternion, None + return _call_based_on_args('slerp_quaternion', args, kwargs) + +@suppress_traceback +def slerp_vector(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.slerp_vector, None + return _call_based_on_args('slerp_vector', args, kwargs) + +@suppress_traceback +def lerp(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.lerp, None + return _call_based_on_args('lerp', args, kwargs) + +@suppress_traceback +def lerp_se3_matrix(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.lerp_se3_matrix, None + return _call_based_on_args('lerp_se3_matrix', args, kwargs) + +@suppress_traceback +def piecewise_lerp(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.piecewise_lerp, None + return _call_based_on_args('piecewise_lerp', args, kwargs) + +@suppress_traceback +def piecewise_lerp_se3_matrix(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.piecewise_lerp_se3_matrix, None + return _call_based_on_args('piecewise_lerp_se3_matrix', args, kwargs) + +@suppress_traceback +def apply_transform(*args, **kwargs): + if TYPE_CHECKING: # redirected 
to: + utils3d.numpy.apply_transform, None + return _call_based_on_args('apply_transform', args, kwargs) + +@suppress_traceback +def linear_spline_interpolate(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.linear_spline_interpolate, None + return _call_based_on_args('linear_spline_interpolate', args, kwargs) + +@suppress_traceback +def RastContext(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.RastContext, utils3d.torch.RastContext + return _call_based_on_args('RastContext', args, kwargs) + +@suppress_traceback +def rasterize_triangle_faces(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.rasterize_triangle_faces, utils3d.torch.rasterize_triangle_faces + return _call_based_on_args('rasterize_triangle_faces', args, kwargs) + +@suppress_traceback +def rasterize_edges(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.rasterize_edges, None + return _call_based_on_args('rasterize_edges', args, kwargs) + +@suppress_traceback +def texture(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.texture, None + return _call_based_on_args('texture', args, kwargs) + +@suppress_traceback +def warp_image_by_depth(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.warp_image_by_depth, utils3d.torch.warp_image_by_depth + return _call_based_on_args('warp_image_by_depth', args, kwargs) + +@suppress_traceback +def test_rasterization(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + utils3d.numpy.test_rasterization, None + return _call_based_on_args('test_rasterization', args, kwargs) + +@suppress_traceback +def compute_face_angles(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.compute_face_angles + return _call_based_on_args('compute_face_angles', args, kwargs) + +@suppress_traceback +def compute_edges(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.compute_edges + return 
_call_based_on_args('compute_edges', args, kwargs) + +@suppress_traceback +def compute_connected_components(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.compute_connected_components + return _call_based_on_args('compute_connected_components', args, kwargs) + +@suppress_traceback +def compute_edge_connected_components(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.compute_edge_connected_components + return _call_based_on_args('compute_edge_connected_components', args, kwargs) + +@suppress_traceback +def compute_boundarys(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.compute_boundarys + return _call_based_on_args('compute_boundarys', args, kwargs) + +@suppress_traceback +def compute_dual_graph(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.compute_dual_graph + return _call_based_on_args('compute_dual_graph', args, kwargs) + +@suppress_traceback +def remove_isolated_pieces(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.remove_isolated_pieces + return _call_based_on_args('remove_isolated_pieces', args, kwargs) + +@suppress_traceback +def compute_face_tbn(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.compute_face_tbn + return _call_based_on_args('compute_face_tbn', args, kwargs) + +@suppress_traceback +def compute_vertex_tbn(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.compute_vertex_tbn + return _call_based_on_args('compute_vertex_tbn', args, kwargs) + +@suppress_traceback +def laplacian(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.laplacian + return _call_based_on_args('laplacian', args, kwargs) + +@suppress_traceback +def laplacian_smooth_mesh(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.laplacian_smooth_mesh + return _call_based_on_args('laplacian_smooth_mesh', args, kwargs) + 
+@suppress_traceback +def taubin_smooth_mesh(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.taubin_smooth_mesh + return _call_based_on_args('taubin_smooth_mesh', args, kwargs) + +@suppress_traceback +def laplacian_hc_smooth_mesh(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.laplacian_hc_smooth_mesh + return _call_based_on_args('laplacian_hc_smooth_mesh', args, kwargs) + +@suppress_traceback +def get_rays(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.get_rays + return _call_based_on_args('get_rays', args, kwargs) + +@suppress_traceback +def get_image_rays(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.get_image_rays + return _call_based_on_args('get_image_rays', args, kwargs) + +@suppress_traceback +def get_mipnerf_cones(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.get_mipnerf_cones + return _call_based_on_args('get_mipnerf_cones', args, kwargs) + +@suppress_traceback +def volume_rendering(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.volume_rendering + return _call_based_on_args('volume_rendering', args, kwargs) + +@suppress_traceback +def bin_sample(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.bin_sample + return _call_based_on_args('bin_sample', args, kwargs) + +@suppress_traceback +def importance_sample(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.importance_sample + return _call_based_on_args('importance_sample', args, kwargs) + +@suppress_traceback +def nerf_render_rays(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.nerf_render_rays + return _call_based_on_args('nerf_render_rays', args, kwargs) + +@suppress_traceback +def mipnerf_render_rays(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.mipnerf_render_rays + return 
_call_based_on_args('mipnerf_render_rays', args, kwargs) + +@suppress_traceback +def nerf_render_view(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.nerf_render_view + return _call_based_on_args('nerf_render_view', args, kwargs) + +@suppress_traceback +def mipnerf_render_view(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.mipnerf_render_view + return _call_based_on_args('mipnerf_render_view', args, kwargs) + +@suppress_traceback +def InstantNGP(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.InstantNGP + return _call_based_on_args('InstantNGP', args, kwargs) + +@suppress_traceback +def masked_min(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.masked_min + return _call_based_on_args('masked_min', args, kwargs) + +@suppress_traceback +def masked_max(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.masked_max + return _call_based_on_args('masked_max', args, kwargs) + +@suppress_traceback +def bounding_rect(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.bounding_rect + return _call_based_on_args('bounding_rect', args, kwargs) + +@suppress_traceback +def intrinsics_from_fov_xy(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.intrinsics_from_fov_xy + return _call_based_on_args('intrinsics_from_fov_xy', args, kwargs) + +@suppress_traceback +def matrix_to_euler_angles(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.matrix_to_euler_angles + return _call_based_on_args('matrix_to_euler_angles', args, kwargs) + +@suppress_traceback +def matrix_to_axis_angle(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.matrix_to_axis_angle + return _call_based_on_args('matrix_to_axis_angle', args, kwargs) + +@suppress_traceback +def axis_angle_to_quaternion(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, 
utils3d.torch.axis_angle_to_quaternion + return _call_based_on_args('axis_angle_to_quaternion', args, kwargs) + +@suppress_traceback +def quaternion_to_axis_angle(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.quaternion_to_axis_angle + return _call_based_on_args('quaternion_to_axis_angle', args, kwargs) + +@suppress_traceback +def slerp(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.slerp + return _call_based_on_args('slerp', args, kwargs) + +@suppress_traceback +def interpolate_extrinsics(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.interpolate_extrinsics + return _call_based_on_args('interpolate_extrinsics', args, kwargs) + +@suppress_traceback +def interpolate_view(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.interpolate_view + return _call_based_on_args('interpolate_view', args, kwargs) + +@suppress_traceback +def to4x4(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.to4x4 + return _call_based_on_args('to4x4', args, kwargs) + +@suppress_traceback +def rotation_matrix_2d(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.rotation_matrix_2d + return _call_based_on_args('rotation_matrix_2d', args, kwargs) + +@suppress_traceback +def rotate_2d(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.rotate_2d + return _call_based_on_args('rotate_2d', args, kwargs) + +@suppress_traceback +def translate_2d(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.translate_2d + return _call_based_on_args('translate_2d', args, kwargs) + +@suppress_traceback +def scale_2d(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.scale_2d + return _call_based_on_args('scale_2d', args, kwargs) + +@suppress_traceback +def apply_2d(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.apply_2d + return 
_call_based_on_args('apply_2d', args, kwargs) + +@suppress_traceback +def warp_image_by_forward_flow(*args, **kwargs): + if TYPE_CHECKING: # redirected to: + None, utils3d.torch.warp_image_by_forward_flow + return _call_based_on_args('warp_image_by_forward_flow', args, kwargs) + diff --git a/utils3d/utils3d/_unified/__init__.pyi b/utils3d/utils3d/_unified/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..152026d67a631038fdc8c4578f3d5130a101a26d --- /dev/null +++ b/utils3d/utils3d/_unified/__init__.pyi @@ -0,0 +1,2559 @@ +# Auto-generated interface file +from typing import List, Tuple, Dict, Union, Optional, Any, overload, Literal, Callable +import numpy as numpy_ +import torch as torch_ +import nvdiffrast.torch +import numbers +from . import numpy, torch +import utils3d.numpy, utils3d.torch + +__all__ = ["triangulate", +"compute_face_normal", +"compute_face_angle", +"compute_vertex_normal", +"compute_vertex_normal_weighted", +"remove_corrupted_faces", +"merge_duplicate_vertices", +"remove_unreferenced_vertices", +"subdivide_mesh_simple", +"mesh_relations", +"flatten_mesh_indices", +"calc_quad_candidates", +"calc_quad_distortion", +"calc_quad_direction", +"calc_quad_smoothness", +"sovle_quad", +"sovle_quad_qp", +"tri_to_quad", +"sliding_window_1d", +"sliding_window_nd", +"sliding_window_2d", +"max_pool_1d", +"max_pool_2d", +"max_pool_nd", +"depth_edge", +"normals_edge", +"depth_aliasing", +"interpolate", +"image_scrcoord", +"image_uv", +"image_pixel_center", +"image_pixel", +"image_mesh", +"image_mesh_from_depth", +"depth_to_normals", +"points_to_normals", +"depth_to_points", +"chessboard", +"cube", +"icosahedron", +"square", +"camera_frustum", +"perspective", +"perspective_from_fov", +"perspective_from_fov_xy", +"intrinsics_from_focal_center", +"intrinsics_from_fov", +"fov_to_focal", +"focal_to_fov", +"intrinsics_to_fov", +"view_look_at", +"extrinsics_look_at", +"perspective_to_intrinsics", +"perspective_to_near_far", 
+"intrinsics_to_perspective", +"extrinsics_to_view", +"view_to_extrinsics", +"normalize_intrinsics", +"crop_intrinsics", +"pixel_to_uv", +"pixel_to_ndc", +"uv_to_pixel", +"project_depth", +"depth_buffer_to_linear", +"unproject_cv", +"unproject_gl", +"project_cv", +"project_gl", +"quaternion_to_matrix", +"axis_angle_to_matrix", +"matrix_to_quaternion", +"extrinsics_to_essential", +"euler_axis_angle_rotation", +"euler_angles_to_matrix", +"skew_symmetric", +"rotation_matrix_from_vectors", +"ray_intersection", +"se3_matrix", +"slerp_quaternion", +"slerp_vector", +"lerp", +"lerp_se3_matrix", +"piecewise_lerp", +"piecewise_lerp_se3_matrix", +"apply_transform", +"linear_spline_interpolate", +"RastContext", +"rasterize_triangle_faces", +"rasterize_edges", +"texture", +"warp_image_by_depth", +"test_rasterization", +"compute_face_angles", +"compute_edges", +"compute_connected_components", +"compute_edge_connected_components", +"compute_boundarys", +"compute_dual_graph", +"remove_isolated_pieces", +"compute_face_tbn", +"compute_vertex_tbn", +"laplacian", +"laplacian_smooth_mesh", +"taubin_smooth_mesh", +"laplacian_hc_smooth_mesh", +"get_rays", +"get_image_rays", +"get_mipnerf_cones", +"volume_rendering", +"bin_sample", +"importance_sample", +"nerf_render_rays", +"mipnerf_render_rays", +"nerf_render_view", +"mipnerf_render_view", +"InstantNGP", +"masked_min", +"masked_max", +"bounding_rect", +"intrinsics_from_fov_xy", +"matrix_to_euler_angles", +"matrix_to_axis_angle", +"axis_angle_to_quaternion", +"quaternion_to_axis_angle", +"slerp", +"interpolate_extrinsics", +"interpolate_view", +"to4x4", +"rotation_matrix_2d", +"rotate_2d", +"translate_2d", +"scale_2d", +"apply_2d", +"warp_image_by_forward_flow"] + +@overload +def triangulate(faces: numpy_.ndarray, vertices: numpy_.ndarray = None, backslash: numpy_.ndarray = None) -> numpy_.ndarray: + """Triangulate a polygonal mesh. 
+ +Args: + faces (np.ndarray): [L, P] polygonal faces + vertices (np.ndarray, optional): [N, 3] 3-dimensional vertices. + If given, the triangulation is performed according to the distance + between vertices. Defaults to None. + backslash (np.ndarray, optional): [L] boolean array indicating + how to triangulate the quad faces. Defaults to None. + +Returns: + (np.ndarray): [L * (P - 2), 3] triangular faces""" + utils3d.numpy.mesh.triangulate + +@overload +def compute_face_normal(vertices: numpy_.ndarray, faces: numpy_.ndarray) -> numpy_.ndarray: + """Compute face normals of a triangular mesh + +Args: + vertices (np.ndarray): [..., N, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices + +Returns: + normals (np.ndarray): [..., T, 3] face normals""" + utils3d.numpy.mesh.compute_face_normal + +@overload +def compute_face_angle(vertices: numpy_.ndarray, faces: numpy_.ndarray, eps: float = 1e-12) -> numpy_.ndarray: + """Compute face angles of a triangular mesh + +Args: + vertices (np.ndarray): [..., N, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices + +Returns: + angles (np.ndarray): [..., T, 3] face angles""" + utils3d.numpy.mesh.compute_face_angle + +@overload +def compute_vertex_normal(vertices: numpy_.ndarray, faces: numpy_.ndarray, face_normal: numpy_.ndarray = None) -> numpy_.ndarray: + """Compute vertex normals of a triangular mesh by averaging neighboring face normals +TODO: can be improved. + +Args: + vertices (np.ndarray): [..., N, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices + face_normal (np.ndarray, optional): [..., T, 3] face normals. + None to compute face normals from vertices and faces. Defaults to None. 
+ +Returns: + normals (np.ndarray): [..., N, 3] vertex normals""" + utils3d.numpy.mesh.compute_vertex_normal + +@overload +def compute_vertex_normal_weighted(vertices: numpy_.ndarray, faces: numpy_.ndarray, face_normal: numpy_.ndarray = None) -> numpy_.ndarray: + """Compute vertex normals of a triangular mesh by weighted sum of neighboring face normals +according to the angles + +Args: + vertices (np.ndarray): [..., N, 3] 3-dimensional vertices + faces (np.ndarray): [..., T, 3] triangular face indices + face_normal (np.ndarray, optional): [..., T, 3] face normals. + None to compute face normals from vertices and faces. Defaults to None. + +Returns: + normals (np.ndarray): [..., N, 3] vertex normals""" + utils3d.numpy.mesh.compute_vertex_normal_weighted + +@overload +def remove_corrupted_faces(faces: numpy_.ndarray) -> numpy_.ndarray: + """Remove corrupted faces (faces with duplicated vertices) + +Args: + faces (np.ndarray): [T, 3] triangular face indices + +Returns: + np.ndarray: [T_, 3] triangular face indices""" + utils3d.numpy.mesh.remove_corrupted_faces + +@overload +def merge_duplicate_vertices(vertices: numpy_.ndarray, faces: numpy_.ndarray, tol: float = 1e-06) -> Tuple[numpy_.ndarray, numpy_.ndarray]: + """Merge duplicate vertices of a triangular mesh. +Duplicate vertices are merged by selecting one of them, and the face indices are updated accordingly. + +Args: + vertices (np.ndarray): [N, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices + tol (float, optional): tolerance for merging. Defaults to 1e-6. + +Returns: + vertices (np.ndarray): [N_, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices""" + utils3d.numpy.mesh.merge_duplicate_vertices + +@overload +def remove_unreferenced_vertices(faces: numpy_.ndarray, *vertice_attrs, return_indices: bool = False) -> Tuple[numpy_.ndarray, ...]: + """Remove unreferenced vertices of a mesh. 
+Unreferenced vertices are removed, and the face indices are updated accordingly. + +Args: + faces (np.ndarray): [T, P] face indices + *vertice_attrs: vertex attributes + +Returns: + faces (np.ndarray): [T, P] face indices + *vertice_attrs: vertex attributes + indices (np.ndarray, optional): [N] indices of vertices that are kept. Defaults to None.""" + utils3d.numpy.mesh.remove_unreferenced_vertices + +@overload +def subdivide_mesh_simple(vertices: numpy_.ndarray, faces: numpy_.ndarray, n: int = 1) -> Tuple[numpy_.ndarray, numpy_.ndarray]: + """Subdivide a triangular mesh by splitting each triangle into 4 smaller triangles. +NOTE: All original vertices are kept, and new vertices are appended to the end of the vertex list. + +Args: + vertices (np.ndarray): [N, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices + n (int, optional): number of subdivisions. Defaults to 1. + +Returns: + vertices (np.ndarray): [N_, 3] subdivided 3-dimensional vertices + faces (np.ndarray): [4 * T, 3] subdivided triangular face indices""" + utils3d.numpy.mesh.subdivide_mesh_simple + +@overload +def mesh_relations(faces: numpy_.ndarray) -> Tuple[numpy_.ndarray, numpy_.ndarray]: + """Calculate the relation between vertices and faces. +NOTE: The input mesh must be a manifold triangle mesh. + +Args: + faces (np.ndarray): [T, 3] triangular face indices + +Returns: + edges (np.ndarray): [E, 2] edge indices + edge2face (np.ndarray): [E, 2] edge to face relation. The second column is -1 if the edge is boundary. + face2edge (np.ndarray): [T, 3] face to edge relation + face2face (np.ndarray): [T, 3] face to face relation""" + utils3d.numpy.mesh.mesh_relations + +@overload +def flatten_mesh_indices(*args: numpy_.ndarray) -> Tuple[numpy_.ndarray, ...]: + utils3d.numpy.mesh.flatten_mesh_indices + +@overload +def calc_quad_candidates(edges: numpy_.ndarray, face2edge: numpy_.ndarray, edge2face: numpy_.ndarray): + """Calculate the candidate quad faces. 
+ +Args: + edges (np.ndarray): [E, 2] edge indices + face2edge (np.ndarray): [T, 3] face to edge relation + edge2face (np.ndarray): [E, 2] edge to face relation + +Returns: + quads (np.ndarray): [Q, 4] quad candidate indices + quad2edge (np.ndarray): [Q, 4] edge to quad candidate relation + quad2adj (np.ndarray): [Q, 8] adjacent quad candidates of each quad candidate + quads_valid (np.ndarray): [E] whether the quad corresponding to the edge is valid""" + utils3d.numpy.quadmesh.calc_quad_candidates + +@overload +def calc_quad_distortion(vertices: numpy_.ndarray, quads: numpy_.ndarray): + """Calculate the distortion of each candidate quad face. + +Args: + vertices (np.ndarray): [N, 3] 3-dimensional vertices + quads (np.ndarray): [Q, 4] quad face indices + +Returns: + distortion (np.ndarray): [Q] distortion of each quad face""" + utils3d.numpy.quadmesh.calc_quad_distortion + +@overload +def calc_quad_direction(vertices: numpy_.ndarray, quads: numpy_.ndarray): + """Calculate the direction of each candidate quad face. + +Args: + vertices (np.ndarray): [N, 3] 3-dimensional vertices + quads (np.ndarray): [Q, 4] quad face indices + +Returns: + direction (np.ndarray): [Q, 4] direction of each quad face. + Represented by the angle between the crossing and each edge.""" + utils3d.numpy.quadmesh.calc_quad_direction + +@overload +def calc_quad_smoothness(quad2edge: numpy_.ndarray, quad2adj: numpy_.ndarray, quads_direction: numpy_.ndarray): + """Calculate the smoothness of each candidate quad face connection. 
+ +Args: + quad2edge (np.ndarray): [Q, 4] edge to quad candidate relation + quad2adj (np.ndarray): [Q, 8] adjacent quad faces of each quad face + quads_direction (np.ndarray): [Q, 4] direction of each quad face + +Returns: + smoothness (np.ndarray): [Q, 8] smoothness of each quad face connection""" + utils3d.numpy.quadmesh.calc_quad_smoothness + +@overload +def sovle_quad(face2edge: numpy_.ndarray, edge2face: numpy_.ndarray, quad2adj: numpy_.ndarray, quads_distortion: numpy_.ndarray, quads_smoothness: numpy_.ndarray, quads_valid: numpy_.ndarray): + """Solve the quad mesh from the candidate quad faces. + +Args: + face2edge (np.ndarray): [T, 3] face to edge relation + edge2face (np.ndarray): [E, 2] edge to face relation + quad2adj (np.ndarray): [Q, 8] adjacent quad faces of each quad face + quads_distortion (np.ndarray): [Q] distortion of each quad face + quads_smoothness (np.ndarray): [Q, 8] smoothness of each quad face connection + quads_valid (np.ndarray): [E] whether the quad corresponding to the edge is valid + +Returns: + weights (np.ndarray): [Q] weight of each valid quad face""" + utils3d.numpy.quadmesh.sovle_quad + +@overload +def sovle_quad_qp(face2edge: numpy_.ndarray, edge2face: numpy_.ndarray, quad2adj: numpy_.ndarray, quads_distortion: numpy_.ndarray, quads_smoothness: numpy_.ndarray, quads_valid: numpy_.ndarray): + """Solve the quad mesh from the candidate quad faces. 
+ +Args: + face2edge (np.ndarray): [T, 3] face to edge relation + edge2face (np.ndarray): [E, 2] edge to face relation + quad2adj (np.ndarray): [Q, 8] adjacent quad faces of each quad face + quads_distortion (np.ndarray): [Q] distortion of each quad face + quads_smoothness (np.ndarray): [Q, 8] smoothness of each quad face connection + quads_valid (np.ndarray): [E] whether the quad corresponding to the edge is valid + +Returns: + weights (np.ndarray): [Q] weight of each valid quad face""" + utils3d.numpy.quadmesh.sovle_quad_qp + +@overload +def tri_to_quad(vertices: numpy_.ndarray, faces: numpy_.ndarray) -> Tuple[numpy_.ndarray, numpy_.ndarray]: + """Convert a triangle mesh to a quad mesh. +NOTE: The input mesh must be a manifold mesh. + +Args: + vertices (np.ndarray): [N, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices + +Returns: + vertices (np.ndarray): [N_, 3] 3-dimensional vertices + faces (np.ndarray): [Q, 4] quad face indices""" + utils3d.numpy.quadmesh.tri_to_quad + +@overload +def sliding_window_1d(x: numpy_.ndarray, window_size: int, stride: int, axis: int = -1): + """Return a view of the input array with a sliding window of the given window size and stride. +The sliding window is performed over the given axis, and the window dimension is appended to the end of the output array's shape. + +Args: + x (np.ndarray): input array with shape (..., axis_size, ...) 
+ window_size (int): size of the sliding window + stride (int): stride of the sliding window + axis (int): axis to perform sliding window over + +Returns: + a_sliding (np.ndarray): view of the input array with shape (..., n_windows, ..., window_size), where n_windows = (axis_size - window_size + 1) // stride""" + utils3d.numpy.utils.sliding_window_1d + +@overload +def sliding_window_nd(x: numpy_.ndarray, window_size: Tuple[int, ...], stride: Tuple[int, ...], axis: Tuple[int, ...]) -> numpy_.ndarray: + utils3d.numpy.utils.sliding_window_nd + +@overload +def sliding_window_2d(x: numpy_.ndarray, window_size: Union[int, Tuple[int, int]], stride: Union[int, Tuple[int, int]], axis: Tuple[int, int] = (-2, -1)) -> numpy_.ndarray: + utils3d.numpy.utils.sliding_window_2d + +@overload +def max_pool_1d(x: numpy_.ndarray, kernel_size: int, stride: int, padding: int = 0, axis: int = -1): + utils3d.numpy.utils.max_pool_1d + +@overload +def max_pool_2d(x: numpy_.ndarray, kernel_size: Union[int, Tuple[int, int]], stride: Union[int, Tuple[int, int]], padding: Union[int, Tuple[int, int]], axis: Tuple[int, int] = (-2, -1)): + utils3d.numpy.utils.max_pool_2d + +@overload +def max_pool_nd(x: numpy_.ndarray, kernel_size: Tuple[int, ...], stride: Tuple[int, ...], padding: Tuple[int, ...], axis: Tuple[int, ...]) -> numpy_.ndarray: + utils3d.numpy.utils.max_pool_nd + +@overload +def depth_edge(depth: numpy_.ndarray, atol: float = None, rtol: float = None, kernel_size: int = 3, mask: numpy_.ndarray = None) -> numpy_.ndarray: + """Compute the edge mask from depth map. The edge is defined as the pixels whose neighbors have large difference in depth. 
+ +Args: + depth (np.ndarray): shape (..., height, width), linear depth map + atol (float): absolute tolerance + rtol (float): relative tolerance + +Returns: + edge (np.ndarray): shape (..., height, width) of dtype bool""" + utils3d.numpy.utils.depth_edge + +@overload +def normals_edge(normals: numpy_.ndarray, tol: float, kernel_size: int = 3, mask: numpy_.ndarray = None) -> numpy_.ndarray: + """Compute the edge mask from normal map. + +Args: + normals (np.ndarray): shape (..., height, width, 3), normal map + tol (float): tolerance in degrees + +Returns: + edge (np.ndarray): shape (..., height, width) of dtype bool""" + utils3d.numpy.utils.normals_edge + +@overload +def depth_aliasing(depth: numpy_.ndarray, atol: float = None, rtol: float = None, kernel_size: int = 3, mask: numpy_.ndarray = None) -> numpy_.ndarray: + """Compute the map that indicates the aliasing of a depth map. The aliasing is defined as the pixels which are neither close to the maximum nor the minimum of their neighbors. +Args: + depth (np.ndarray): shape (..., height, width), linear depth map + atol (float): absolute tolerance + rtol (float): relative tolerance + +Returns: + edge (np.ndarray): shape (..., height, width) of dtype bool""" + utils3d.numpy.utils.depth_aliasing + +@overload +def interpolate(bary: numpy_.ndarray, tri_id: numpy_.ndarray, attr: numpy_.ndarray, faces: numpy_.ndarray) -> numpy_.ndarray: + """Interpolate with given barycentric coordinates and triangle indices + +Args: + bary (np.ndarray): shape (..., 3), barycentric coordinates + tri_id (np.ndarray): int array of shape (...), triangle indices + attr (np.ndarray): shape (N, M), vertices attributes + faces (np.ndarray): int array of shape (T, 3), face vertex indices + +Returns: + np.ndarray: shape (..., M) interpolated result""" + utils3d.numpy.utils.interpolate + +@overload +def image_scrcoord(width: int, height: int) -> numpy_.ndarray: + """Get OpenGL's screen space coordinates, ranging in [0, 1].
+[0, 0] is the bottom-left corner of the image. + +Args: + width (int): image width + height (int): image height + +Returns: + (np.ndarray): shape (height, width, 2)""" + utils3d.numpy.utils.image_scrcoord + +@overload +def image_uv(height: int, width: int, left: int = None, top: int = None, right: int = None, bottom: int = None, dtype: numpy_.dtype = numpy_.float32) -> numpy_.ndarray: + """Get image space UV grid, ranging in [0, 1]. + +>>> image_uv(10, 10): +[[[0.05, 0.05], [0.15, 0.05], ..., [0.95, 0.05]], + [[0.05, 0.15], [0.15, 0.15], ..., [0.95, 0.15]], + ... ... ... + [[0.05, 0.95], [0.15, 0.95], ..., [0.95, 0.95]]] + +Args: + width (int): image width + height (int): image height + +Returns: + np.ndarray: shape (height, width, 2)""" + utils3d.numpy.utils.image_uv + +@overload +def image_pixel_center(height: int, width: int, left: int = None, top: int = None, right: int = None, bottom: int = None, dtype: numpy_.dtype = numpy_.float32) -> numpy_.ndarray: + """Get image pixel center coordinates, ranging in [0, width] and [0, height]. +`image[i, j]` has pixel center coordinates `(j + 0.5, i + 0.5)`. + +>>> image_pixel_center(10, 10): +[[[0.5, 0.5], [1.5, 0.5], ..., [9.5, 0.5]], + [[0.5, 1.5], [1.5, 1.5], ..., [9.5, 1.5]], + ... ... ... +[[0.5, 9.5], [1.5, 9.5], ..., [9.5, 9.5]]] + +Args: + width (int): image width + height (int): image height + +Returns: + np.ndarray: shape (height, width, 2)""" + utils3d.numpy.utils.image_pixel_center + +@overload +def image_pixel(height: int, width: int, left: int = None, top: int = None, right: int = None, bottom: int = None, dtype: numpy_.dtype = numpy_.int32) -> numpy_.ndarray: + """Get image pixel coordinates grid, ranging in [0, width - 1] and [0, height - 1]. +`image[i, j]` has pixel center coordinates `(j, i)`. + +>>> image_pixel_center(10, 10): +[[[0, 0], [1, 0], ..., [9, 0]], + [[0, 1.5], [1, 1], ..., [9, 1]], + ... ... ... 
+[[0, 9], [1, 9], ..., [9, 9]]] + +Args: + width (int): image width + height (int): image height + +Returns: + np.ndarray: shape (height, width, 2)""" + utils3d.numpy.utils.image_pixel + +@overload +def image_mesh(*image_attrs: numpy_.ndarray, mask: numpy_.ndarray = None, tri: bool = False, return_indices: bool = False) -> Tuple[numpy_.ndarray, ...]: + """Get a mesh regarding image pixel uv coordinates as vertices and image grid as faces. + +Args: + *image_attrs (np.ndarray): image attributes in shape (height, width, [channels]) + mask (np.ndarray, optional): binary mask of shape (height, width), dtype=bool. Defaults to None. + +Returns: + faces (np.ndarray): faces connecting neighboring pixels. shape (T, 4) if tri is False, else (T, 3) + *vertex_attrs (np.ndarray): vertex attributes in corresponding order with input image_attrs + indices (np.ndarray, optional): indices of vertices in the original mesh""" + utils3d.numpy.utils.image_mesh + +@overload +def image_mesh_from_depth(depth: numpy_.ndarray, extrinsics: numpy_.ndarray = None, intrinsics: numpy_.ndarray = None, *vertice_attrs: numpy_.ndarray, atol: float = None, rtol: float = None, remove_by_depth: bool = False, return_uv: bool = False, return_indices: bool = False) -> Tuple[numpy_.ndarray, ...]: + """Get a triangle mesh by lifting depth map to 3D. + +Args: + depth (np.ndarray): [H, W] depth map + extrinsics (np.ndarray, optional): [4, 4] extrinsics matrix. Defaults to None. + intrinsics (np.ndarray, optional): [3, 3] intrinsics matrix. Defaults to None. + *vertice_attrs (np.ndarray): [H, W, C] vertex attributes. Defaults to None. + atol (float, optional): absolute tolerance. Defaults to None. + rtol (float, optional): relative tolerance. Defaults to None. + triangles with vertices having depth difference larger than atol + rtol * depth will be marked. + remove_by_depth (bool, optional): whether to remove triangles with large depth difference. Defaults to False.
+ return_uv (bool, optional): whether to return uv coordinates. Defaults to False. + return_indices (bool, optional): whether to return indices of vertices in the original mesh. Defaults to False. + +Returns: + vertices (np.ndarray): [N, 3] vertices + faces (np.ndarray): [T, 3] faces + *vertice_attrs (np.ndarray): [N, C] vertex attributes + image_uv (np.ndarray, optional): [N, 2] uv coordinates + ref_indices (np.ndarray, optional): [N] indices of vertices in the original mesh""" + utils3d.numpy.utils.image_mesh_from_depth + +@overload +def depth_to_normals(depth: numpy_.ndarray, intrinsics: numpy_.ndarray, mask: numpy_.ndarray = None) -> numpy_.ndarray: + """Calculate normal map from depth map. Value range is [-1, 1]. Normal direction in OpenGL identity camera's coordinate system. + +Args: + depth (np.ndarray): shape (height, width), linear depth map + intrinsics (np.ndarray): shape (3, 3), intrinsics matrix +Returns: + normal (np.ndarray): shape (height, width, 3), normal map. """ + utils3d.numpy.utils.depth_to_normals + +@overload +def points_to_normals(point: numpy_.ndarray, mask: numpy_.ndarray = None) -> numpy_.ndarray: + """Calculate normal map from point map. Value range is [-1, 1]. Normal direction in OpenGL identity camera's coordinate system. + +Args: + point (np.ndarray): shape (height, width, 3), point map +Returns: + normal (np.ndarray): shape (height, width, 3), normal map. """ + utils3d.numpy.utils.points_to_normals + +@overload +def depth_to_points(depth: numpy_.ndarray, extrinsics: numpy_.ndarray = None, intrinsics: numpy_.ndarray = None) -> numpy_.ndarray: + """Unproject depth map to 3D points. 
+ +Args: + depth (np.ndarray): [..., H, W] depth value + extrinsics (optional, np.ndarray): [..., 4, 4] extrinsics matrix + intrinsics (np.ndarray): [..., 3, 3] intrinsics matrix + +Returns: + points (np.ndarray): [..., N, 3] 3d points""" + utils3d.numpy.utils.depth_to_points + +@overload +def chessboard(width: int, height: int, grid_size: int, color_a: numpy_.ndarray, color_b: numpy_.ndarray) -> numpy_.ndarray: + """Get a chessboard image + +Args: + width (int): image width + height (int): image height + grid_size (int): size of chessboard grid + color_a (np.ndarray): color of the grid at the top-left corner + color_b (np.ndarray): color in complementary grid cells + +Returns: + image (np.ndarray): shape (height, width, channels), chessboard image""" + utils3d.numpy.utils.chessboard + +@overload +def cube(tri: bool = False) -> Tuple[numpy_.ndarray, numpy_.ndarray]: + """Get a cube mesh of size 1 centered at origin. + +### Parameters + tri (bool, optional): return triangulated mesh. Defaults to False, which returns quad mesh. + +### Returns + vertices (np.ndarray): shape (8, 3) + faces (np.ndarray): shape (6, 4) if tri is False, else (12, 3)""" + utils3d.numpy.utils.cube + +@overload +def icosahedron(): + utils3d.numpy.utils.icosahedron + +@overload +def square(tri: bool = False) -> Tuple[numpy_.ndarray, numpy_.ndarray]: + """Get a square mesh of area 1 centered at origin in the xy-plane.
+ +### Returns + vertices (np.ndarray): shape (4, 3) + faces (np.ndarray): shape (1, 4)""" + utils3d.numpy.utils.square + +@overload +def camera_frustum(extrinsics: numpy_.ndarray, intrinsics: numpy_.ndarray, depth: float = 1.0) -> Tuple[numpy_.ndarray, numpy_.ndarray, numpy_.ndarray]: + """Get x triangle mesh of camera frustum.""" + utils3d.numpy.utils.camera_frustum + +@overload +def perspective(fov_y: Union[float, numpy_.ndarray], aspect: Union[float, numpy_.ndarray], near: Union[float, numpy_.ndarray], far: Union[float, numpy_.ndarray]) -> numpy_.ndarray: + """Get OpenGL perspective matrix + +Args: + fov_y (float | np.ndarray): field of view in y axis + aspect (float | np.ndarray): aspect ratio + near (float | np.ndarray): near plane to clip + far (float | np.ndarray): far plane to clip + +Returns: + (np.ndarray): [..., 4, 4] perspective matrix""" + utils3d.numpy.transforms.perspective + +@overload +def perspective_from_fov(fov: Union[float, numpy_.ndarray], width: Union[int, numpy_.ndarray], height: Union[int, numpy_.ndarray], near: Union[float, numpy_.ndarray], far: Union[float, numpy_.ndarray]) -> numpy_.ndarray: + """Get OpenGL perspective matrix from field of view in largest dimension + +Args: + fov (float | np.ndarray): field of view in largest dimension + width (int | np.ndarray): image width + height (int | np.ndarray): image height + near (float | np.ndarray): near plane to clip + far (float | np.ndarray): far plane to clip + +Returns: + (np.ndarray): [..., 4, 4] perspective matrix""" + utils3d.numpy.transforms.perspective_from_fov + +@overload +def perspective_from_fov_xy(fov_x: Union[float, numpy_.ndarray], fov_y: Union[float, numpy_.ndarray], near: Union[float, numpy_.ndarray], far: Union[float, numpy_.ndarray]) -> numpy_.ndarray: + """Get OpenGL perspective matrix from field of view in x and y axis + +Args: + fov_x (float | np.ndarray): field of view in x axis + fov_y (float | np.ndarray): field of view in y axis + near (float | np.ndarray): near 
plane to clip + far (float | np.ndarray): far plane to clip + +Returns: + (np.ndarray): [..., 4, 4] perspective matrix""" + utils3d.numpy.transforms.perspective_from_fov_xy + +@overload +def intrinsics_from_focal_center(fx: Union[float, numpy_.ndarray], fy: Union[float, numpy_.ndarray], cx: Union[float, numpy_.ndarray], cy: Union[float, numpy_.ndarray], dtype: Optional[numpy_.dtype] = numpy_.float32) -> numpy_.ndarray: + """Get OpenCV intrinsics matrix + +Returns: + (np.ndarray): [..., 3, 3] OpenCV intrinsics matrix""" + utils3d.numpy.transforms.intrinsics_from_focal_center + +@overload +def intrinsics_from_fov(fov_max: Union[float, numpy_.ndarray] = None, fov_min: Union[float, numpy_.ndarray] = None, fov_x: Union[float, numpy_.ndarray] = None, fov_y: Union[float, numpy_.ndarray] = None, width: Union[int, numpy_.ndarray] = None, height: Union[int, numpy_.ndarray] = None) -> numpy_.ndarray: + """Get normalized OpenCV intrinsics matrix from given field of view. +You can provide either fov_max, fov_min, fov_x or fov_y + +Args: + width (int | np.ndarray): image width + height (int | np.ndarray): image height + fov_max (float | np.ndarray): field of view in largest dimension + fov_min (float | np.ndarray): field of view in smallest dimension + fov_x (float | np.ndarray): field of view in x axis + fov_y (float | np.ndarray): field of view in y axis + +Returns: + (np.ndarray): [..., 3, 3] OpenCV intrinsics matrix""" + utils3d.numpy.transforms.intrinsics_from_fov + +@overload +def fov_to_focal(fov: numpy_.ndarray): + utils3d.numpy.transforms.fov_to_focal + +@overload +def focal_to_fov(focal: numpy_.ndarray): + utils3d.numpy.transforms.focal_to_fov + +@overload +def intrinsics_to_fov(intrinsics: numpy_.ndarray) -> Tuple[numpy_.ndarray, numpy_.ndarray]: + utils3d.numpy.transforms.intrinsics_to_fov + +@overload +def view_look_at(eye: numpy_.ndarray, look_at: numpy_.ndarray, up: numpy_.ndarray) -> numpy_.ndarray: + """Get OpenGL view matrix looking at something + +Args: + eye 
(np.ndarray): [..., 3] the eye position + look_at (np.ndarray): [..., 3] the position to look at + up (np.ndarray): [..., 3] head up direction (y axis in screen space). Not necessarily othogonal to view direction + +Returns: + (np.ndarray): [..., 4, 4], view matrix""" + utils3d.numpy.transforms.view_look_at + +@overload +def extrinsics_look_at(eye: numpy_.ndarray, look_at: numpy_.ndarray, up: numpy_.ndarray) -> numpy_.ndarray: + """Get OpenCV extrinsics matrix looking at something + +Args: + eye (np.ndarray): [..., 3] the eye position + look_at (np.ndarray): [..., 3] the position to look at + up (np.ndarray): [..., 3] head up direction (-y axis in screen space). Not necessarily othogonal to view direction + +Returns: + (np.ndarray): [..., 4, 4], extrinsics matrix""" + utils3d.numpy.transforms.extrinsics_look_at + +@overload +def perspective_to_intrinsics(perspective: numpy_.ndarray) -> numpy_.ndarray: + """OpenGL perspective matrix to OpenCV intrinsics + +Args: + perspective (np.ndarray): [..., 4, 4] OpenGL perspective matrix + +Returns: + (np.ndarray): shape [..., 3, 3] OpenCV intrinsics""" + utils3d.numpy.transforms.perspective_to_intrinsics + +@overload +def perspective_to_near_far(perspective: numpy_.ndarray) -> Tuple[numpy_.ndarray, numpy_.ndarray]: + """Get near and far planes from OpenGL perspective matrix + +Args:""" + utils3d.numpy.transforms.perspective_to_near_far + +@overload +def intrinsics_to_perspective(intrinsics: numpy_.ndarray, near: Union[float, numpy_.ndarray], far: Union[float, numpy_.ndarray]) -> numpy_.ndarray: + """OpenCV intrinsics to OpenGL perspective matrix +NOTE: not work for tile-shifting intrinsics currently + +Args: + intrinsics (np.ndarray): [..., 3, 3] OpenCV intrinsics matrix + near (float | np.ndarray): [...] near plane to clip + far (float | np.ndarray): [...] 
far plane to clip +Returns: + (np.ndarray): [..., 4, 4] OpenGL perspective matrix""" + utils3d.numpy.transforms.intrinsics_to_perspective + +@overload +def extrinsics_to_view(extrinsics: numpy_.ndarray) -> numpy_.ndarray: + """OpenCV camera extrinsics to OpenGL view matrix + +Args: + extrinsics (np.ndarray): [..., 4, 4] OpenCV camera extrinsics matrix + +Returns: + (np.ndarray): [..., 4, 4] OpenGL view matrix""" + utils3d.numpy.transforms.extrinsics_to_view + +@overload +def view_to_extrinsics(view: numpy_.ndarray) -> numpy_.ndarray: + """OpenGL view matrix to OpenCV camera extrinsics + +Args: + view (np.ndarray): [..., 4, 4] OpenGL view matrix + +Returns: + (np.ndarray): [..., 4, 4] OpenCV camera extrinsics matrix""" + utils3d.numpy.transforms.view_to_extrinsics + +@overload +def normalize_intrinsics(intrinsics: numpy_.ndarray, width: Union[int, numpy_.ndarray], height: Union[int, numpy_.ndarray], integer_pixel_centers: bool = True) -> numpy_.ndarray: + """Normalize intrinsics from pixel cooridnates to uv coordinates + +Args: + intrinsics (np.ndarray): [..., 3, 3] camera intrinsics(s) to normalize + width (int | np.ndarray): [...] image width(s) + height (int | np.ndarray): [...] image height(s) + integer_pixel_centers (bool): whether the integer pixel coordinates are at the center of the pixel. If False, the integer coordinates are at the left-top corner of the pixel. 
+ +Returns: + (np.ndarray): [..., 3, 3] normalized camera intrinsics(s)""" + utils3d.numpy.transforms.normalize_intrinsics + +@overload +def crop_intrinsics(intrinsics: numpy_.ndarray, width: Union[int, numpy_.ndarray], height: Union[int, numpy_.ndarray], left: Union[int, numpy_.ndarray], top: Union[int, numpy_.ndarray], crop_width: Union[int, numpy_.ndarray], crop_height: Union[int, numpy_.ndarray]) -> numpy_.ndarray: + """Evaluate the new intrinsics(s) after crop the image: cropped_img = img[top:top+crop_height, left:left+crop_width] + +Args: + intrinsics (np.ndarray): [..., 3, 3] camera intrinsics(s) to crop + width (int | np.ndarray): [...] image width(s) + height (int | np.ndarray): [...] image height(s) + left (int | np.ndarray): [...] left crop boundary + top (int | np.ndarray): [...] top crop boundary + crop_width (int | np.ndarray): [...] crop width + crop_height (int | np.ndarray): [...] crop height + +Returns: + (np.ndarray): [..., 3, 3] cropped camera intrinsics(s)""" + utils3d.numpy.transforms.crop_intrinsics + +@overload +def pixel_to_uv(pixel: numpy_.ndarray, width: Union[int, numpy_.ndarray], height: Union[int, numpy_.ndarray]) -> numpy_.ndarray: + """Args: + pixel (np.ndarray): [..., 2] pixel coordinrates defined in image space, x range is (0, W - 1), y range is (0, H - 1) + width (int | np.ndarray): [...] image width(s) + height (int | np.ndarray): [...] image height(s) + +Returns: + (np.ndarray): [..., 2] pixel coordinrates defined in uv space, the range is (0, 1)""" + utils3d.numpy.transforms.pixel_to_uv + +@overload +def pixel_to_ndc(pixel: numpy_.ndarray, width: Union[int, numpy_.ndarray], height: Union[int, numpy_.ndarray]) -> numpy_.ndarray: + """Args: + pixel (np.ndarray): [..., 2] pixel coordinrates defined in image space, x range is (0, W - 1), y range is (0, H - 1) + width (int | np.ndarray): [...] image width(s) + height (int | np.ndarray): [...] 
image height(s) + +Returns: + (np.ndarray): [..., 2] pixel coordinrates defined in ndc space, the range is (-1, 1)""" + utils3d.numpy.transforms.pixel_to_ndc + +@overload +def uv_to_pixel(uv: numpy_.ndarray, width: Union[int, numpy_.ndarray], height: Union[int, numpy_.ndarray]) -> numpy_.ndarray: + """Args: + pixel (np.ndarray): [..., 2] pixel coordinrates defined in image space, x range is (0, W - 1), y range is (0, H - 1) + width (int | np.ndarray): [...] image width(s) + height (int | np.ndarray): [...] image height(s) + +Returns: + (np.ndarray): [..., 2] pixel coordinrates defined in uv space, the range is (0, 1)""" + utils3d.numpy.transforms.uv_to_pixel + +@overload +def project_depth(depth: numpy_.ndarray, near: Union[float, numpy_.ndarray], far: Union[float, numpy_.ndarray]) -> numpy_.ndarray: + """Project linear depth to depth value in screen space + +Args: + depth (np.ndarray): [...] depth value + near (float | np.ndarray): [...] near plane to clip + far (float | np.ndarray): [...] far plane to clip + +Returns: + (np.ndarray): [..., 1] depth value in screen space, value ranging in [0, 1]""" + utils3d.numpy.transforms.project_depth + +@overload +def depth_buffer_to_linear(depth_buffer: numpy_.ndarray, near: Union[float, numpy_.ndarray], far: Union[float, numpy_.ndarray]) -> numpy_.ndarray: + """OpenGL depth buffer to linear depth + +Args: + depth_buffer (np.ndarray): [...] depth value + near (float | np.ndarray): [...] near plane to clip + far (float | np.ndarray): [...] far plane to clip + +Returns: + (np.ndarray): [..., 1] linear depth""" + utils3d.numpy.transforms.depth_buffer_to_linear + +@overload +def unproject_cv(uv_coord: numpy_.ndarray, depth: numpy_.ndarray = None, extrinsics: numpy_.ndarray = None, intrinsics: numpy_.ndarray = None) -> numpy_.ndarray: + """Unproject uv coordinates to 3D view space following the OpenCV convention + +Args: + uv_coord (np.ndarray): [..., N, 2] uv coordinates, value ranging in [0, 1]. + The origin (0., 0.) 
is corresponding to the left & top + depth (np.ndarray): [..., N] depth value + extrinsics (np.ndarray): [..., 4, 4] extrinsics matrix + intrinsics (np.ndarray): [..., 3, 3] intrinsics matrix + +Returns: + points (np.ndarray): [..., N, 3] 3d points""" + utils3d.numpy.transforms.unproject_cv + +@overload +def unproject_gl(screen_coord: numpy_.ndarray, model: numpy_.ndarray = None, view: numpy_.ndarray = None, perspective: numpy_.ndarray = None) -> numpy_.ndarray: + """Unproject screen space coordinates to 3D view space following the OpenGL convention (except for row major matrice) + +Args: + screen_coord (np.ndarray): [..., N, 3] screen space coordinates, value ranging in [0, 1]. + The origin (0., 0., 0.) is corresponding to the left & bottom & nearest + model (np.ndarray): [..., 4, 4] model matrix + view (np.ndarray): [..., 4, 4] view matrix + perspective (np.ndarray): [..., 4, 4] perspective matrix + +Returns: + points (np.ndarray): [..., N, 3] 3d points""" + utils3d.numpy.transforms.unproject_gl + +@overload +def project_cv(points: numpy_.ndarray, extrinsics: numpy_.ndarray = None, intrinsics: numpy_.ndarray = None) -> Tuple[numpy_.ndarray, numpy_.ndarray]: + """Project 3D points to 2D following the OpenCV convention + +Args: + points (np.ndarray): [..., N, 3] or [..., N, 4] 3D points to project, if the last + dimension is 4, the points are assumed to be in homogeneous coordinates + extrinsics (np.ndarray): [..., 4, 4] extrinsics matrix + intrinsics (np.ndarray): [..., 3, 3] intrinsics matrix + +Returns: + uv_coord (np.ndarray): [..., N, 2] uv coordinates, value ranging in [0, 1]. + The origin (0., 0.) 
is corresponding to the left & top + linear_depth (np.ndarray): [..., N] linear depth""" + utils3d.numpy.transforms.project_cv + +@overload +def project_gl(points: numpy_.ndarray, model: numpy_.ndarray = None, view: numpy_.ndarray = None, perspective: numpy_.ndarray = None) -> Tuple[numpy_.ndarray, numpy_.ndarray]: + """Project 3D points to 2D following the OpenGL convention (except for row major matrice) + +Args: + points (np.ndarray): [..., N, 3] or [..., N, 4] 3D points to project, if the last + dimension is 4, the points are assumed to be in homogeneous coordinates + model (np.ndarray): [..., 4, 4] model matrix + view (np.ndarray): [..., 4, 4] view matrix + perspective (np.ndarray): [..., 4, 4] perspective matrix + +Returns: + scr_coord (np.ndarray): [..., N, 3] screen space coordinates, value ranging in [0, 1]. + The origin (0., 0., 0.) is corresponding to the left & bottom & nearest + linear_depth (np.ndarray): [..., N] linear depth""" + utils3d.numpy.transforms.project_gl + +@overload +def quaternion_to_matrix(quaternion: numpy_.ndarray, eps: float = 1e-12) -> numpy_.ndarray: + """Converts a batch of quaternions (w, x, y, z) to rotation matrices + +Args: + quaternion (np.ndarray): shape (..., 4), the quaternions to convert + +Returns: + np.ndarray: shape (..., 3, 3), the rotation matrices corresponding to the given quaternions""" + utils3d.numpy.transforms.quaternion_to_matrix + +@overload +def axis_angle_to_matrix(axis_angle: numpy_.ndarray, eps: float = 1e-12) -> numpy_.ndarray: + """Convert axis-angle representation (rotation vector) to rotation matrix, whose direction is the axis of rotation and length is the angle of rotation + +Args: + axis_angle (np.ndarray): shape (..., 3), axis-angle vcetors + +Returns: + np.ndarray: shape (..., 3, 3) The rotation matrices for the given axis-angle parameters""" + utils3d.numpy.transforms.axis_angle_to_matrix + +@overload +def matrix_to_quaternion(rot_mat: numpy_.ndarray, eps: float = 1e-12) -> numpy_.ndarray: + 
"""Convert 3x3 rotation matrix to quaternion (w, x, y, z) + +Args: + rot_mat (np.ndarray): shape (..., 3, 3), the rotation matrices to convert + +Returns: + np.ndarray: shape (..., 4), the quaternions corresponding to the given rotation matrices""" + utils3d.numpy.transforms.matrix_to_quaternion + +@overload +def extrinsics_to_essential(extrinsics: numpy_.ndarray): + """extrinsics matrix `[[R, t] [0, 0, 0, 1]]` such that `x' = R (x - t)` to essential matrix such that `x' E x = 0` + +Args: + extrinsics (np.ndaray): [..., 4, 4] extrinsics matrix + +Returns: + (np.ndaray): [..., 3, 3] essential matrix""" + utils3d.numpy.transforms.extrinsics_to_essential + +@overload +def euler_axis_angle_rotation(axis: str, angle: numpy_.ndarray) -> numpy_.ndarray: + """Return the rotation matrices for one of the rotations about an axis +of which Euler angles describe, for each value of the angle given. + +Args: + axis: Axis label "X" or "Y or "Z". + angle: any shape tensor of Euler angles in radians + +Returns: + Rotation matrices as tensor of shape (..., 3, 3).""" + utils3d.numpy.transforms.euler_axis_angle_rotation + +@overload +def euler_angles_to_matrix(euler_angles: numpy_.ndarray, convention: str = 'XYZ') -> numpy_.ndarray: + """Convert rotations given as Euler angles in radians to rotation matrices. + +Args: + euler_angles: Euler angles in radians as ndarray of shape (..., 3), XYZ + convention: permutation of "X", "Y" or "Z", representing the order of Euler rotations to apply. 
+ +Returns: + Rotation matrices as ndarray of shape (..., 3, 3).""" + utils3d.numpy.transforms.euler_angles_to_matrix + +@overload +def skew_symmetric(v: numpy_.ndarray): + """Skew symmetric matrix from a 3D vector""" + utils3d.numpy.transforms.skew_symmetric + +@overload +def rotation_matrix_from_vectors(v1: numpy_.ndarray, v2: numpy_.ndarray): + """Rotation matrix that rotates v1 to v2""" + utils3d.numpy.transforms.rotation_matrix_from_vectors + +@overload +def ray_intersection(p1: numpy_.ndarray, d1: numpy_.ndarray, p2: numpy_.ndarray, d2: numpy_.ndarray): + """Compute the intersection/closest point of two D-dimensional rays +If the rays are intersecting, the closest point is the intersection point. + +Args: + p1 (np.ndarray): (..., D) origin of ray 1 + d1 (np.ndarray): (..., D) direction of ray 1 + p2 (np.ndarray): (..., D) origin of ray 2 + d2 (np.ndarray): (..., D) direction of ray 2 + +Returns: + (np.ndarray): (..., N) intersection point""" + utils3d.numpy.transforms.ray_intersection + +@overload +def se3_matrix(R: numpy_.ndarray, t: numpy_.ndarray) -> numpy_.ndarray: + """Convert rotation matrix and translation vector to 4x4 transformation matrix. + +Args: + R (np.ndarray): [..., 3, 3] rotation matrix + t (np.ndarray): [..., 3] translation vector + +Returns: + np.ndarray: [..., 4, 4] transformation matrix""" + utils3d.numpy.transforms.se3_matrix + +@overload +def slerp_quaternion(q1: numpy_.ndarray, q2: numpy_.ndarray, t: numpy_.ndarray) -> numpy_.ndarray: + """Spherical linear interpolation between two unit quaternions. + +Args: + q1 (np.ndarray): [..., d] unit vector 1 + q2 (np.ndarray): [..., d] unit vector 2 + t (np.ndarray): [...] interpolation parameter in [0, 1] + +Returns: + np.ndarray: [..., 3] interpolated unit vector""" + utils3d.numpy.transforms.slerp_quaternion + +@overload +def slerp_vector(v1: numpy_.ndarray, v2: numpy_.ndarray, t: numpy_.ndarray) -> numpy_.ndarray: + """Spherical linear interpolation between two unit vectors. 
The vectors are assumed to be normalized. + +Args: + v1 (np.ndarray): [..., d] unit vector 1 + v2 (np.ndarray): [..., d] unit vector 2 + t (np.ndarray): [...] interpolation parameter in [0, 1] + +Returns: + np.ndarray: [..., d] interpolated unit vector""" + utils3d.numpy.transforms.slerp_vector + +@overload +def lerp(x1: numpy_.ndarray, x2: numpy_.ndarray, t: numpy_.ndarray) -> numpy_.ndarray: + """Linear interpolation between two vectors. + +Args: + x1 (np.ndarray): [..., d] vector 1 + x2 (np.ndarray): [..., d] vector 2 + t (np.ndarray): [...] interpolation parameter. [0, 1] for interpolation between x1 and x2, otherwise for extrapolation. + +Returns: + np.ndarray: [..., d] interpolated vector""" + utils3d.numpy.transforms.lerp + +@overload +def lerp_se3_matrix(T1: numpy_.ndarray, T2: numpy_.ndarray, t: numpy_.ndarray) -> numpy_.ndarray: + """Linear interpolation between two SE(3) matrices. + +Args: + T1 (np.ndarray): [..., 4, 4] SE(3) matrix 1 + T2 (np.ndarray): [..., 4, 4] SE(3) matrix 2 + t (np.ndarray): [...] interpolation parameter in [0, 1] + +Returns: + np.ndarray: [..., 4, 4] interpolated SE(3) matrix""" + utils3d.numpy.transforms.lerp_se3_matrix + +@overload +def piecewise_lerp(x: numpy_.ndarray, t: numpy_.ndarray, s: numpy_.ndarray, extrapolation_mode: Literal['constant', 'linear'] = 'constant') -> numpy_.ndarray: + """Linear spline interpolation. + +### Parameters: +- `x`: np.ndarray, shape (n, d): the values of data points. +- `t`: np.ndarray, shape (n,): the times of the data points. +- `s`: np.ndarray, shape (m,): the times to be interpolated. +- `extrapolation_mode`: str, the mode of extrapolation. 'constant' means extrapolate the boundary values, 'linear' means extrapolate linearly. 
+ +### Returns: +- `y`: np.ndarray, shape (..., m, d): the interpolated values.""" + utils3d.numpy.transforms.piecewise_lerp + +@overload +def piecewise_lerp_se3_matrix(T: numpy_.ndarray, t: numpy_.ndarray, s: numpy_.ndarray, extrapolation_mode: Literal['constant', 'linear'] = 'constant') -> numpy_.ndarray: + """Linear spline interpolation for SE(3) matrices. + +### Parameters: +- `T`: np.ndarray, shape (n, 4, 4): the SE(3) matrices. +- `t`: np.ndarray, shape (n,): the times of the data points. +- `s`: np.ndarray, shape (m,): the times to be interpolated. +- `extrapolation_mode`: str, the mode of extrapolation. 'constant' means extrapolate the boundary values, 'linear' means extrapolate linearly. + +### Returns: +- `T_interp`: np.ndarray, shape (..., m, 4, 4): the interpolated SE(3) matrices.""" + utils3d.numpy.transforms.piecewise_lerp_se3_matrix + +@overload +def apply_transform(T: numpy_.ndarray, x: numpy_.ndarray) -> numpy_.ndarray: + """Apply SE(3) transformation to a point or a set of points. + +### Parameters: +- `T`: np.ndarray, shape (..., 4, 4): the SE(3) matrix. +- `x`: np.ndarray, shape (..., 3): the point or a set of points to be transformed. + +### Returns: +- `x_transformed`: np.ndarray, shape (..., 3): the transformed point or a set of points.""" + utils3d.numpy.transforms.apply_transform + +@overload +def linear_spline_interpolate(x: numpy_.ndarray, t: numpy_.ndarray, s: numpy_.ndarray, extrapolation_mode: Literal['constant', 'linear'] = 'constant') -> numpy_.ndarray: + """Linear spline interpolation. + +### Parameters: +- `x`: np.ndarray, shape (n, d): the values of data points. +- `t`: np.ndarray, shape (n,): the times of the data points. +- `s`: np.ndarray, shape (m,): the times to be interpolated. +- `extrapolation_mode`: str, the mode of extrapolation. 'constant' means extrapolate the boundary values, 'linear' means extrapolate linearly. 
+ +### Returns: +- `y`: np.ndarray, shape (..., m, d): the interpolated values.""" + utils3d.numpy.spline.linear_spline_interpolate + +@overload +def RastContext(*args, **kwargs): + utils3d.numpy.rasterization.RastContext + +@overload +def rasterize_triangle_faces(ctx: utils3d.numpy.rasterization.RastContext, vertices: numpy_.ndarray, faces: numpy_.ndarray, attr: numpy_.ndarray, width: int, height: int, transform: numpy_.ndarray = None, cull_backface: bool = True, return_depth: bool = False, image: numpy_.ndarray = None, depth: numpy_.ndarray = None) -> Tuple[numpy_.ndarray, numpy_.ndarray]: + """Rasterize vertex attribute. + +Args: + vertices (np.ndarray): [N, 3] + faces (np.ndarray): [T, 3] + attr (np.ndarray): [N, C] + width (int): width of rendered image + height (int): height of rendered image + transform (np.ndarray): [4, 4] model-view-projection transformation matrix. + cull_backface (bool): whether to cull backface + image: (np.ndarray): [H, W, C] background image + depth: (np.ndarray): [H, W] background depth + +Returns: + image (np.ndarray): [H, W, C] rendered image + depth (np.ndarray): [H, W] screen space depth, ranging from 0 to 1. If return_depth is False, it is None.""" + utils3d.numpy.rasterization.rasterize_triangle_faces + +@overload +def rasterize_edges(ctx: utils3d.numpy.rasterization.RastContext, vertices: numpy_.ndarray, edges: numpy_.ndarray, attr: numpy_.ndarray, width: int, height: int, transform: numpy_.ndarray = None, line_width: float = 1.0, return_depth: bool = False, image: numpy_.ndarray = None, depth: numpy_.ndarray = None) -> Tuple[numpy_.ndarray, ...]: + """Rasterize vertex attribute. + +Args: + vertices (np.ndarray): [N, 3] + faces (np.ndarray): [T, 3] + attr (np.ndarray): [N, C] + width (int): width of rendered image + height (int): height of rendered image + transform (np.ndarray): [4, 4] model-view-projection matrix + line_width (float): width of line. Defaults to 1.0. 
NOTE: Values other than 1.0 may not work across all platforms. + cull_backface (bool): whether to cull backface + +Returns: + image (np.ndarray): [H, W, C] rendered image + depth (np.ndarray): [H, W] screen space depth, ranging from 0 to 1. If return_depth is False, it is None.""" + utils3d.numpy.rasterization.rasterize_edges + +@overload +def texture(ctx: utils3d.numpy.rasterization.RastContext, uv: numpy_.ndarray, texture: numpy_.ndarray, interpolation: str = 'linear', wrap: str = 'clamp') -> numpy_.ndarray: + """Given an UV image, texturing from the texture map""" + utils3d.numpy.rasterization.texture + +@overload +def warp_image_by_depth(ctx: utils3d.numpy.rasterization.RastContext, src_depth: numpy_.ndarray, src_image: numpy_.ndarray = None, width: int = None, height: int = None, *, extrinsics_src: numpy_.ndarray = None, extrinsics_tgt: numpy_.ndarray = None, intrinsics_src: numpy_.ndarray = None, intrinsics_tgt: numpy_.ndarray = None, near: float = 0.1, far: float = 100.0, cull_backface: bool = True, ssaa: int = 1, return_depth: bool = False) -> Tuple[numpy_.ndarray, ...]: + """Warp image by depth map. + +Args: + ctx (RastContext): rasterizer context + src_depth (np.ndarray): [H, W] + src_image (np.ndarray, optional): [H, W, C]. The image to warp. Defaults to None (use uv coordinates). + width (int, optional): width of the output image. None to use depth map width. Defaults to None. + height (int, optional): height of the output image. None to use depth map height. Defaults to None. + extrinsics_src (np.ndarray, optional): extrinsics matrix of the source camera. Defaults to None (identity). + extrinsics_tgt (np.ndarray, optional): extrinsics matrix of the target camera. Defaults to None (identity). + intrinsics_src (np.ndarray, optional): intrinsics matrix of the source camera. Defaults to None (use the same as intrinsics_tgt). + intrinsics_tgt (np.ndarray, optional): intrinsics matrix of the target camera. Defaults to None (use the same as intrinsics_src). 
+ cull_backface (bool, optional): whether to cull backface. Defaults to True. + ssaa (int, optional): super sampling anti-aliasing. Defaults to 1. + +Returns: + tgt_image (np.ndarray): [H, W, C] warped image (or uv coordinates if image is None). + tgt_depth (np.ndarray): [H, W] screen space depth, ranging from 0 to 1. If return_depth is False, it is None.""" + utils3d.numpy.rasterization.warp_image_by_depth + +@overload +def test_rasterization(ctx: utils3d.numpy.rasterization.RastContext): + """Test if rasterization works. It will render a cube with random colors and save it as a CHECKME.png file.""" + utils3d.numpy.rasterization.test_rasterization + +@overload +def triangulate(faces: torch_.Tensor, vertices: torch_.Tensor = None, backslash: bool = None) -> torch_.Tensor: + """Triangulate a polygonal mesh. + +Args: + faces (torch.Tensor): [..., L, P] polygonal faces + vertices (torch.Tensor, optional): [..., N, 3] 3-dimensional vertices. + If given, the triangulation is performed according to the distance + between vertices. Defaults to None. + backslash (torch.Tensor, optional): [..., L] boolean array indicating + how to triangulate the quad faces. Defaults to None. 
+ + +Returns: + (torch.Tensor): [L * (P - 2), 3] triangular faces""" + utils3d.torch.mesh.triangulate + +@overload +def compute_face_normal(vertices: torch_.Tensor, faces: torch_.Tensor) -> torch_.Tensor: + """Compute face normals of a triangular mesh + +Args: + vertices (torch.Tensor): [..., N, 3] 3-dimensional vertices + faces (torch.Tensor): [..., T, 3] triangular face indices + +Returns: + normals (torch.Tensor): [..., T, 3] face normals""" + utils3d.torch.mesh.compute_face_normal + +@overload +def compute_face_angles(vertices: torch_.Tensor, faces: torch_.Tensor) -> torch_.Tensor: + """Compute face angles of a triangular mesh + +Args: + vertices (torch.Tensor): [..., N, 3] 3-dimensional vertices + faces (torch.Tensor): [T, 3] triangular face indices + +Returns: + angles (torch.Tensor): [..., T, 3] face angles""" + utils3d.torch.mesh.compute_face_angles + +@overload +def compute_vertex_normal(vertices: torch_.Tensor, faces: torch_.Tensor, face_normal: torch_.Tensor = None) -> torch_.Tensor: + """Compute vertex normals of a triangular mesh by averaging neighboring face normals + +Args: + vertices (torch.Tensor): [..., N, 3] 3-dimensional vertices + faces (torch.Tensor): [T, 3] triangular face indices + face_normal (torch.Tensor, optional): [..., T, 3] face normals. + None to compute face normals from vertices and faces. Defaults to None. + +Returns: + normals (torch.Tensor): [..., N, 3] vertex normals""" + utils3d.torch.mesh.compute_vertex_normal + +@overload +def compute_vertex_normal_weighted(vertices: torch_.Tensor, faces: torch_.Tensor, face_normal: torch_.Tensor = None) -> torch_.Tensor: + """Compute vertex normals of a triangular mesh by weighted sum of neighboring face normals +according to the angles + +Args: + vertices (torch.Tensor): [..., N, 3] 3-dimensional vertices + faces (torch.Tensor): [T, 3] triangular face indices + face_normal (torch.Tensor, optional): [..., T, 3] face normals. + None to compute face normals from vertices and faces.
Defaults to None. + +Returns: + normals (torch.Tensor): [..., N, 3] vertex normals""" + utils3d.torch.mesh.compute_vertex_normal_weighted + +@overload +def compute_edges(faces: torch_.Tensor) -> Tuple[torch_.Tensor, torch_.Tensor, torch_.Tensor]: + """Compute edges of a mesh. + +Args: + faces (torch.Tensor): [T, 3] triangular face indices + +Returns: + edges (torch.Tensor): [E, 2] edge indices + face2edge (torch.Tensor): [T, 3] mapping from face to edge + counts (torch.Tensor): [E] degree of each edge""" + utils3d.torch.mesh.compute_edges + +@overload +def compute_connected_components(faces: torch_.Tensor, edges: torch_.Tensor = None, face2edge: torch_.Tensor = None) -> List[torch_.Tensor]: + """Compute connected faces of a mesh. + +Args: + faces (torch.Tensor): [T, 3] triangular face indices + edges (torch.Tensor, optional): [E, 2] edge indices. Defaults to None. + face2edge (torch.Tensor, optional): [T, 3] mapping from face to edge. Defaults to None. + NOTE: If edges and face2edge are not provided, they will be computed. + +Returns: + components (List[torch.Tensor]): list of connected faces""" + utils3d.torch.mesh.compute_connected_components + +@overload +def compute_edge_connected_components(edges: torch_.Tensor) -> List[torch_.Tensor]: + """Compute connected edges of a mesh. + +Args: + edges (torch.Tensor): [E, 2] edge indices + +Returns: + components (List[torch.Tensor]): list of connected edges""" + utils3d.torch.mesh.compute_edge_connected_components + +@overload +def compute_boundarys(faces: torch_.Tensor, edges: torch_.Tensor = None, face2edge: torch_.Tensor = None, edge_degrees: torch_.Tensor = None) -> Tuple[List[torch_.Tensor], List[torch_.Tensor]]: + """Compute boundary edges of a mesh. + +Args: + faces (torch.Tensor): [T, 3] triangular face indices + edges (torch.Tensor): [E, 2] edge indices. + face2edge (torch.Tensor): [T, 3] mapping from face to edge. + edge_degrees (torch.Tensor): [E] degree of each edge. 
+ +Returns: + boundary_edge_indices (List[torch.Tensor]): list of boundary edge indices + boundary_face_indices (List[torch.Tensor]): list of boundary face indices""" + utils3d.torch.mesh.compute_boundarys + +@overload +def compute_dual_graph(face2edge: torch_.Tensor) -> Tuple[torch_.Tensor, torch_.Tensor]: + """Compute dual graph of a mesh. + +Args: + face2edge (torch.Tensor): [T, 3] mapping from face to edge. + +Returns: + dual_edges (torch.Tensor): [DE, 2] face indices of dual edges + dual_edge2edge (torch.Tensor): [DE] mapping from dual edge to edge""" + utils3d.torch.mesh.compute_dual_graph + +@overload +def remove_unreferenced_vertices(faces: torch_.Tensor, *vertice_attrs, return_indices: bool = False) -> Tuple[torch_.Tensor, ...]: + """Remove unreferenced vertices of a mesh. +Unreferenced vertices are removed, and the face indices are updated accordingly. + +Args: + faces (torch.Tensor): [T, P] face indices + *vertice_attrs: vertex attributes + +Returns: + faces (torch.Tensor): [T, P] face indices + *vertice_attrs: vertex attributes + indices (torch.Tensor, optional): [N] indices of vertices that are kept. Defaults to None.""" + utils3d.torch.mesh.remove_unreferenced_vertices + +@overload +def remove_corrupted_faces(faces: torch_.Tensor) -> torch_.Tensor: + """Remove corrupted faces (faces with duplicated vertices) + +Args: + faces (torch.Tensor): [T, 3] triangular face indices + +Returns: + torch.Tensor: [T_, 3] triangular face indices""" + utils3d.torch.mesh.remove_corrupted_faces + +@overload +def remove_isolated_pieces(vertices: torch_.Tensor, faces: torch_.Tensor, connected_components: List[torch_.Tensor] = None, thresh_num_faces: int = None, thresh_radius: float = None, thresh_boundary_ratio: float = None, remove_unreferenced: bool = True) -> Tuple[torch_.Tensor, torch_.Tensor]: + """Remove isolated pieces of a mesh. +Isolated pieces are removed, and the face indices are updated accordingly. 
+If no face is left, will return the largest connected component. + +Args: + vertices (torch.Tensor): [N, 3] 3-dimensional vertices + faces (torch.Tensor): [T, 3] triangular face indices + connected_components (List[torch.Tensor], optional): connected components of the mesh. If None, it will be computed. Defaults to None. + thresh_num_faces (int, optional): threshold of number of faces for isolated pieces. Defaults to None. + thresh_radius (float, optional): threshold of radius for isolated pieces. Defaults to None. + remove_unreferenced (bool, optional): remove unreferenced vertices after removing isolated pieces. Defaults to True. + +Returns: + vertices (torch.Tensor): [N_, 3] 3-dimensional vertices + faces (torch.Tensor): [T, 3] triangular face indices""" + utils3d.torch.mesh.remove_isolated_pieces + +@overload +def merge_duplicate_vertices(vertices: torch_.Tensor, faces: torch_.Tensor, tol: float = 1e-06) -> Tuple[torch_.Tensor, torch_.Tensor]: + """Merge duplicate vertices of a triangular mesh. +Duplicate vertices are merged by selecting one of them, and the face indices are updated accordingly. + +Args: + vertices (torch.Tensor): [N, 3] 3-dimensional vertices + faces (torch.Tensor): [T, 3] triangular face indices + tol (float, optional): tolerance for merging. Defaults to 1e-6. + +Returns: + vertices (torch.Tensor): [N_, 3] 3-dimensional vertices + faces (torch.Tensor): [T, 3] triangular face indices""" + utils3d.torch.mesh.merge_duplicate_vertices + +@overload +def subdivide_mesh_simple(vertices: torch_.Tensor, faces: torch_.Tensor, n: int = 1) -> Tuple[torch_.Tensor, torch_.Tensor]: + """Subdivide a triangular mesh by splitting each triangle into 4 smaller triangles. +NOTE: All original vertices are kept, and new vertices are appended to the end of the vertex list. + +Args: + vertices (torch.Tensor): [N, 3] 3-dimensional vertices + faces (torch.Tensor): [T, 3] triangular face indices + n (int, optional): number of subdivisions. Defaults to 1.
+ +Returns: + vertices (torch.Tensor): [N_, 3] subdivided 3-dimensional vertices + faces (torch.Tensor): [4 * T, 3] subdivided triangular face indices""" + utils3d.torch.mesh.subdivide_mesh_simple + +@overload +def compute_face_tbn(pos: torch_.Tensor, faces_pos: torch_.Tensor, uv: torch_.Tensor, faces_uv: torch_.Tensor, eps: float = 1e-07) -> torch_.Tensor: + """compute TBN matrix for each face + +Args: + pos (torch.Tensor): shape (..., N_pos, 3), positions + faces_pos (torch.Tensor): shape(T, 3) + uv (torch.Tensor): shape (..., N_uv, 3) uv coordinates, + faces_uv (torch.Tensor): shape(T, 3) + +Returns: + torch.Tensor: (..., T, 3, 3) TBN matrix for each face. Note TBN vectors are normalized but not necessarily orthogonal""" + utils3d.torch.mesh.compute_face_tbn + +@overload +def compute_vertex_tbn(faces_topo: torch_.Tensor, pos: torch_.Tensor, faces_pos: torch_.Tensor, uv: torch_.Tensor, faces_uv: torch_.Tensor) -> torch_.Tensor: + """compute TBN matrix for each face + +Args: + faces_topo (torch.Tensor): (T, 3), face indices of topology + pos (torch.Tensor): shape (..., N_pos, 3), positions + faces_pos (torch.Tensor): shape(T, 3) + uv (torch.Tensor): shape (..., N_uv, 3) uv coordinates, + faces_uv (torch.Tensor): shape(T, 3) + +Returns: + torch.Tensor: (..., V, 3, 3) TBN matrix for each face.
Note TBN vectors are normalized but not necessarily orthognal""" + utils3d.torch.mesh.compute_vertex_tbn + +@overload +def laplacian(vertices: torch_.Tensor, faces: torch_.Tensor, weight: str = 'uniform') -> torch_.Tensor: + """Laplacian smooth with cotangent weights + +Args: + vertices (torch.Tensor): shape (..., N, 3) + faces (torch.Tensor): shape (T, 3) + weight (str): 'uniform' or 'cotangent'""" + utils3d.torch.mesh.laplacian + +@overload +def laplacian_smooth_mesh(vertices: torch_.Tensor, faces: torch_.Tensor, weight: str = 'uniform', times: int = 5) -> torch_.Tensor: + """Laplacian smooth with cotangent weights + +Args: + vertices (torch.Tensor): shape (..., N, 3) + faces (torch.Tensor): shape (T, 3) + weight (str): 'uniform' or 'cotangent'""" + utils3d.torch.mesh.laplacian_smooth_mesh + +@overload +def taubin_smooth_mesh(vertices: torch_.Tensor, faces: torch_.Tensor, lambda_: float = 0.5, mu_: float = -0.51) -> torch_.Tensor: + """Taubin smooth mesh + +Args: + vertices (torch.Tensor): _description_ + faces (torch.Tensor): _description_ + lambda_ (float, optional): _description_. Defaults to 0.5. + mu_ (float, optional): _description_. Defaults to -0.51. + +Returns: + torch.Tensor: _description_""" + utils3d.torch.mesh.taubin_smooth_mesh + +@overload +def laplacian_hc_smooth_mesh(vertices: torch_.Tensor, faces: torch_.Tensor, times: int = 5, alpha: float = 0.5, beta: float = 0.5, weight: str = 'uniform'): + """HC algorithm from Improved Laplacian Smoothing of Noisy Surface Meshes by J.Vollmer et al. + """ + utils3d.torch.mesh.laplacian_hc_smooth_mesh + +@overload +def get_rays(extrinsics: torch_.Tensor, intrinsics: torch_.Tensor, uv: torch_.Tensor) -> Tuple[torch_.Tensor, torch_.Tensor]: + """Args: + extrinsics: (..., 4, 4) extrinsics matrices. + intrinsics: (..., 3, 3) intrinsics matrices. + uv: (..., n_rays, 2) uv coordinates of the rays. + +Returns: + rays_o: (..., 1, 3) ray origins + rays_d: (..., n_rays, 3) ray directions. 
+ NOTE: ray directions are NOT normalized. They actually make rays_o + rays_d * z = world coordinates, where z is the depth.""" + utils3d.torch.nerf.get_rays + +@overload +def get_image_rays(extrinsics: torch_.Tensor, intrinsics: torch_.Tensor, width: int, height: int) -> Tuple[torch_.Tensor, torch_.Tensor]: + """Args: + extrinsics: (..., 4, 4) extrinsics matrices. + intrinsics: (..., 3, 3) intrinsics matrices. + width: width of the image. + height: height of the image. + +Returns: + rays_o: (..., 1, 1, 3) ray origins + rays_d: (..., height, width, 3) ray directions. + NOTE: ray directions are NOT normalized. They actually make rays_o + rays_d * z = world coordinates, where z is the depth.""" + utils3d.torch.nerf.get_image_rays + +@overload +def get_mipnerf_cones(rays_o: torch_.Tensor, rays_d: torch_.Tensor, z_vals: torch_.Tensor, pixel_width: torch_.Tensor) -> Tuple[torch_.Tensor, torch_.Tensor]: + """Args: + rays_o: (..., n_rays, 3) ray origins + rays_d: (..., n_rays, 3) ray directions. + z_vals: (..., n_rays, n_samples) z values. + pixel_width: (...) pixel width. = 1 / (normalized focal length * width) + +Returns: + mu: (..., n_rays, n_samples, 3) cone mu. + sigma: (..., n_rays, n_samples, 3, 3) cone sigma.""" + utils3d.torch.nerf.get_mipnerf_cones + +@overload +def volume_rendering(color: torch_.Tensor, sigma: torch_.Tensor, z_vals: torch_.Tensor, ray_length: torch_.Tensor, rgb: bool = True, depth: bool = True) -> Tuple[torch_.Tensor, torch_.Tensor, torch_.Tensor]: + """Given color, sigma and z_vals (linear depth of the sampling points), render the volume. + +NOTE: By default, color and sigma should have one less sample than z_vals, in correspondence with the average value in intervals. +If queried colors are aligned with z_vals, we use trapezoidal rule to calculate the average values in intervals. + +Args: + color: (..., n_samples or n_samples - 1, 3) color values. + sigma: (..., n_samples or n_samples - 1) density values.
+ z_vals: (..., n_samples) z values. + ray_length: (...) length of the ray + +Returns: + rgb: (..., 3) rendered color values. + depth: (...) rendered depth values. + weights (..., n_samples) weights.""" + utils3d.torch.nerf.volume_rendering + +@overload +def bin_sample(size: Union[torch_.Size, Tuple[int, ...]], n_samples: int, min_value: numbers.Number, max_value: numbers.Number, spacing: Literal['linear', 'inverse_linear'], dtype: torch_.dtype = None, device: torch_.device = None) -> torch_.Tensor: + """Uniformly (or uniformly in inverse space) sample z values in `n_samples` bins in range [min_value, max_value]. +Args: + size: size of the rays + n_samples: number of samples to be sampled, also the number of bins + min_value: minimum value of the range + max_value: maximum value of the range + space: 'linear' or 'inverse_linear'. If 'inverse_linear', the sampling is uniform in inverse space. + +Returns: + z_rand: (*size, n_samples) sampled z values, sorted in ascending order.""" + utils3d.torch.nerf.bin_sample + +@overload +def importance_sample(z_vals: torch_.Tensor, weights: torch_.Tensor, n_samples: int) -> Tuple[torch_.Tensor, torch_.Tensor]: + """Importance sample z values. + +NOTE: By default, weights should have one less sample than z_vals, in correspondence with the intervals. +If weights has the same number of samples as z_vals, we use trapezoidal rule to calculate the average weights in intervals. + +Args: + z_vals: (..., n_rays, n_input_samples) z values, sorted in ascending order. + weights: (..., n_rays, n_input_samples or n_input_samples - 1) weights. + n_samples: number of output samples for importance sampling. 
+ +Returns: + z_importance: (..., n_rays, n_samples) importance sampled z values, unsorted.""" + utils3d.torch.nerf.importance_sample + +@overload +def nerf_render_rays(nerf: Union[Callable[[torch_.Tensor, torch_.Tensor], Tuple[torch_.Tensor, torch_.Tensor]], Tuple[Callable[[torch_.Tensor], Tuple[torch_.Tensor, torch_.Tensor]], Callable[[torch_.Tensor], Tuple[torch_.Tensor, torch_.Tensor]]]], rays_o: torch_.Tensor, rays_d: torch_.Tensor, *, return_dict: bool = False, n_coarse: int = 64, n_fine: int = 64, near: float = 0.1, far: float = 100.0, z_spacing: Literal['linear', 'inverse_linear'] = 'linear'): + """NeRF rendering of rays. Note that it supports arbitrary batch dimensions (denoted as `...`) + +Args: + nerf: nerf model, which takes (points, directions) as input and returns (color, density) as output. + If nerf is a tuple, it should be (nerf_coarse, nerf_fine), where nerf_coarse and nerf_fine are two nerf models for coarse and fine stages respectively. + + nerf args: + points: (..., n_rays, n_samples, 3) + directions: (..., n_rays, n_samples, 3) + nerf returns: + color: (..., n_rays, n_samples, 3) color values. + density: (..., n_rays, n_samples) density values. + + rays_o: (..., n_rays, 3) ray origins + rays_d: (..., n_rays, 3) ray directions. + pixel_width: (..., n_rays) pixel width. How to compute? pixel_width = 1 / (normalized focal length * width) + +Returns + if return_dict is False, return rendered rgb and depth for short cut. (If there are separate coarse and fine results, return fine results) + rgb: (..., n_rays, 3) rendered color values. + depth: (..., n_rays) rendered depth values. + else, return a dict. 
If `n_fine == 0` or `nerf` is a single model, the dict only contains coarse results: + ``` + {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..} + ``` + If there are two models for coarse and fine stages, the dict contains both coarse and fine results: + ``` + { + "coarse": {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..}, + "fine": {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..} + } + ```""" + utils3d.torch.nerf.nerf_render_rays + +@overload +def mipnerf_render_rays(mipnerf: Callable[[torch_.Tensor, torch_.Tensor, torch_.Tensor], Tuple[torch_.Tensor, torch_.Tensor]], rays_o: torch_.Tensor, rays_d: torch_.Tensor, pixel_width: torch_.Tensor, *, return_dict: bool = False, n_coarse: int = 64, n_fine: int = 64, uniform_ratio: float = 0.4, near: float = 0.1, far: float = 100.0, z_spacing: Literal['linear', 'inverse_linear'] = 'linear') -> Union[Tuple[torch_.Tensor, torch_.Tensor], Dict[str, torch_.Tensor]]: + """MipNeRF rendering. + +Args: + mipnerf: mipnerf model, which takes (points_mu, points_sigma) as input and returns (color, density) as output. + + mipnerf args: + points_mu: (..., n_rays, n_samples, 3) cone mu. + points_sigma: (..., n_rays, n_samples, 3, 3) cone sigma. + directions: (..., n_rays, n_samples, 3) + mipnerf returns: + color: (..., n_rays, n_samples, 3) color values. + density: (..., n_rays, n_samples) density values. + + rays_o: (..., n_rays, 3) ray origins + rays_d: (..., n_rays, 3) ray directions. + pixel_width: (..., n_rays) pixel width. How to compute? pixel_width = 1 / (normalized focal length * width) + +Returns + if return_dict is False, return rendered results only: (If `n_fine == 0`, return coarse results, otherwise return fine results) + rgb: (..., n_rays, 3) rendered color values. + depth: (..., n_rays) rendered depth values. + else, return a dict. 
If `n_fine == 0`, the dict only contains coarse results: + ``` + {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..} + ``` + If n_fine > 0, the dict contains both coarse and fine results: + ``` + { + "coarse": {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..}, + "fine": {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..} + } + ```""" + utils3d.torch.nerf.mipnerf_render_rays + +@overload +def nerf_render_view(nerf: torch_.Tensor, extrinsics: torch_.Tensor, intrinsics: torch_.Tensor, width: int, height: int, *, patchify: bool = False, patch_size: Tuple[int, int] = (64, 64), **options: Dict[str, Any]) -> Tuple[torch_.Tensor, torch_.Tensor]: + """NeRF rendering of views. Note that it supports arbitrary batch dimensions (denoted as `...`) + +Args: + extrinsics: (..., 4, 4) extrinsics matrices of the rendered views + intrinsics (optional): (..., 3, 3) intrinsics matrices of the rendered views. + width (optional): image width of the rendered views. + height (optional): image height of the rendered views. + patchify (optional): If the image is too large, render it patch by patch + **options: rendering options. + +Returns: + rgb: (..., channels, height, width) rendered color values. + depth: (..., height, width) rendered depth values.""" + utils3d.torch.nerf.nerf_render_view + +@overload +def mipnerf_render_view(mipnerf: torch_.Tensor, extrinsics: torch_.Tensor, intrinsics: torch_.Tensor, width: int, height: int, *, patchify: bool = False, patch_size: Tuple[int, int] = (64, 64), **options: Dict[str, Any]) -> Tuple[torch_.Tensor, torch_.Tensor]: + """MipNeRF rendering of views. Note that it supports arbitrary batch dimensions (denoted as `...`) + +Args: + extrinsics: (..., 4, 4) extrinsics matrices of the rendered views + intrinsics (optional): (..., 3, 3) intrinsics matrices of the rendered views. + width (optional): image width of the rendered views.
+ height (optional): image height of the rendered views. + patchify (optional): If the image is too large, render it patch by patch + **options: rendering options. + +Returns: + rgb: (..., 3, height, width) rendered color values. + depth: (..., height, width) rendered depth values.""" + utils3d.torch.nerf.mipnerf_render_view + +@overload +def InstantNGP(view_dependent: bool = True, base_resolution: int = 16, finest_resolution: int = 2048, n_levels: int = 16, num_layers_density: int = 2, hidden_dim_density: int = 64, num_layers_color: int = 3, hidden_dim_color: int = 64, log2_hashmap_size: int = 19, bound: float = 1.0, color_channels: int = 3): + """An implementation of InstantNGP, Müller et. al., https://nvlabs.github.io/instant-ngp/. +Requires `tinycudann` package. +Install it by: +``` +pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch +```""" + utils3d.torch.nerf.InstantNGP + +@overload +def sliding_window_1d(x: torch_.Tensor, window_size: int, stride: int = 1, dim: int = -1) -> torch_.Tensor: + """Sliding window view of the input tensor. The dimension of the sliding window is appended to the end of the input tensor's shape. +NOTE: Since Pytorch has `unfold` function, 1D sliding window view is just a wrapper of it.""" + utils3d.torch.utils.sliding_window_1d + +@overload +def sliding_window_2d(x: torch_.Tensor, window_size: Union[int, Tuple[int, int]], stride: Union[int, Tuple[int, int]], dim: Union[int, Tuple[int, int]] = (-2, -1)) -> torch_.Tensor: + utils3d.torch.utils.sliding_window_2d + +@overload +def sliding_window_nd(x: torch_.Tensor, window_size: Tuple[int, ...], stride: Tuple[int, ...], dim: Tuple[int, ...]) -> torch_.Tensor: + utils3d.torch.utils.sliding_window_nd + +@overload +def image_uv(height: int, width: int, left: int = None, top: int = None, right: int = None, bottom: int = None, device: torch_.device = None, dtype: torch_.dtype = None) -> torch_.Tensor: + """Get image space UV grid, ranging in [0, 1]. 
+ +>>> image_uv(10, 10): +[[[0.05, 0.05], [0.15, 0.05], ..., [0.95, 0.05]], + [[0.05, 0.15], [0.15, 0.15], ..., [0.95, 0.15]], + ... ... ... + [[0.05, 0.95], [0.15, 0.95], ..., [0.95, 0.95]]] + +Args: + width (int): image width + height (int): image height + +Returns: + torch.Tensor: shape (height, width, 2)""" + utils3d.torch.utils.image_uv + +@overload +def image_pixel_center(height: int, width: int, left: int = None, top: int = None, right: int = None, bottom: int = None, dtype: torch_.dtype = None, device: torch_.device = None) -> torch_.Tensor: + """Get image pixel center coordinates, ranging in [0, width] and [0, height]. +`image[i, j]` has pixel center coordinates `(j + 0.5, i + 0.5)`. + +>>> image_pixel_center(10, 10): +[[[0.5, 0.5], [1.5, 0.5], ..., [9.5, 0.5]], + [[0.5, 1.5], [1.5, 1.5], ..., [9.5, 1.5]], + ... ... ... +[[0.5, 9.5], [1.5, 9.5], ..., [9.5, 9.5]]] + +Args: + width (int): image width + height (int): image height + +Returns: + torch.Tensor: shape (height, width, 2)""" + utils3d.torch.utils.image_pixel_center + +@overload +def image_mesh(height: int, width: int, mask: torch_.Tensor = None, device: torch_.device = None, dtype: torch_.dtype = None) -> Tuple[torch_.Tensor, torch_.Tensor]: + """Get a quad mesh regarding image pixel uv coordinates as vertices and image grid as faces. + +Args: + width (int): image width + height (int): image height + mask (torch.Tensor, optional): binary mask of shape (height, width), dtype=bool. Defaults to None. 
+ + +Returns: + uv (torch.Tensor): uv corresponding to pixels as described in image_uv() + faces (torch.Tensor): quad faces connecting neighboring pixels + indices (torch.Tensor, optional): indices of vertices in the original mesh""" + utils3d.torch.utils.image_mesh + +@overload +def chessboard(width: int, height: int, grid_size: int, color_a: torch_.Tensor, color_b: torch_.Tensor) -> torch_.Tensor: + """get a chessboard image + +Args: + width (int): image width + height (int): image height + grid_size (int): size of chessboard grid + color_a (torch.Tensor): shape (channels,), color of the grid at the top-left corner + color_b (torch.Tensor): shape (channels,), color in complementary grids + +Returns: + image (torch.Tensor): shape (height, width, channels), chessboard image""" + utils3d.torch.utils.chessboard + +@overload +def depth_edge(depth: torch_.Tensor, atol: float = None, rtol: float = None, kernel_size: int = 3, mask: torch_.Tensor = None) -> torch_.BoolTensor: + """Compute the edge mask of a depth map. The edge is defined as the pixels whose neighbors have a large difference in depth. + +Args: + depth (torch.Tensor): shape (..., height, width), linear depth map + atol (float): absolute tolerance + rtol (float): relative tolerance + +Returns: + edge (torch.Tensor): shape (..., height, width) of dtype torch.bool""" + utils3d.torch.utils.depth_edge + +@overload +def depth_aliasing(depth: torch_.Tensor, atol: float = None, rtol: float = None, kernel_size: int = 3, mask: torch_.Tensor = None) -> torch_.BoolTensor: + """Compute the map that indicates the aliasing of a depth map. The aliasing is defined as the pixels which are neither close to the maximum nor the minimum of its neighbors.
+Args: + depth (torch.Tensor): shape (..., height, width), linear depth map + atol (float): absolute tolerance + rtol (float): relative tolerance + +Returns: + edge (torch.Tensor): shape (..., height, width) of dtype torch.bool""" + utils3d.torch.utils.depth_aliasing + +@overload +def image_mesh_from_depth(depth: torch_.Tensor, extrinsics: torch_.Tensor = None, intrinsics: torch_.Tensor = None) -> Tuple[torch_.Tensor, torch_.Tensor]: + utils3d.torch.utils.image_mesh_from_depth + +@overload +def points_to_normals(point: torch_.Tensor, mask: torch_.Tensor = None) -> torch_.Tensor: + """Calculate normal map from point map. Value range is [-1, 1]. Normal direction in OpenGL identity camera's coordinate system. + +Args: + point (torch.Tensor): shape (..., height, width, 3), point map +Returns: + normal (torch.Tensor): shape (..., height, width, 3), normal map. """ + utils3d.torch.utils.points_to_normals + +@overload +def depth_to_points(depth: torch_.Tensor, intrinsics: torch_.Tensor, extrinsics: torch_.Tensor = None): + utils3d.torch.utils.depth_to_points + +@overload +def depth_to_normals(depth: torch_.Tensor, intrinsics: torch_.Tensor, mask: torch_.Tensor = None) -> torch_.Tensor: + """Calculate normal map from depth map. Value range is [-1, 1]. Normal direction in OpenGL identity camera's coordinate system. + +Args: + depth (torch.Tensor): shape (..., height, width), linear depth map + intrinsics (torch.Tensor): shape (..., 3, 3), intrinsics matrix +Returns: + normal (torch.Tensor): shape (..., 3, height, width), normal map. 
""" + utils3d.torch.utils.depth_to_normals + +@overload +def masked_min(input: torch_.Tensor, mask: torch_.BoolTensor, dim: int = None, keepdim: bool = False) -> Union[torch_.Tensor, Tuple[torch_.Tensor, torch_.Tensor]]: + """Similar to torch.min, but with mask + """ + utils3d.torch.utils.masked_min + +@overload +def masked_max(input: torch_.Tensor, mask: torch_.BoolTensor, dim: int = None, keepdim: bool = False) -> Union[torch_.Tensor, Tuple[torch_.Tensor, torch_.Tensor]]: + """Similar to torch.max, but with mask + """ + utils3d.torch.utils.masked_max + +@overload +def bounding_rect(mask: torch_.BoolTensor): + """get bounding rectangle of a mask + +Args: + mask (torch.Tensor): shape (..., height, width), mask + +Returns: + rect (torch.Tensor): shape (..., 4), bounding rectangle (left, top, right, bottom)""" + utils3d.torch.utils.bounding_rect + +@overload +def perspective(fov_y: Union[float, torch_.Tensor], aspect: Union[float, torch_.Tensor], near: Union[float, torch_.Tensor], far: Union[float, torch_.Tensor]) -> torch_.Tensor: + """Get OpenGL perspective matrix + +Args: + fov_y (float | torch.Tensor): field of view in y axis + aspect (float | torch.Tensor): aspect ratio + near (float | torch.Tensor): near plane to clip + far (float | torch.Tensor): far plane to clip + +Returns: + (torch.Tensor): [..., 4, 4] perspective matrix""" + utils3d.torch.transforms.perspective + +@overload +def perspective_from_fov(fov: Union[float, torch_.Tensor], width: Union[int, torch_.Tensor], height: Union[int, torch_.Tensor], near: Union[float, torch_.Tensor], far: Union[float, torch_.Tensor]) -> torch_.Tensor: + """Get OpenGL perspective matrix from field of view in largest dimension + +Args: + fov (float | torch.Tensor): field of view in largest dimension + width (int | torch.Tensor): image width + height (int | torch.Tensor): image height + near (float | torch.Tensor): near plane to clip + far (float | torch.Tensor): far plane to clip + +Returns: + (torch.Tensor): [..., 4, 4] 
perspective matrix""" + utils3d.torch.transforms.perspective_from_fov + +@overload +def perspective_from_fov_xy(fov_x: Union[float, torch_.Tensor], fov_y: Union[float, torch_.Tensor], near: Union[float, torch_.Tensor], far: Union[float, torch_.Tensor]) -> torch_.Tensor: + """Get OpenGL perspective matrix from field of view in x and y axis + +Args: + fov_x (float | torch.Tensor): field of view in x axis + fov_y (float | torch.Tensor): field of view in y axis + near (float | torch.Tensor): near plane to clip + far (float | torch.Tensor): far plane to clip + +Returns: + (torch.Tensor): [..., 4, 4] perspective matrix""" + utils3d.torch.transforms.perspective_from_fov_xy + +@overload +def intrinsics_from_focal_center(fx: Union[float, torch_.Tensor], fy: Union[float, torch_.Tensor], cx: Union[float, torch_.Tensor], cy: Union[float, torch_.Tensor]) -> torch_.Tensor: + """Get OpenCV intrinsics matrix + +Args: + focal_x (float | torch.Tensor): focal length in x axis + focal_y (float | torch.Tensor): focal length in y axis + cx (float | torch.Tensor): principal point in x axis + cy (float | torch.Tensor): principal point in y axis + +Returns: + (torch.Tensor): [..., 3, 3] OpenCV intrinsics matrix""" + utils3d.torch.transforms.intrinsics_from_focal_center + +@overload +def intrinsics_from_fov(fov_max: Union[float, torch_.Tensor] = None, fov_min: Union[float, torch_.Tensor] = None, fov_x: Union[float, torch_.Tensor] = None, fov_y: Union[float, torch_.Tensor] = None, width: Union[int, torch_.Tensor] = None, height: Union[int, torch_.Tensor] = None) -> torch_.Tensor: + """Get normalized OpenCV intrinsics matrix from given field of view. 
+You can provide either fov_max, fov_min, fov_x or fov_y + +Args: + width (int | torch.Tensor): image width + height (int | torch.Tensor): image height + fov_max (float | torch.Tensor): field of view in largest dimension + fov_min (float | torch.Tensor): field of view in smallest dimension + fov_x (float | torch.Tensor): field of view in x axis + fov_y (float | torch.Tensor): field of view in y axis + +Returns: + (torch.Tensor): [..., 3, 3] OpenCV intrinsics matrix""" + utils3d.torch.transforms.intrinsics_from_fov + +@overload +def intrinsics_from_fov_xy(fov_x: Union[float, torch_.Tensor], fov_y: Union[float, torch_.Tensor]) -> torch_.Tensor: + """Get OpenCV intrinsics matrix from field of view in x and y axis + +Args: + fov_x (float | torch.Tensor): field of view in x axis + fov_y (float | torch.Tensor): field of view in y axis + +Returns: + (torch.Tensor): [..., 3, 3] OpenCV intrinsics matrix""" + utils3d.torch.transforms.intrinsics_from_fov_xy + +@overload +def focal_to_fov(focal: torch_.Tensor): + utils3d.torch.transforms.focal_to_fov + +@overload +def fov_to_focal(fov: torch_.Tensor): + utils3d.torch.transforms.fov_to_focal + +@overload +def intrinsics_to_fov(intrinsics: torch_.Tensor) -> Tuple[torch_.Tensor, torch_.Tensor]: + """NOTE: approximate FOV by assuming centered principal point""" + utils3d.torch.transforms.intrinsics_to_fov + +@overload +def view_look_at(eye: torch_.Tensor, look_at: torch_.Tensor, up: torch_.Tensor) -> torch_.Tensor: + """Get OpenGL view matrix looking at something + +Args: + eye (torch.Tensor): [..., 3] the eye position + look_at (torch.Tensor): [..., 3] the position to look at + up (torch.Tensor): [..., 3] head up direction (y axis in screen space). 
Not necessarily othogonal to view direction + +Returns: + (torch.Tensor): [..., 4, 4], view matrix""" + utils3d.torch.transforms.view_look_at + +@overload +def extrinsics_look_at(eye: torch_.Tensor, look_at: torch_.Tensor, up: torch_.Tensor) -> torch_.Tensor: + """Get OpenCV extrinsics matrix looking at something + +Args: + eye (torch.Tensor): [..., 3] the eye position + look_at (torch.Tensor): [..., 3] the position to look at + up (torch.Tensor): [..., 3] head up direction (-y axis in screen space). Not necessarily othogonal to view direction + +Returns: + (torch.Tensor): [..., 4, 4], extrinsics matrix""" + utils3d.torch.transforms.extrinsics_look_at + +@overload +def perspective_to_intrinsics(perspective: torch_.Tensor) -> torch_.Tensor: + """OpenGL perspective matrix to OpenCV intrinsics + +Args: + perspective (torch.Tensor): [..., 4, 4] OpenGL perspective matrix + +Returns: + (torch.Tensor): shape [..., 3, 3] OpenCV intrinsics""" + utils3d.torch.transforms.perspective_to_intrinsics + +@overload +def intrinsics_to_perspective(intrinsics: torch_.Tensor, near: Union[float, torch_.Tensor], far: Union[float, torch_.Tensor]) -> torch_.Tensor: + """OpenCV intrinsics to OpenGL perspective matrix + +Args: + intrinsics (torch.Tensor): [..., 3, 3] OpenCV intrinsics matrix + near (float | torch.Tensor): [...] near plane to clip + far (float | torch.Tensor): [...] 
far plane to clip +Returns: + (torch.Tensor): [..., 4, 4] OpenGL perspective matrix""" + utils3d.torch.transforms.intrinsics_to_perspective + +@overload +def extrinsics_to_view(extrinsics: torch_.Tensor) -> torch_.Tensor: + """OpenCV camera extrinsics to OpenGL view matrix + +Args: + extrinsics (torch.Tensor): [..., 4, 4] OpenCV camera extrinsics matrix + +Returns: + (torch.Tensor): [..., 4, 4] OpenGL view matrix""" + utils3d.torch.transforms.extrinsics_to_view + +@overload +def view_to_extrinsics(view: torch_.Tensor) -> torch_.Tensor: + """OpenGL view matrix to OpenCV camera extrinsics + +Args: + view (torch.Tensor): [..., 4, 4] OpenGL view matrix + +Returns: + (torch.Tensor): [..., 4, 4] OpenCV camera extrinsics matrix""" + utils3d.torch.transforms.view_to_extrinsics + +@overload +def normalize_intrinsics(intrinsics: torch_.Tensor, width: Union[int, torch_.Tensor], height: Union[int, torch_.Tensor]) -> torch_.Tensor: + """Normalize camera intrinsics(s) to uv space + +Args: + intrinsics (torch.Tensor): [..., 3, 3] camera intrinsics(s) to normalize + width (int | torch.Tensor): [...] image width(s) + height (int | torch.Tensor): [...] image height(s) + +Returns: + (torch.Tensor): [..., 3, 3] normalized camera intrinsics(s)""" + utils3d.torch.transforms.normalize_intrinsics + +@overload +def crop_intrinsics(intrinsics: torch_.Tensor, width: Union[int, torch_.Tensor], height: Union[int, torch_.Tensor], left: Union[int, torch_.Tensor], top: Union[int, torch_.Tensor], crop_width: Union[int, torch_.Tensor], crop_height: Union[int, torch_.Tensor]) -> torch_.Tensor: + """Evaluate the new intrinsics(s) after crop the image: cropped_img = img[top:top+crop_height, left:left+crop_width] + +Args: + intrinsics (torch.Tensor): [..., 3, 3] camera intrinsics(s) to crop + width (int | torch.Tensor): [...] image width(s) + height (int | torch.Tensor): [...] image height(s) + left (int | torch.Tensor): [...] left crop boundary + top (int | torch.Tensor): [...] 
top crop boundary + crop_width (int | torch.Tensor): [...] crop width + crop_height (int | torch.Tensor): [...] crop height + +Returns: + (torch.Tensor): [..., 3, 3] cropped camera intrinsics(s)""" + utils3d.torch.transforms.crop_intrinsics + +@overload +def pixel_to_uv(pixel: torch_.Tensor, width: Union[int, torch_.Tensor], height: Union[int, torch_.Tensor]) -> torch_.Tensor: + """Args: + pixel (torch.Tensor): [..., 2] pixel coordinrates defined in image space, x range is (0, W - 1), y range is (0, H - 1) + width (int | torch.Tensor): [...] image width(s) + height (int | torch.Tensor): [...] image height(s) + +Returns: + (torch.Tensor): [..., 2] pixel coordinrates defined in uv space, the range is (0, 1)""" + utils3d.torch.transforms.pixel_to_uv + +@overload +def pixel_to_ndc(pixel: torch_.Tensor, width: Union[int, torch_.Tensor], height: Union[int, torch_.Tensor]) -> torch_.Tensor: + """Args: + pixel (torch.Tensor): [..., 2] pixel coordinrates defined in image space, x range is (0, W - 1), y range is (0, H - 1) + width (int | torch.Tensor): [...] image width(s) + height (int | torch.Tensor): [...] image height(s) + +Returns: + (torch.Tensor): [..., 2] pixel coordinrates defined in ndc space, the range is (-1, 1)""" + utils3d.torch.transforms.pixel_to_ndc + +@overload +def uv_to_pixel(uv: torch_.Tensor, width: Union[int, torch_.Tensor], height: Union[int, torch_.Tensor]) -> torch_.Tensor: + """Args: + uv (torch.Tensor): [..., 2] pixel coordinrates defined in uv space, the range is (0, 1) + width (int | torch.Tensor): [...] image width(s) + height (int | torch.Tensor): [...] image height(s) + +Returns: + (torch.Tensor): [..., 2] pixel coordinrates defined in uv space, the range is (0, 1)""" + utils3d.torch.transforms.uv_to_pixel + +@overload +def project_depth(depth: torch_.Tensor, near: Union[float, torch_.Tensor], far: Union[float, torch_.Tensor]) -> torch_.Tensor: + """Project linear depth to depth value in screen space + +Args: + depth (torch.Tensor): [...] 
depth value + near (float | torch.Tensor): [...] near plane to clip + far (float | torch.Tensor): [...] far plane to clip + +Returns: + (torch.Tensor): [..., 1] depth value in screen space, value ranging in [0, 1]""" + utils3d.torch.transforms.project_depth + +@overload +def depth_buffer_to_linear(depth: torch_.Tensor, near: Union[float, torch_.Tensor], far: Union[float, torch_.Tensor]) -> torch_.Tensor: + """Linearize depth value to linear depth + +Args: + depth (torch.Tensor): [...] screen depth value, ranging in [0, 1] + near (float | torch.Tensor): [...] near plane to clip + far (float | torch.Tensor): [...] far plane to clip + +Returns: + (torch.Tensor): [...] linear depth""" + utils3d.torch.transforms.depth_buffer_to_linear + +@overload +def project_gl(points: torch_.Tensor, model: torch_.Tensor = None, view: torch_.Tensor = None, perspective: torch_.Tensor = None) -> Tuple[torch_.Tensor, torch_.Tensor]: + """Project 3D points to 2D following the OpenGL convention (except for row major matrice) + +Args: + points (torch.Tensor): [..., N, 3 or 4] 3D points to project, if the last + dimension is 4, the points are assumed to be in homogeneous coordinates + model (torch.Tensor): [..., 4, 4] model matrix + view (torch.Tensor): [..., 4, 4] view matrix + perspective (torch.Tensor): [..., 4, 4] perspective matrix + +Returns: + scr_coord (torch.Tensor): [..., N, 3] screen space coordinates, value ranging in [0, 1]. + The origin (0., 0., 0.) 
is corresponding to the left & bottom & nearest + linear_depth (torch.Tensor): [..., N] linear depth""" + utils3d.torch.transforms.project_gl + +@overload +def project_cv(points: torch_.Tensor, extrinsics: torch_.Tensor = None, intrinsics: torch_.Tensor = None) -> Tuple[torch_.Tensor, torch_.Tensor]: + """Project 3D points to 2D following the OpenCV convention + +Args: + points (torch.Tensor): [..., N, 3] or [..., N, 4] 3D points to project, if the last + dimension is 4, the points are assumed to be in homogeneous coordinates + extrinsics (torch.Tensor): [..., 4, 4] extrinsics matrix + intrinsics (torch.Tensor): [..., 3, 3] intrinsics matrix + +Returns: + uv_coord (torch.Tensor): [..., N, 2] uv coordinates, value ranging in [0, 1]. + The origin (0., 0.) is corresponding to the left & top + linear_depth (torch.Tensor): [..., N] linear depth""" + utils3d.torch.transforms.project_cv + +@overload +def unproject_gl(screen_coord: torch_.Tensor, model: torch_.Tensor = None, view: torch_.Tensor = None, perspective: torch_.Tensor = None) -> torch_.Tensor: + """Unproject screen space coordinates to 3D view space following the OpenGL convention (except for row major matrice) + +Args: + screen_coord (torch.Tensor): [... N, 3] screen space coordinates, value ranging in [0, 1]. + The origin (0., 0., 0.) is corresponding to the left & bottom & nearest + model (torch.Tensor): [..., 4, 4] model matrix + view (torch.Tensor): [..., 4, 4] view matrix + perspective (torch.Tensor): [..., 4, 4] perspective matrix + +Returns: + points (torch.Tensor): [..., N, 3] 3d points""" + utils3d.torch.transforms.unproject_gl + +@overload +def unproject_cv(uv_coord: torch_.Tensor, depth: torch_.Tensor = None, extrinsics: torch_.Tensor = None, intrinsics: torch_.Tensor = None) -> torch_.Tensor: + """Unproject uv coordinates to 3D view space following the OpenCV convention + +Args: + uv_coord (torch.Tensor): [..., N, 2] uv coordinates, value ranging in [0, 1]. + The origin (0., 0.) 
is corresponding to the left & top + depth (torch.Tensor): [..., N] depth value + extrinsics (torch.Tensor): [..., 4, 4] extrinsics matrix + intrinsics (torch.Tensor): [..., 3, 3] intrinsics matrix + +Returns: + points (torch.Tensor): [..., N, 3] 3d points""" + utils3d.torch.transforms.unproject_cv + +@overload +def skew_symmetric(v: torch_.Tensor): + """Skew symmetric matrix from a 3D vector""" + utils3d.torch.transforms.skew_symmetric + +@overload +def rotation_matrix_from_vectors(v1: torch_.Tensor, v2: torch_.Tensor): + """Rotation matrix that rotates v1 to v2""" + utils3d.torch.transforms.rotation_matrix_from_vectors + +@overload +def euler_axis_angle_rotation(axis: str, angle: torch_.Tensor) -> torch_.Tensor: + """Return the rotation matrices for one of the rotations about an axis +of which Euler angles describe, for each value of the angle given. + +Args: + axis: Axis label "X" or "Y or "Z". + angle: any shape tensor of Euler angles in radians + +Returns: + Rotation matrices as tensor of shape (..., 3, 3).""" + utils3d.torch.transforms.euler_axis_angle_rotation + +@overload +def euler_angles_to_matrix(euler_angles: torch_.Tensor, convention: str = 'XYZ') -> torch_.Tensor: + """Convert rotations given as Euler angles in radians to rotation matrices. + +Args: + euler_angles: Euler angles in radians as tensor of shape (..., 3), XYZ + convention: permutation of "X", "Y" or "Z", representing the order of Euler rotations to apply. + +Returns: + Rotation matrices as tensor of shape (..., 3, 3).""" + utils3d.torch.transforms.euler_angles_to_matrix + +@overload +def matrix_to_euler_angles(matrix: torch_.Tensor, convention: str) -> torch_.Tensor: + """Convert rotations given as rotation matrices to Euler angles in radians. +NOTE: The composition order eg. `XYZ` means `Rz * Ry * Rx` (like blender), instead of `Rx * Ry * Rz` (like pytorch3d) + +Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + convention: Convention string of three uppercase letters. 
+ +Returns: + Euler angles in radians as tensor of shape (..., 3), in the order of XYZ (like blender), instead of convention (like pytorch3d)""" + utils3d.torch.transforms.matrix_to_euler_angles + +@overload +def matrix_to_quaternion(rot_mat: torch_.Tensor, eps: float = 1e-12) -> torch_.Tensor: + """Convert 3x3 rotation matrix to quaternion (w, x, y, z) + +Args: + rot_mat (torch.Tensor): shape (..., 3, 3), the rotation matrices to convert + +Returns: + torch.Tensor: shape (..., 4), the quaternions corresponding to the given rotation matrices""" + utils3d.torch.transforms.matrix_to_quaternion + +@overload +def quaternion_to_matrix(quaternion: torch_.Tensor, eps: float = 1e-12) -> torch_.Tensor: + """Converts a batch of quaternions (w, x, y, z) to rotation matrices + +Args: + quaternion (torch.Tensor): shape (..., 4), the quaternions to convert + +Returns: + torch.Tensor: shape (..., 3, 3), the rotation matrices corresponding to the given quaternions""" + utils3d.torch.transforms.quaternion_to_matrix + +@overload +def matrix_to_axis_angle(rot_mat: torch_.Tensor, eps: float = 1e-12) -> torch_.Tensor: + """Convert a batch of 3x3 rotation matrices to axis-angle representation (rotation vector) + +Args: + rot_mat (torch.Tensor): shape (..., 3, 3), the rotation matrices to convert + +Returns: + torch.Tensor: shape (..., 3), the axis-angle vectors corresponding to the given rotation matrices""" + utils3d.torch.transforms.matrix_to_axis_angle + +@overload +def axis_angle_to_matrix(axis_angle: torch_.Tensor, eps: float = 1e-12) -> torch_.Tensor: + """Convert axis-angle representation (rotation vector) to rotation matrix, whose direction is the axis of rotation and length is the angle of rotation + +Args: + axis_angle (torch.Tensor): shape (..., 3), axis-angle vcetors + +Returns: + torch.Tensor: shape (..., 3, 3) The rotation matrices for the given axis-angle parameters""" + utils3d.torch.transforms.axis_angle_to_matrix + +@overload +def axis_angle_to_quaternion(axis_angle: 
torch_.Tensor, eps: float = 1e-12) -> torch_.Tensor: + """Convert axis-angle representation (rotation vector) to quaternion (w, x, y, z) + +Args: + axis_angle (torch.Tensor): shape (..., 3), axis-angle vcetors + +Returns: + torch.Tensor: shape (..., 4) The quaternions for the given axis-angle parameters""" + utils3d.torch.transforms.axis_angle_to_quaternion + +@overload +def quaternion_to_axis_angle(quaternion: torch_.Tensor, eps: float = 1e-12) -> torch_.Tensor: + """Convert a batch of quaternions (w, x, y, z) to axis-angle representation (rotation vector) + +Args: + quaternion (torch.Tensor): shape (..., 4), the quaternions to convert + +Returns: + torch.Tensor: shape (..., 3), the axis-angle vectors corresponding to the given quaternions""" + utils3d.torch.transforms.quaternion_to_axis_angle + +@overload +def slerp(rot_mat_1: torch_.Tensor, rot_mat_2: torch_.Tensor, t: Union[numbers.Number, torch_.Tensor]) -> torch_.Tensor: + """Spherical linear interpolation between two rotation matrices + +Args: + rot_mat_1 (torch.Tensor): shape (..., 3, 3), the first rotation matrix + rot_mat_2 (torch.Tensor): shape (..., 3, 3), the second rotation matrix + t (torch.Tensor): scalar or shape (...,), the interpolation factor + +Returns: + torch.Tensor: shape (..., 3, 3), the interpolated rotation matrix""" + utils3d.torch.transforms.slerp + +@overload +def interpolate_extrinsics(ext1: torch_.Tensor, ext2: torch_.Tensor, t: Union[numbers.Number, torch_.Tensor]) -> torch_.Tensor: + """Interpolate extrinsics between two camera poses. Linear interpolation for translation, spherical linear interpolation for rotation. 
+ +Args: + ext1 (torch.Tensor): shape (..., 4, 4), the first camera pose + ext2 (torch.Tensor): shape (..., 4, 4), the second camera pose + t (torch.Tensor): scalar or shape (...,), the interpolation factor + +Returns: + torch.Tensor: shape (..., 4, 4), the interpolated camera pose""" + utils3d.torch.transforms.interpolate_extrinsics + +@overload +def interpolate_view(view1: torch_.Tensor, view2: torch_.Tensor, t: Union[numbers.Number, torch_.Tensor]): + """Interpolate view matrices between two camera poses. Linear interpolation for translation, spherical linear interpolation for rotation. + +Args: + ext1 (torch.Tensor): shape (..., 4, 4), the first camera pose + ext2 (torch.Tensor): shape (..., 4, 4), the second camera pose + t (torch.Tensor): scalar or shape (...,), the interpolation factor + +Returns: + torch.Tensor: shape (..., 4, 4), the interpolated camera pose""" + utils3d.torch.transforms.interpolate_view + +@overload +def extrinsics_to_essential(extrinsics: torch_.Tensor): + """extrinsics matrix `[[R, t] [0, 0, 0, 1]]` such that `x' = R (x - t)` to essential matrix such that `x' E x = 0` + +Args: + extrinsics (torch.Tensor): [..., 4, 4] extrinsics matrix + +Returns: + (torch.Tensor): [..., 3, 3] essential matrix""" + utils3d.torch.transforms.extrinsics_to_essential + +@overload +def to4x4(R: torch_.Tensor, t: torch_.Tensor): + """Compose rotation matrix and translation vector to 4x4 transformation matrix + +Args: + R (torch.Tensor): [..., 3, 3] rotation matrix + t (torch.Tensor): [..., 3] translation vector + +Returns: + (torch.Tensor): [..., 4, 4] transformation matrix""" + utils3d.torch.transforms.to4x4 + +@overload +def rotation_matrix_2d(theta: Union[float, torch_.Tensor]): + """2x2 matrix for 2D rotation + +Args: + theta (float | torch.Tensor): rotation angle in radians, arbitrary shape (...,) + +Returns: + (torch.Tensor): (..., 2, 2) rotation matrix""" + utils3d.torch.transforms.rotation_matrix_2d + +@overload +def rotate_2d(theta: Union[float, 
torch_.Tensor], center: torch_.Tensor = None): + """3x3 matrix for 2D rotation around a center +``` + [[Rxx, Rxy, tx], + [Ryx, Ryy, ty], + [0, 0, 1]] +``` +Args: + theta (float | torch.Tensor): rotation angle in radians, arbitrary shape (...,) + center (torch.Tensor): rotation center, arbitrary shape (..., 2). Default to (0, 0) + +Returns: + (torch.Tensor): (..., 3, 3) transformation matrix""" + utils3d.torch.transforms.rotate_2d + +@overload +def translate_2d(translation: torch_.Tensor): + """Translation matrix for 2D translation +``` + [[1, 0, tx], + [0, 1, ty], + [0, 0, 1]] +``` +Args: + translation (torch.Tensor): translation vector, arbitrary shape (..., 2) + +Returns: + (torch.Tensor): (..., 3, 3) transformation matrix""" + utils3d.torch.transforms.translate_2d + +@overload +def scale_2d(scale: Union[float, torch_.Tensor], center: torch_.Tensor = None): + """Scale matrix for 2D scaling +``` + [[s, 0, tx], + [0, s, ty], + [0, 0, 1]] +``` +Args: + scale (float | torch.Tensor): scale factor, arbitrary shape (...,) + center (torch.Tensor): scale center, arbitrary shape (..., 2). Default to (0, 0) + +Returns: + (torch.Tensor): (..., 3, 3) transformation matrix""" + utils3d.torch.transforms.scale_2d + +@overload +def apply_2d(transform: torch_.Tensor, points: torch_.Tensor): + """Apply (3x3 or 2x3) 2D affine transformation to points +``` + p = R @ p + t +``` +Args: + transform (torch.Tensor): (..., 2 or 3, 3) transformation matrix + points (torch.Tensor): (..., N, 2) points to transform + +Returns: + (torch.Tensor): (..., N, 2) transformed points""" + utils3d.torch.transforms.apply_2d + +@overload +def RastContext(nvd_ctx: Union[nvdiffrast.torch.ops.RasterizeCudaContext, nvdiffrast.torch.ops.RasterizeGLContext] = None, *, backend: Literal['cuda', 'gl'] = 'gl', device: Union[str, torch_.device] = None): + """Create a rasterization context. 
Nothing but a wrapper of nvdiffrast.torch.RasterizeCudaContext or nvdiffrast.torch.RasterizeGLContext.""" + utils3d.torch.rasterization.RastContext + +@overload +def rasterize_triangle_faces(ctx: utils3d.torch.rasterization.RastContext, vertices: torch_.Tensor, faces: torch_.Tensor, width: int, height: int, attr: torch_.Tensor = None, uv: torch_.Tensor = None, texture: torch_.Tensor = None, model: torch_.Tensor = None, view: torch_.Tensor = None, projection: torch_.Tensor = None, antialiasing: Union[bool, List[int]] = True, diff_attrs: Optional[List[int]] = None) -> Tuple[torch_.Tensor, torch_.Tensor, Optional[torch_.Tensor]]: + """Rasterize a mesh with vertex attributes. + +Args: + ctx (GLContext): rasterizer context + vertices (np.ndarray): (B, N, 2 or 3 or 4) + faces (torch.Tensor): (T, 3) + width (int): width of the output image + height (int): height of the output image + attr (torch.Tensor, optional): (B, N, C) vertex attributes. Defaults to None. + uv (torch.Tensor, optional): (B, N, 2) uv coordinates. Defaults to None. + texture (torch.Tensor, optional): (B, H, W, C) texture. Defaults to None. + model (torch.Tensor, optional): ([B,] 4, 4) model matrix. Defaults to None (identity). + view (torch.Tensor, optional): ([B,] 4, 4) view matrix. Defaults to None (identity). + projection (torch.Tensor, optional): ([B,] 4, 4) projection matrix. Defaults to None (identity). + antialiasing (Union[bool, List[int]], optional): whether to perform antialiasing. Defaults to True. If a list of indices is provided, only those channels will be antialiased. + diff_attrs (Union[None, List[int]], optional): indices of attributes to compute screen-space derivatives. Defaults to None. + +Returns: + Dictionary containing: + - image: (torch.Tensor): (B, C, H, W) + - depth: (torch.Tensor): (B, H, W) screen space depth, ranging from 0 (near) to 1. (far) + NOTE: Empty pixels will have depth 1., i.e. far plane. 
+ - mask: (torch.BoolTensor): (B, H, W) mask of valid pixels + - image_dr: (torch.Tensor): (B, 4, H, W) screen space derivatives of the attributes + - face_id: (torch.Tensor): (B, H, W) face ids + - uv: (torch.Tensor): (B, N, 2) uv coordinates (if uv is not None) + - uv_dr: (torch.Tensor): (B, N, 4) uv derivatives (if uv is not None) + - texture: (torch.Tensor): (B, H, W, C) texture (if uv and texture are not None)""" + utils3d.torch.rasterization.rasterize_triangle_faces + +@overload +def warp_image_by_depth(ctx: utils3d.torch.rasterization.RastContext, depth: torch_.FloatTensor, image: torch_.FloatTensor = None, mask: torch_.BoolTensor = None, width: int = None, height: int = None, *, extrinsics_src: torch_.FloatTensor = None, extrinsics_tgt: torch_.FloatTensor = None, intrinsics_src: torch_.FloatTensor = None, intrinsics_tgt: torch_.FloatTensor = None, near: float = 0.1, far: float = 100.0, antialiasing: bool = True, backslash: bool = False, padding: int = 0, return_uv: bool = False, return_dr: bool = False) -> Tuple[torch_.FloatTensor, torch_.FloatTensor, torch_.BoolTensor, Optional[torch_.FloatTensor], Optional[torch_.FloatTensor]]: + """Warp image by depth. +NOTE: if batch size is 1, image mesh will be triangulated aware of the depth, yielding less distorted results. +Otherwise, image mesh will be triangulated simply for batch rendering. + +Args: + ctx (Union[dr.RasterizeCudaContext, dr.RasterizeGLContext]): rasterization context + depth (torch.Tensor): (B, H, W) linear depth + image (torch.Tensor): (B, C, H, W). None to use image space uv. Defaults to None. + width (int, optional): width of the output image. None to use the same as depth. Defaults to None. + height (int, optional): height of the output image. Defaults the same as depth.. + extrinsics_src (torch.Tensor, optional): (B, 4, 4) extrinsics matrix for source. None to use identity. Defaults to None. + extrinsics_tgt (torch.Tensor, optional): (B, 4, 4) extrinsics matrix for target. 
None to use identity. Defaults to None. + intrinsics_src (torch.Tensor, optional): (B, 3, 3) intrinsics matrix for source. None to use the same as target. Defaults to None. + intrinsics_tgt (torch.Tensor, optional): (B, 3, 3) intrinsics matrix for target. None to use the same as source. Defaults to None. + near (float, optional): near plane. Defaults to 0.1. + far (float, optional): far plane. Defaults to 100.0. + antialiasing (bool, optional): whether to perform antialiasing. Defaults to True. + backslash (bool, optional): whether to use backslash triangulation. Defaults to False. + padding (int, optional): padding of the image. Defaults to 0. + return_uv (bool, optional): whether to return the uv. Defaults to False. + return_dr (bool, optional): whether to return the image-space derivatives of uv. Defaults to False. + +Returns: + image: (torch.FloatTensor): (B, C, H, W) rendered image + depth: (torch.FloatTensor): (B, H, W) linear depth, ranging from 0 to inf + mask: (torch.BoolTensor): (B, H, W) mask of valid pixels + uv: (torch.FloatTensor): (B, 2, H, W) image-space uv + dr: (torch.FloatTensor): (B, 4, H, W) image-space derivatives of uv""" + utils3d.torch.rasterization.warp_image_by_depth + +@overload +def warp_image_by_forward_flow(ctx: utils3d.torch.rasterization.RastContext, image: torch_.FloatTensor, flow: torch_.FloatTensor, depth: torch_.FloatTensor = None, *, antialiasing: bool = True, backslash: bool = False) -> Tuple[torch_.FloatTensor, torch_.BoolTensor]: + """Warp image by forward flow. +NOTE: if batch size is 1, image mesh will be triangulated aware of the depth, yielding less distorted results. +Otherwise, image mesh will be triangulated simply for batch rendering. + +Args: + ctx (Union[dr.RasterizeCudaContext, dr.RasterizeGLContext]): rasterization context + image (torch.Tensor): (B, C, H, W) image + flow (torch.Tensor): (B, 2, H, W) forward flow + depth (torch.Tensor, optional): (B, H, W) linear depth. If None, will use the same for all pixels. 
Defaults to None. + antialiasing (bool, optional): whether to perform antialiasing. Defaults to True. + backslash (bool, optional): whether to use backslash triangulation. Defaults to False. + +Returns: + image: (torch.FloatTensor): (B, C, H, W) rendered image + mask: (torch.BoolTensor): (B, H, W) mask of valid pixels""" + utils3d.torch.rasterization.warp_image_by_forward_flow + diff --git a/utils3d/utils3d/io/__init__.py b/utils3d/utils3d/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..306adf39bdc4c37d371183e0d3249f15f09c93d3 --- /dev/null +++ b/utils3d/utils3d/io/__init__.py @@ -0,0 +1,3 @@ +from .obj import * +from .colmap import * +from .ply import * diff --git a/utils3d/utils3d/io/colmap.py b/utils3d/utils3d/io/colmap.py new file mode 100644 index 0000000000000000000000000000000000000000..b8c8fe5c50391a7ad1b239b87173f7d89bdd0435 --- /dev/null +++ b/utils3d/utils3d/io/colmap.py @@ -0,0 +1,139 @@ +from typing import * +from pathlib import Path + +import numpy as np +from scipy.spatial.transform import Rotation + + +__all__ = ['read_extrinsics_from_colmap', 'read_intrinsics_from_colmap', 'write_extrinsics_as_colmap', 'write_intrinsics_as_colmap'] + + +def write_extrinsics_as_colmap(file: Union[str, Path], extrinsics: np.ndarray, image_names: Union[str, List[str]] = 'image_{i:04d}.png', camera_ids: List[int] = None): + """ + Write extrinsics to colmap `images.txt` file. + Args: + file: Path to `images.txt` file. + extrinsics: (N, 4, 4) array of extrinsics. + image_names: str or List of str, image names. Length is N. + If str, it should be a format string with `i` as the index. (i starts from 1, in correspondence with IMAGE_ID in colmap) + camera_ids: List of int, camera ids. Length is N. + If None, it will be set to [1, 2, ..., N]. + """ + assert extrinsics.shape[1:] == (4, 4) and extrinsics.ndim == 3 or extrinsics.shape == (4, 4) + if extrinsics.ndim == 2: + extrinsics = extrinsics[np.newaxis, ...] 
+ quats = Rotation.from_matrix(extrinsics[:, :3, :3]).as_quat() + trans = extrinsics[:, :3, 3] + if camera_ids is None: + camera_ids = list(range(1, len(extrinsics) + 1)) + if isinstance(image_names, str): + image_names = [image_names.format(i=i) for i in range(1, len(extrinsics) + 1)] + assert len(extrinsics) == len(image_names) == len(camera_ids), \ + f'Number of extrinsics ({len(extrinsics)}), image_names ({len(image_names)}), and camera_ids ({len(camera_ids)}) must be the same' + with open(file, 'w') as fp: + print("# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME", file=fp) + for i, (quat, t, name, camera_id) in enumerate(zip(quats.tolist(), trans.tolist(), image_names, camera_ids)): + # Colmap has wxyz order while scipy.spatial.transform.Rotation has xyzw order. + qx, qy, qz, qw = quat + tx, ty, tz = t + print(f'{i + 1} {qw:f} {qx:f} {qy:f} {qz:f} {tx:f} {ty:f} {tz:f} {camera_id:d} {name}', file=fp) + print() + + +def write_intrinsics_as_colmap(file: Union[str, Path], intrinsics: np.ndarray, width: int, height: int, normalized: bool = False): + """ + Write intrinsics to colmap `cameras.txt` file. Currently only support PINHOLE model (no distortion) + Args: + file: Path to `cameras.txt` file. + intrinsics: (N, 3, 3) array of intrinsics. + width: Image width. + height: Image height. + normalized: Whether the intrinsics are normalized. If True, the intrinsics will unnormalized for writing. + """ + assert intrinsics.shape[1:] == (3, 3) and intrinsics.ndim == 3 or intrinsics.shape == (3, 3) + if intrinsics.ndim == 2: + intrinsics = intrinsics[np.newaxis, ...] 
+ if normalized: + intrinsics = intrinsics * np.array([width, height, 1])[:, None] + with open(file, 'w') as fp: + print("# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]", file=fp) + for i, intr in enumerate(intrinsics): + fx, fy, cx, cy = intr[0, 0], intr[1, 1], intr[0, 2], intr[1, 2] + print(f'{i + 1} PINHOLE {width:d} {height:d} {fx:f} {fy:f} {cx:f} {cy:f}', file=fp) + + +def read_extrinsics_from_colmap(file: Union[str, Path]) -> Union[np.ndarray, List[int], List[str]]: + """ + Read extrinsics from colmap `images.txt` file. + Args: + file: Path to `images.txt` file. + Returns: + extrinsics: (N, 4, 4) array of extrinsics. + camera_ids: List of int, camera ids. Length is N. Note that camera ids in colmap typically starts from 1. + image_names: List of str, image names. Length is N. + """ + with open(file) as fp: + lines = fp.readlines() + image_names, quats, trans, camera_ids = [], [], [], [] + i_line = 0 + for line in lines: + line = line.strip() + if line.startswith('#'): + continue + i_line += 1 + if i_line % 2 == 0: + continue + image_id, qw, qx, qy, qz, tx, ty, tz, camera_id, name = line.split() + quats.append([float(qx), float(qy), float(qz), float(qw)]) + trans.append([float(tx), float(ty), float(tz)]) + camera_ids.append(int(camera_id)) + image_names.append(name) + + quats = np.array(quats, dtype=np.float32) + trans = np.array(trans, dtype=np.float32) + rotation = Rotation.from_quat(quats).as_matrix() + extrinsics = np.concatenate([ + np.concatenate([rotation, trans[..., None]], axis=-1), + np.array([0, 0, 0, 1], dtype=np.float32)[None, None, :].repeat(len(quats), axis=0) + ], axis=-2) + + return extrinsics, camera_ids, image_names + + +def read_intrinsics_from_colmap(file: Union[str, Path], normalize: bool = False) -> Tuple[List[int], np.ndarray, np.ndarray]: + """ + Read intrinsics from colmap `cameras.txt` file. + Args: + file: Path to `cameras.txt` file. + normalize: Whether to normalize the intrinsics. If True, the intrinsics will be normalized. 
(mapping coordinates to [0, 1] range) + Returns: + camera_ids: List of int, camera ids. Length is N. Note that camera ids in colmap typically starts from 1. + intrinsics: (N, 3, 3) array of intrinsics. + distortions: (N, 5) array of distortions. + """ + with open(file) as fp: + lines = fp.readlines() + intrinsics, distortions, camera_ids = [], [], [] + for line in lines: + line = line.strip() + if not line or line.startswith('#'): + continue + camera_id, model, width, height, *params = line.split() + camera_id, width, height = int(camera_id), int(width), int(height) + if model == 'PINHOLE': + fx, fy, cx, cy = map(float, params[:4]) + k1 = k2 = k3 = p1 = p2 = 0.0 + elif model == 'OPENCV': + fx, fy, cx, cy, k1, k2, p1, p2, k3 = *map(float, params[:8]), 0.0 + elif model == 'SIMPLE_RADIAL': + f, cx, cy, k = map(float, params[:4]) + fx = fy = f + k1, k2, p1, p2, k3 = k, 0.0, 0.0, 0.0, 0.0 + camera_ids.append(camera_id) + if normalize: + fx, fy, cx, cy = fx / width, fy / height, cx / width, cy / height + intrinsics.append([[fx, 0, cx], [0, fy, cy], [0, 0, 1]]) + distortions.append([k1, k2, p1, p2, k3]) + intrinsics = np.array(intrinsics, dtype=np.float32) + distortions = np.array(distortions, dtype=np.float32) + return camera_ids, intrinsics, distortions diff --git a/utils3d/utils3d/io/obj.py b/utils3d/utils3d/io/obj.py new file mode 100644 index 0000000000000000000000000000000000000000..58927cab933335bcae925d361ba8a16e2806b46d --- /dev/null +++ b/utils3d/utils3d/io/obj.py @@ -0,0 +1,146 @@ +from io import TextIOWrapper +from typing import Dict, Any, Union, Iterable +import numpy as np +from pathlib import Path + +__all__ = [ + 'read_obj', + 'write_obj', + 'simple_write_obj' +] + +def read_obj( + file : Union[str, Path, TextIOWrapper], + encoding: Union[str, None] = None, + ignore_unknown: bool = False +): + """ + Read wavefront .obj file, without preprocessing. + + Why bothering having this read_obj() while we already have other libraries like `trimesh`? 
+ This function read the raw format from .obj file and keeps the order of vertices and faces, + while trimesh which involves modification like merge/split vertices, which could break the orders of vertices and faces, + Those libraries are commonly aiming at geometry processing and rendering supporting various formats. + If you want mesh geometry processing, you may turn to `trimesh` for more features. + + ### Parameters + `file` (str, Path, TextIOWrapper): filepath or file object + encoding (str, optional): + + ### Returns + obj (dict): A dict containing .obj components + { + 'mtllib': [], + 'v': [[0,1, 0.2, 1.0], [1.2, 0.0, 0.0], ...], + 'vt': [[0.5, 0.5], ...], + 'vn': [[0., 0.7, 0.7], [0., -0.7, 0.7], ...], + 'f': [[0, 1, 2], [2, 3, 4],...], + 'usemtl': [{'name': 'mtl1', 'f': 7}] + } + """ + if hasattr(file,'read'): + lines = file.read().splitlines() + else: + with open(file, 'r', encoding=encoding) as fp: + lines = fp.read().splitlines() + mtllib = [] + v, vt, vn, vp = [], [], [], [] # Vertex coordinates, Vertex texture coordinate, Vertex normal, Vertex parameter + f, ft, fn = [], [], [] # Face indices, Face texture indices, Face normal indices + o = [] + s = [] + usemtl = [] + + def pad(l: list, n: Any): + return l + [n] * (3 - len(l)) + + for i, line in enumerate(lines): + sq = line.strip().split() + if len(sq) == 0: + continue + if sq[0] == 'v': + assert 4 <= len(sq) <= 5, f'Invalid format of line {i}: {line}' + v.append([float(e) for e in sq[1:]][:3]) + elif sq[0] == 'vt': + assert 3 <= len(sq) <= 4, f'Invalid format of line {i}: {line}' + vt.append([float(e) for e in sq[1:]][:2]) + elif sq[0] == 'vn': + assert len(sq) == 4, f'Invalid format of line {i}: {line}' + vn.append([float(e) for e in sq[1:]]) + elif sq[0] == 'vp': + assert 2 <= len(sq) <= 4, f'Invalid format of line {i}: {line}' + vp.append(pad([float(e) for e in sq[1:]], 0)) + elif sq[0] == 'f': + spliting = [pad([int(j) - 1 for j in e.split('/')], -1) for e in sq[1:]] + f.append([e[0] for e in 
spliting]) + ft.append([e[1] for e in spliting]) + fn.append([e[2] for e in spliting]) + elif sq[0] == 'usemtl': + assert len(sq) == 2 + usemtl.append((sq[1], len(f))) + elif sq[0] == 'o': + assert len(sq) == 2 + o.append((sq[1], len(f))) + elif sq[0] == 's': + s.append((sq[1], len(f))) + elif sq[0] == 'mtllib': + assert len(sq) == 2 + mtllib.append(sq[1]) + elif sq[0][0] == '#': + continue + else: + if not ignore_unknown: + raise Exception(f'Unknown keyword {sq[0]}') + + min_poly_vertices = min(len(f) for f in f) + max_poly_vertices = max(len(f) for f in f) + + return { + 'mtllib': mtllib, + 'v': np.array(v, dtype=np.float32), + 'vt': np.array(vt, dtype=np.float32), + 'vn': np.array(vn, dtype=np.float32), + 'vp': np.array(vp, dtype=np.float32), + 'f': np.array(f, dtype=np.int32) if min_poly_vertices == max_poly_vertices else f, + 'ft': np.array(ft, dtype=np.int32) if min_poly_vertices == max_poly_vertices else ft, + 'fn': np.array(fn, dtype=np.int32) if min_poly_vertices == max_poly_vertices else fn, + 'o': o, + 's': s, + 'usemtl': usemtl, + } + + +def write_obj( + file: Union[str, Path], + obj: Dict[str, Any], + encoding: Union[str, None] = None + ): + with open(file, 'w', encoding=encoding) as fp: + for k in ['v', 'vt', 'vn', 'vp']: + if k not in obj: + continue + for v in obj[k]: + print(k, *map(float, v), file=fp) + for f in obj['f']: + print('f', *((str('/').join(map(int, i)) if isinstance(int(i), Iterable) else i) for i in f), file=fp) + + +def simple_write_obj( + file: Union[str, Path], + vertices: np.ndarray, + faces: np.ndarray, + encoding: Union[str, None] = None + ): + """ + Write wavefront .obj file, without preprocessing. 
+ + Args: + vertices (np.ndarray): [N, 3] + faces (np.ndarray): [T, 3] + file (Any): filepath + encoding (str, optional): + """ + with open(file, 'w', encoding=encoding) as fp: + for v in vertices: + print('v', *map(float, v), file=fp) + for f in faces: + print('f', *map(int, f + 1), file=fp) diff --git a/utils3d/utils3d/io/ply.py b/utils3d/utils3d/io/ply.py new file mode 100644 index 0000000000000000000000000000000000000000..9a1fc337117304aa8637dca74afe0b022e24605f --- /dev/null +++ b/utils3d/utils3d/io/ply.py @@ -0,0 +1,104 @@ +import numpy as np + +from typing import * +from pathlib import Path + + +def read_ply( + file: Union[str, Path], + encoding: Union[str, None] = None, + ignore_unknown: bool = False +) -> Tuple[np.ndarray, np.ndarray]: + """ + Read .ply file, without preprocessing. + + Args: + file (Any): filepath + encoding (str, optional): + + Returns: + Tuple[np.ndarray, np.ndarray]: vertices, faces + """ + import plyfile + plydata = plyfile.PlyData.read(file) + vertices = np.stack([plydata['vertex'][k] for k in ['x', 'y', 'z']], axis=-1) + if 'face' in plydata: + faces = np.array(plydata['face']['vertex_indices'].tolist()) + else: + faces = None + return vertices, faces + + +def write_ply( + file: Union[str, Path], + vertices: np.ndarray, + faces: np.ndarray = None, + edges: np.ndarray = None, + vertex_colors: np.ndarray = None, + edge_colors: np.ndarray = None, + text: bool = False +): + """ + Write .ply file, without preprocessing. + + Args: + file (Any): filepath + vertices (np.ndarray): [N, 3] + faces (np.ndarray): [T, E] + edges (np.ndarray): [E, 2] + vertex_colors (np.ndarray, optional): [N, 3]. Defaults to None. + edge_colors (np.ndarray, optional): [E, 3]. Defaults to None. + text (bool, optional): save data in text format. Defaults to False. 
+ """ + import plyfile + assert vertices.ndim == 2 and vertices.shape[1] == 3 + vertices = vertices.astype(np.float32) + if faces is not None: + assert faces.ndim == 2 + faces = faces.astype(np.int32) + if edges is not None: + assert edges.ndim == 2 and edges.shape[1] == 2 + edges = edges.astype(np.int32) + + if vertex_colors is not None: + assert vertex_colors.ndim == 2 and vertex_colors.shape[1] == 3 + if vertex_colors.dtype in [np.float32, np.float64]: + vertex_colors = vertex_colors * 255 + vertex_colors = np.clip(vertex_colors, 0, 255).astype(np.uint8) + vertices_data = np.zeros(len(vertices), dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]) + vertices_data['x'] = vertices[:, 0] + vertices_data['y'] = vertices[:, 1] + vertices_data['z'] = vertices[:, 2] + vertices_data['red'] = vertex_colors[:, 0] + vertices_data['green'] = vertex_colors[:, 1] + vertices_data['blue'] = vertex_colors[:, 2] + else: + vertices_data = np.array([tuple(v) for v in vertices], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')]) + + if faces is not None: + faces_data = np.zeros(len(faces), dtype=[('vertex_indices', 'i4', (faces.shape[1],))]) + faces_data['vertex_indices'] = faces + + if edges is not None: + if edge_colors is not None: + assert edge_colors.ndim == 2 and edge_colors.shape[1] == 3 + if edge_colors.dtype in [np.float32, np.float64]: + edge_colors = edge_colors * 255 + edge_colors = np.clip(edge_colors, 0, 255).astype(np.uint8) + edges_data = np.zeros(len(edges), dtype=[('vertex1', 'i4'), ('vertex2', 'i4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]) + edges_data['vertex1'] = edges[:, 0] + edges_data['vertex2'] = edges[:, 1] + edges_data['red'] = edge_colors[:, 0] + edges_data['green'] = edge_colors[:, 1] + edges_data['blue'] = edge_colors[:, 2] + else: + edges_data = np.array([tuple(e) for e in edges], dtype=[('vertex1', 'i4'), ('vertex2', 'i4')]) + + ply_data = [plyfile.PlyElement.describe(vertices_data, 'vertex')] + if faces 
is not None: + ply_data.append(plyfile.PlyElement.describe(faces_data, 'face')) + if edges is not None: + ply_data.append(plyfile.PlyElement.describe(edges_data, 'edge')) + + plyfile.PlyData(ply_data, text=text).write(file) + \ No newline at end of file diff --git a/utils3d/utils3d/numpy/__init__.py b/utils3d/utils3d/numpy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dedb9df3e445b62a9d75ed049b44e1ca007950f9 --- /dev/null +++ b/utils3d/utils3d/numpy/__init__.py @@ -0,0 +1,143 @@ +""" +3D utility functions workings with NumPy. +""" +import importlib +import itertools +import numpy +from typing import TYPE_CHECKING + + +__modules_all__ = { + 'mesh':[ + 'triangulate', + 'compute_face_normal', + 'compute_face_angle', + 'compute_vertex_normal', + 'compute_vertex_normal_weighted', + 'remove_corrupted_faces', + 'merge_duplicate_vertices', + 'remove_unreferenced_vertices', + 'subdivide_mesh_simple', + 'mesh_relations', + 'flatten_mesh_indices' + ], + 'quadmesh': [ + 'calc_quad_candidates', + 'calc_quad_distortion', + 'calc_quad_direction', + 'calc_quad_smoothness', + 'sovle_quad', + 'sovle_quad_qp', + 'tri_to_quad' + ], + 'utils': [ + 'sliding_window_1d', + 'sliding_window_nd', + 'sliding_window_2d', + 'max_pool_1d', + 'max_pool_2d', + 'max_pool_nd', + 'depth_edge', + 'normals_edge', + 'depth_aliasing', + 'interpolate', + 'image_scrcoord', + 'image_uv', + 'image_pixel_center', + 'image_pixel', + 'image_mesh', + 'image_mesh_from_depth', + 'depth_to_normals', + 'points_to_normals', + 'depth_to_points', + 'chessboard', + 'cube', + 'icosahedron', + 'square', + 'camera_frustum', + ], + 'transforms': [ + 'perspective', + 'perspective_from_fov', + 'perspective_from_fov_xy', + 'intrinsics_from_focal_center', + 'intrinsics_from_fov', + 'fov_to_focal', + 'focal_to_fov', + 'intrinsics_to_fov', + 'view_look_at', + 'extrinsics_look_at', + 'perspective_to_intrinsics', + 'perspective_to_near_far', + 'intrinsics_to_perspective', + 'extrinsics_to_view', + 
'view_to_extrinsics', + 'normalize_intrinsics', + 'crop_intrinsics', + 'pixel_to_uv', + 'pixel_to_ndc', + 'uv_to_pixel', + 'project_depth', + 'depth_buffer_to_linear', + 'unproject_cv', + 'unproject_gl', + 'project_cv', + 'project_gl', + 'quaternion_to_matrix', + 'axis_angle_to_matrix', + 'matrix_to_quaternion', + 'extrinsics_to_essential', + 'euler_axis_angle_rotation', + 'euler_angles_to_matrix', + 'skew_symmetric', + 'rotation_matrix_from_vectors', + 'ray_intersection', + 'se3_matrix', + 'slerp_quaternion', + 'slerp_vector', + 'lerp', + 'lerp_se3_matrix', + 'piecewise_lerp', + 'piecewise_lerp_se3_matrix', + 'apply_transform' + ], + 'spline': [ + 'linear_spline_interpolate', + ], + 'rasterization': [ + 'RastContext', + 'rasterize_triangle_faces', + 'rasterize_edges', + 'texture', + 'warp_image_by_depth', + 'test_rasterization' + ], +} + + +__all__ = list(itertools.chain(*__modules_all__.values())) + +def __getattr__(name): + try: + return globals()[name] + except KeyError: + pass + + try: + module_name = next(m for m in __modules_all__ if name in __modules_all__[m]) + except StopIteration: + raise AttributeError(f"module '{__name__}' has no attribute '{name}'") + module = importlib.import_module(f'.{module_name}', __name__) + for key in __modules_all__[module_name]: + globals()[key] = getattr(module, key) + + return globals()[name] + + +if TYPE_CHECKING: + from .quadmesh import * + from .transforms import * + from .mesh import * + from .utils import * + from .rasterization import * + from .spline import * \ No newline at end of file diff --git a/utils3d/utils3d/numpy/_helpers.py b/utils3d/utils3d/numpy/_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..04b008d115af9d0d5b801a46952079ad7687b124 --- /dev/null +++ b/utils3d/utils3d/numpy/_helpers.py @@ -0,0 +1,93 @@ +# decorator +import numpy as np +from numbers import Number +import inspect +from functools import wraps +from typing import * +from .._helpers import suppress_traceback + + 
+def get_args_order(func, args, kwargs): + """ + Get the order of the arguments of a function. + """ + names = inspect.getfullargspec(func).args + names_idx = {name: i for i, name in enumerate(names)} + args_order = [] + kwargs_order = {} + for name, arg in kwargs.items(): + if name in names: + kwargs_order[name] = names_idx[name] + names.remove(name) + for i, arg in enumerate(args): + if i < len(names): + args_order.append(names_idx[names[i]]) + return args_order, kwargs_order + + +def broadcast_args(args, kwargs, args_dim, kwargs_dim): + spatial = [] + for arg, arg_dim in zip(args + list(kwargs.values()), args_dim + list(kwargs_dim.values())): + if isinstance(arg, np.ndarray) and arg_dim is not None: + arg_spatial = arg.shape[:arg.ndim-arg_dim] + if len(arg_spatial) > len(spatial): + spatial = [1] * (len(arg_spatial) - len(spatial)) + spatial + for j in range(len(arg_spatial)): + if spatial[-j] < arg_spatial[-j]: + if spatial[-j] == 1: + spatial[-j] = arg_spatial[-j] + else: + raise ValueError("Cannot broadcast arguments.") + for i, arg in enumerate(args): + if isinstance(arg, np.ndarray) and args_dim[i] is not None: + args[i] = np.broadcast_to(arg, [*spatial, *arg.shape[arg.ndim-args_dim[i]:]]) + for key, arg in kwargs.items(): + if isinstance(arg, np.ndarray) and kwargs_dim[key] is not None: + kwargs[key] = np.broadcast_to(arg, [*spatial, *arg.shape[arg.ndim-kwargs_dim[key]:]]) + return args, kwargs, spatial + + +def batched(*dims): + """ + Decorator that allows a function to be called with batched arguments. 
+ """ + def decorator(func): + @wraps(func) + @suppress_traceback + def wrapper(*args, **kwargs): + args = list(args) + # get arguments dimensions + args_order, kwargs_order = get_args_order(func, args, kwargs) + args_dim = [dims[i] for i in args_order] + kwargs_dim = {key: dims[i] for key, i in kwargs_order.items()} + # convert to numpy array + for i, arg in enumerate(args): + if isinstance(arg, (Number, list, tuple)) and args_dim[i] is not None: + args[i] = np.array(arg) + for key, arg in kwargs.items(): + if isinstance(arg, (Number, list, tuple)) and kwargs_dim[key] is not None: + kwargs[key] = np.array(arg) + # broadcast arguments + args, kwargs, spatial = broadcast_args(args, kwargs, args_dim, kwargs_dim) + for i, (arg, arg_dim) in enumerate(zip(args, args_dim)): + if isinstance(arg, np.ndarray) and arg_dim is not None: + args[i] = arg.reshape([-1, *arg.shape[arg.ndim-arg_dim:]]) + for key, arg in kwargs.items(): + if isinstance(arg, np.ndarray) and kwargs_dim[key] is not None: + kwargs[key] = arg.reshape([-1, *arg.shape[arg.ndim-kwargs_dim[key]:]]) + # call function + results = func(*args, **kwargs) + type_results = type(results) + results = list(results) if isinstance(results, (tuple, list)) else [results] + # restore spatial dimensions + for i, result in enumerate(results): + results[i] = result.reshape([*spatial, *result.shape[1:]]) + if type_results == tuple: + results = tuple(results) + elif type_results == list: + results = list(results) + else: + results = results[0] + return results + return wrapper + return decorator diff --git a/utils3d/utils3d/numpy/mesh.py b/utils3d/utils3d/numpy/mesh.py new file mode 100644 index 0000000000000000000000000000000000000000..57fe6b635209bae3abbe43d998cebebefbed9075 --- /dev/null +++ b/utils3d/utils3d/numpy/mesh.py @@ -0,0 +1,355 @@ +import numpy as np +from typing import * +from ._helpers import batched + + +__all__ = [ + 'triangulate', + 'compute_face_normal', + 'compute_face_angle', + 'compute_vertex_normal', + 
'compute_vertex_normal_weighted', + 'remove_corrupted_faces', + 'merge_duplicate_vertices', + 'remove_unreferenced_vertices', + 'subdivide_mesh_simple', + 'mesh_relations', + 'flatten_mesh_indices' +] + + +def triangulate( + faces: np.ndarray, + vertices: np.ndarray = None, + backslash: np.ndarray = None +) -> np.ndarray: + """ + Triangulate a polygonal mesh. + + Args: + faces (np.ndarray): [L, P] polygonal faces + vertices (np.ndarray, optional): [N, 3] 3-dimensional vertices. + If given, the triangulation is performed according to the distance + between vertices. Defaults to None. + backslash (np.ndarray, optional): [L] boolean array indicating + how to triangulate the quad faces. Defaults to None. + + Returns: + (np.ndarray): [L * (P - 2), 3] triangular faces + """ + if faces.shape[-1] == 3: + return faces + P = faces.shape[-1] + if vertices is not None: + assert faces.shape[-1] == 4, "now only support quad mesh" + if backslash is None: + backslash = np.linalg.norm(vertices[faces[:, 0]] - vertices[faces[:, 2]], axis=-1) < \ + np.linalg.norm(vertices[faces[:, 1]] - vertices[faces[:, 3]], axis=-1) + if backslash is None: + loop_indice = np.stack([ + np.zeros(P - 2, dtype=int), + np.arange(1, P - 1, 1, dtype=int), + np.arange(2, P, 1, dtype=int) + ], axis=1) + return faces[:, loop_indice].reshape((-1, 3)) + else: + assert faces.shape[-1] == 4, "now only support quad mesh" + faces = np.where( + backslash[:, None], + faces[:, [0, 1, 2, 0, 2, 3]], + faces[:, [0, 1, 3, 3, 1, 2]] + ).reshape((-1, 3)) + return faces + + +@batched(2, None) +def compute_face_normal( + vertices: np.ndarray, + faces: np.ndarray +) -> np.ndarray: + """ + Compute face normals of a triangular mesh + + Args: + vertices (np.ndarray): [..., N, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices + + Returns: + normals (np.ndarray): [..., T, 3] face normals + """ + normal = np.cross( + vertices[..., faces[:, 1], :] - vertices[..., faces[:, 0], :], + vertices[..., faces[:, 
2], :] - vertices[..., faces[:, 0], :] + ) + normal_norm = np.linalg.norm(normal, axis=-1, keepdims=True) + normal_norm[normal_norm == 0] = 1 + normal /= normal_norm + return normal + + +@batched(2, None) +def compute_face_angle( + vertices: np.ndarray, + faces: np.ndarray, + eps: float = 1e-12 + ) -> np.ndarray: + """ + Compute face angles of a triangular mesh + + Args: + vertices (np.ndarray): [..., N, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices + + Returns: + angles (np.ndarray): [..., T, 3] face angles + """ + face_angle = np.zeros_like(faces, dtype=vertices.dtype) + for i in range(3): + edge1 = vertices[..., faces[:, (i + 1) % 3], :] - vertices[..., faces[:, i], :] + edge2 = vertices[..., faces[:, (i + 2) % 3], :] - vertices[..., faces[:, i], :] + face_angle[..., i] = np.arccos(np.sum( + edge1 / np.clip(np.linalg.norm(edge1, axis=-1, keepdims=True), eps, None) * + edge2 / np.clip(np.linalg.norm(edge2, axis=-1, keepdims=True), eps, None), + axis=-1 + )) + return face_angle + + +@batched(2, None, 2) +def compute_vertex_normal( + vertices: np.ndarray, + faces: np.ndarray, + face_normal: np.ndarray = None +) -> np.ndarray: + """ + Compute vertex normals of a triangular mesh by averaging neightboring face normals + TODO: can be improved. + + Args: + vertices (np.ndarray): [..., N, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices + face_normal (np.ndarray, optional): [..., T, 3] face normals. + None to compute face normals from vertices and faces. Defaults to None. 
+ + Returns: + normals (np.ndarray): [..., N, 3] vertex normals + """ + if face_normal is None: + face_normal = compute_face_normal(vertices, faces) + vertex_normal = np.zeros_like(vertices, dtype=vertices.dtype) + for n in range(vertices.shape[0]): + for i in range(3): + vertex_normal[n, :, 0] += np.bincount(faces[:, i], weights=face_normal[n, :, 0], minlength=vertices.shape[1]) + vertex_normal[n, :, 1] += np.bincount(faces[:, i], weights=face_normal[n, :, 1], minlength=vertices.shape[1]) + vertex_normal[n, :, 2] += np.bincount(faces[:, i], weights=face_normal[n, :, 2], minlength=vertices.shape[1]) + vertex_normal_norm = np.linalg.norm(vertex_normal, axis=-1, keepdims=True) + vertex_normal_norm[vertex_normal_norm == 0] = 1 + vertex_normal /= vertex_normal_norm + return vertex_normal + + +@batched(2, None, 2) +def compute_vertex_normal_weighted( + vertices: np.ndarray, + faces: np.ndarray, + face_normal: np.ndarray = None +) -> np.ndarray: + """ + Compute vertex normals of a triangular mesh by weighted sum of neightboring face normals + according to the angles + + Args: + vertices (np.ndarray): [..., N, 3] 3-dimensional vertices + faces (np.ndarray): [..., T, 3] triangular face indices + face_normal (np.ndarray, optional): [..., T, 3] face normals. + None to compute face normals from vertices and faces. Defaults to None. 
+ + Returns: + normals (np.ndarray): [..., N, 3] vertex normals + """ + if face_normal is None: + face_normal = compute_face_normal(vertices, faces) + face_angle = compute_face_angle(vertices, faces) + vertex_normal = np.zeros_like(vertices) + for n in range(vertices.shape[0]): + for i in range(3): + vertex_normal[n, :, 0] += np.bincount(faces[n, :, i], weights=face_normal[n, :, 0] * face_angle[n, :, i], minlength=vertices.shape[1]) + vertex_normal[n, :, 1] += np.bincount(faces[n, :, i], weights=face_normal[n, :, 1] * face_angle[n, :, i], minlength=vertices.shape[1]) + vertex_normal[n, :, 2] += np.bincount(faces[n, :, i], weights=face_normal[n, :, 2] * face_angle[n, :, i], minlength=vertices.shape[1]) + vertex_normal_norm = np.linalg.norm(vertex_normal, axis=-1, keepdims=True) + vertex_normal_norm[vertex_normal_norm == 0] = 1 + vertex_normal /= vertex_normal_norm + return vertex_normal + + +def remove_corrupted_faces( + faces: np.ndarray + ) -> np.ndarray: + """ + Remove corrupted faces (faces with duplicated vertices) + + Args: + faces (np.ndarray): [T, 3] triangular face indices + + Returns: + np.ndarray: [T_, 3] triangular face indices + """ + corrupted = (faces[:, 0] == faces[:, 1]) | (faces[:, 1] == faces[:, 2]) | (faces[:, 2] == faces[:, 0]) + return faces[~corrupted] + + +def merge_duplicate_vertices( + vertices: np.ndarray, + faces: np.ndarray, + tol: float = 1e-6 + ) -> Tuple[np.ndarray, np.ndarray]: + """ + Merge duplicate vertices of a triangular mesh. + Duplicate vertices are merged by selecte one of them, and the face indices are updated accordingly. + + Args: + vertices (np.ndarray): [N, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices + tol (float, optional): tolerance for merging. Defaults to 1e-6. 
+ + Returns: + vertices (np.ndarray): [N_, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices + """ + vertices_round = np.round(vertices / tol) + _, uni_i, uni_inv = np.unique(vertices_round, return_index=True, return_inverse=True, axis=0) + vertices = vertices[uni_i] + faces = uni_inv[faces] + return vertices, faces + + +def remove_unreferenced_vertices( + faces: np.ndarray, + *vertice_attrs, + return_indices: bool = False +) -> Tuple[np.ndarray, ...]: + """ + Remove unreferenced vertices of a mesh. + Unreferenced vertices are removed, and the face indices are updated accordingly. + + Args: + faces (np.ndarray): [T, P] face indices + *vertice_attrs: vertex attributes + + Returns: + faces (np.ndarray): [T, P] face indices + *vertice_attrs: vertex attributes + indices (np.ndarray, optional): [N] indices of vertices that are kept. Defaults to None. + """ + P = faces.shape[-1] + fewer_indices, inv_map = np.unique(faces, return_inverse=True) + faces = inv_map.astype(np.int32).reshape(-1, P) + ret = [faces] + for attr in vertice_attrs: + ret.append(attr[fewer_indices]) + if return_indices: + ret.append(fewer_indices) + return tuple(ret) + + +def subdivide_mesh_simple( + vertices: np.ndarray, + faces: np.ndarray, + n: int = 1 +) -> Tuple[np.ndarray, np.ndarray]: + """ + Subdivide a triangular mesh by splitting each triangle into 4 smaller triangles. + NOTE: All original vertices are kept, and new vertices are appended to the end of the vertex list. + + Args: + vertices (np.ndarray): [N, 3] 3-dimensional vertices + faces (np.ndarray): [T, 3] triangular face indices + n (int, optional): number of subdivisions. Defaults to 1. 
+ + Returns: + vertices (np.ndarray): [N_, 3] subdivided 3-dimensional vertices + faces (np.ndarray): [4 * T, 3] subdivided triangular face indices + """ + for _ in range(n): + edges = np.stack([faces[:, [0, 1]], faces[:, [1, 2]], faces[:, [2, 0]]], axis=0) + edges = np.sort(edges, axis=2) + uni_edges, uni_inv = np.unique(edges.reshape(-1, 2), return_inverse=True, axis=0) + uni_inv = uni_inv.reshape(3, -1) + midpoints = (vertices[uni_edges[:, 0]] + vertices[uni_edges[:, 1]]) / 2 + + n_vertices = vertices.shape[0] + vertices = np.concatenate([vertices, midpoints], axis=0) + faces = np.concatenate([ + np.stack([faces[:, 0], n_vertices + uni_inv[0], n_vertices + uni_inv[2]], axis=1), + np.stack([faces[:, 1], n_vertices + uni_inv[1], n_vertices + uni_inv[0]], axis=1), + np.stack([faces[:, 2], n_vertices + uni_inv[2], n_vertices + uni_inv[1]], axis=1), + np.stack([n_vertices + uni_inv[0], n_vertices + uni_inv[1], n_vertices + uni_inv[2]], axis=1), + ], axis=0) + return vertices, faces + + +def mesh_relations( + faces: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + """ + Calculate the relation between vertices and faces. + NOTE: The input mesh must be a manifold triangle mesh. + + Args: + faces (np.ndarray): [T, 3] triangular face indices + + Returns: + edges (np.ndarray): [E, 2] edge indices + edge2face (np.ndarray): [E, 2] edge to face relation. The second column is -1 if the edge is boundary. + face2edge (np.ndarray): [T, 3] face to edge relation + face2face (np.ndarray): [T, 3] face to face relation + """ + T = faces.shape[0] + edges = np.stack([faces[:, [0, 1]], faces[:, [1, 2]], faces[:, [2, 0]]], axis=1).reshape(-1, 2) # [3T, 2] + edges = np.sort(edges, axis=1) # [3T, 2] + edges, face2edge, occurence = np.unique(edges, axis=0, return_inverse=True, return_counts=True) # [E, 2], [3T], [E] + E = edges.shape[0] + assert np.all(occurence <= 2), "The input mesh is not a manifold mesh." 
+ + # Edge to face relation + padding = np.arange(E, dtype=np.int32)[occurence == 1] + padded_face2edge = np.concatenate([face2edge, padding], axis=0) # [2E] + edge2face = np.argsort(padded_face2edge, kind='stable').reshape(-1, 2) // 3 # [E, 2] + edge2face_valid = edge2face[:, 1] < T # [E] + edge2face[~edge2face_valid, 1] = -1 + + # Face to edge relation + face2edge = face2edge.reshape(-1, 3) # [T, 3] + + # Face to face relation + face2face = edge2face[face2edge] # [T, 3, 2] + face2face = face2face[face2face != np.arange(T)[:, None, None]].reshape(T, 3) # [T, 3] + + return edges, edge2face, face2edge, face2face + + +@overload +def flatten_mesh_indices(faces1: np.ndarray, attr1: np.ndarray, *other_faces_attrs_pairs: np.ndarray) -> Tuple[np.ndarray, ...]: + """ + Rearrange the indices of a mesh to a flattened version. Vertices will be no longer shared. + + ### Parameters: + - `faces1`: [T, P] face indices of the first attribute + - `attr1`: [N1, ...] attributes of the first mesh + - ... + + ### Returns: + - `faces`: [T, P] flattened face indices, contigous from 0 to T * P - 1 + - `attr1`: [T * P, ...] attributes of the first mesh, where every P values correspond to a face + _ ... + """ +def flatten_mesh_indices(*args: np.ndarray) -> Tuple[np.ndarray, ...]: + assert len(args) % 2 == 0, "The number of arguments must be even." + T, P = args[0].shape + assert all(arg.shape[0] == T and arg.shape[1] == P for arg in args[::2]), "The faces must have the same shape." 
+ attr_flat = [] + for faces_, attr_ in zip(args[::2], args[1::2]): + attr_flat_ = attr_[faces_].reshape(-1, *attr_.shape[1:]) + attr_flat.append(attr_flat_) + faces_flat = np.arange(T * P, dtype=np.int32).reshape(T, P) + return faces_flat, *attr_flat \ No newline at end of file diff --git a/utils3d/utils3d/numpy/quadmesh.py b/utils3d/utils3d/numpy/quadmesh.py new file mode 100644 index 0000000000000000000000000000000000000000..0d0db80823116aac81996188c997f44ad19ee58e --- /dev/null +++ b/utils3d/utils3d/numpy/quadmesh.py @@ -0,0 +1,472 @@ +import numpy as np +import scipy as sp +import scipy.optimize as spopt +from typing import * + + +__all__ = [ + 'calc_quad_candidates', + 'calc_quad_distortion', + 'calc_quad_direction', + 'calc_quad_smoothness', + 'sovle_quad', + 'sovle_quad_qp', + 'tri_to_quad' +] + + +def calc_quad_candidates( + edges: np.ndarray, + face2edge: np.ndarray, + edge2face: np.ndarray, +): + """ + Calculate the candidate quad faces. + + Args: + edges (np.ndarray): [E, 2] edge indices + face2edge (np.ndarray): [T, 3] face to edge relation + edge2face (np.ndarray): [E, 2] edge to face relation + + Returns: + quads (np.ndarray): [Q, 4] quad candidate indices + quad2edge (np.ndarray): [Q, 4] edge to quad candidate relation + quad2adj (np.ndarray): [Q, 8] adjacent quad candidates of each quad candidate + quads_valid (np.ndarray): [E] whether the quad corresponding to the edge is valid + """ + E = edges.shape[0] + T = face2edge.shape[0] + + quads_valid = edge2face[:, 1] != -1 + Q = quads_valid.sum() + quad2face = edge2face[quads_valid] # [Q, 2] + quad2edge = face2edge[quad2face] # [Q, 2, 3] + flag = quad2edge == np.arange(E)[quads_valid][:, None, None] # [Q, 2, 3] + flag = flag.argmax(axis=-1) # [Q, 2] + quad2edge = np.stack([ + quad2edge[np.arange(Q)[:, None], np.arange(2)[None, :], (flag + 1) % 3], + quad2edge[np.arange(Q)[:, None], np.arange(2)[None, :], (flag + 2) % 3], + ], axis=-1).reshape(Q, 4) # [Q, 4] + + quads = np.concatenate([ + np.where( + 
def calc_quad_distortion(
    vertices: np.ndarray,
    quads: np.ndarray,
):
    """
    Calculate the distortion of each candidate quad face.

    The distortion is a sum of three energies:
      * angle energy: deviation of the four corner angles from 90 degrees;
      * anisotropy energy: difference between opposite corner angles,
        normalized by how close the angle pair is to degeneracy (0 or 180 deg);
      * planarity energy: dihedral angle between the two triangles forming the
        quad; a quad bent by more than 45 degrees gets a huge (1e10) penalty.

    Args:
        vertices (np.ndarray): [N, 3] 3-dimensional vertices
        quads (np.ndarray): [Q, 4] quad face indices

    Returns:
        distortion (np.ndarray): [Q] distortion of each quad face
    """
    edge0 = vertices[quads[:, 1]] - vertices[quads[:, 0]]   # [Q, 3]
    edge1 = vertices[quads[:, 2]] - vertices[quads[:, 1]]   # [Q, 3]
    edge2 = vertices[quads[:, 3]] - vertices[quads[:, 2]]   # [Q, 3]
    edge3 = vertices[quads[:, 0]] - vertices[quads[:, 3]]   # [Q, 3]
    cross = vertices[quads[:, 0]] - vertices[quads[:, 2]]   # [Q, 3] diagonal 2 -> 0

    # Clamp lengths away from zero so degenerate edges cannot divide by zero.
    len0 = np.maximum(np.linalg.norm(edge0, axis=-1), 1e-10)    # [Q]
    len1 = np.maximum(np.linalg.norm(edge1, axis=-1), 1e-10)    # [Q]
    len2 = np.maximum(np.linalg.norm(edge2, axis=-1), 1e-10)    # [Q]
    len3 = np.maximum(np.linalg.norm(edge3, axis=-1), 1e-10)    # [Q]
    len_cross = np.maximum(np.linalg.norm(cross, axis=-1), 1e-10)   # [Q]

    # Corner angles; the corners split by the diagonal are the sum of two sub-angles.
    angle0 = np.arccos(np.clip(np.sum(-edge0 * edge1, axis=-1) / (len0 * len1), -1, 1))     # [Q]
    angle1 = np.arccos(np.clip(np.sum(-edge1 * cross, axis=-1) / (len1 * len_cross), -1, 1)) \
        + np.arccos(np.clip(np.sum(cross * edge2, axis=-1) / (len_cross * len2), -1, 1))    # [Q]
    angle2 = np.arccos(np.clip(np.sum(-edge2 * edge3, axis=-1) / (len2 * len3), -1, 1))     # [Q]
    angle3 = np.arccos(np.clip(np.sum(-edge3 * -cross, axis=-1) / (len3 * len_cross), -1, 1)) \
        + np.arccos(np.clip(np.sum(-cross * edge0, axis=-1) / (len_cross * len0), -1, 1))   # [Q]

    # Normals of the two triangles of the quad; their angle measures non-planarity.
    normal0 = np.cross(edge0, edge1)    # [Q, 3]
    normal1 = np.cross(edge2, edge3)    # [Q, 3]
    normal0 = normal0 / np.maximum(np.linalg.norm(normal0, axis=-1, keepdims=True), 1e-10)  # [Q, 3]
    normal1 = normal1 / np.maximum(np.linalg.norm(normal1, axis=-1, keepdims=True), 1e-10)  # [Q, 3]
    angle_normal = np.arccos(np.clip(np.sum(normal0 * normal1, axis=-1), -1, 1))    # [Q]

    D90 = np.pi / 2
    D180 = np.pi
    ang_eng = (np.abs(angle0 - D90)**2 + np.abs(angle1 - D90)**2 + np.abs(angle2 - D90)**2 + np.abs(angle3 - D90)**2) / 4   # [Q]
    dist_eng = np.abs(angle0 - angle2)**2 / np.minimum(np.maximum(np.minimum(angle0, angle2), 1e-10), np.maximum(D180 - np.maximum(angle0, angle2), 1e-10)) \
        + np.abs(angle1 - angle3)**2 / np.minimum(np.maximum(np.minimum(angle1, angle3), 1e-10), np.maximum(D180 - np.maximum(angle1, angle3), 1e-10))   # [Q]
    plane_eng = np.where(angle_normal < D90 / 2, np.abs(angle_normal)**2, 1e10)     # [Q]
    eng = ang_eng + 2 * dist_eng + 2 * plane_eng    # [Q]

    return eng


def calc_quad_direction(
    vertices: np.ndarray,
    quads: np.ndarray,
):
    """
    Calculate the direction of each candidate quad face.

    Args:
        vertices (np.ndarray): [N, 3] 3-dimensional vertices
        quads (np.ndarray): [Q, 4] quad face indices

    Returns:
        direction (np.ndarray): [Q, 4] direction of each quad face.
            Represented by the angle between each edge and the crossing
            (segment joining the midpoints of the opposite edges).
    """
    # Midpoints of the four edges.
    mid0 = (vertices[quads[:, 0]] + vertices[quads[:, 1]]) / 2  # [Q, 3]
    mid1 = (vertices[quads[:, 1]] + vertices[quads[:, 2]]) / 2  # [Q, 3]
    mid2 = (vertices[quads[:, 2]] + vertices[quads[:, 3]]) / 2  # [Q, 3]
    mid3 = (vertices[quads[:, 3]] + vertices[quads[:, 0]]) / 2  # [Q, 3]

    # Crossings connect opposite edge midpoints; normalize to unit length.
    cross0 = mid2 - mid0    # [Q, 3]
    cross1 = mid3 - mid1    # [Q, 3]
    cross0 = cross0 / np.maximum(np.linalg.norm(cross0, axis=-1, keepdims=True), 1e-10)     # [Q, 3]
    cross1 = cross1 / np.maximum(np.linalg.norm(cross1, axis=-1, keepdims=True), 1e-10)     # [Q, 3]

    edge0 = vertices[quads[:, 1]] - vertices[quads[:, 0]]   # [Q, 3]
    edge1 = vertices[quads[:, 2]] - vertices[quads[:, 1]]   # [Q, 3]
    edge2 = vertices[quads[:, 3]] - vertices[quads[:, 2]]   # [Q, 3]
    edge3 = vertices[quads[:, 0]] - vertices[quads[:, 3]]   # [Q, 3]
    edge0 = edge0 / np.maximum(np.linalg.norm(edge0, axis=-1, keepdims=True), 1e-10)    # [Q, 3]
    edge1 = edge1 / np.maximum(np.linalg.norm(edge1, axis=-1, keepdims=True), 1e-10)    # [Q, 3]
    edge2 = edge2 / np.maximum(np.linalg.norm(edge2, axis=-1, keepdims=True), 1e-10)    # [Q, 3]
    edge3 = edge3 / np.maximum(np.linalg.norm(edge3, axis=-1, keepdims=True), 1e-10)    # [Q, 3]

    direction = np.stack([
        np.arccos(np.clip(np.sum(cross0 * edge0, axis=-1), -1, 1)),
        np.arccos(np.clip(np.sum(cross1 * edge1, axis=-1), -1, 1)),
        np.arccos(np.clip(np.sum(-cross0 * edge2, axis=-1), -1, 1)),
        np.arccos(np.clip(np.sum(-cross1 * edge3, axis=-1), -1, 1)),
    ], axis=-1)     # [Q, 4]

    return direction


def calc_quad_smoothness(
    quad2edge: np.ndarray,
    quad2adj: np.ndarray,
    quads_direction: np.ndarray,
):
    """
    Calculate the smoothness of each candidate quad face connection.

    Args:
        quad2edge (np.ndarray): [Q, 4] edges of each quad face
        quad2adj (np.ndarray): [Q, 8] adjacent quad faces of each quad face (-1 for none)
        quads_direction (np.ndarray): [Q, 4] direction of each quad face

    Returns:
        smoothness (np.ndarray): [Q, 8] smoothness of each quad face connection
            (0 where there is no adjacent quad)
    """
    Q = quad2adj.shape[0]
    quad2adj_valid = quad2adj != -1
    connections = np.stack([
        np.arange(Q)[:, None].repeat(8, axis=1),
        quad2adj,
    ], axis=-1)[quad2adj_valid]     # [C, 2]
    # Index of the shared edge on each side of the connection.
    shared_edge_idx_0 = np.array([[0, 0, 1, 1, 2, 2, 3, 3]]).repeat(Q, axis=0)[quad2adj_valid]  # [C]
    shared_edge_idx_1 = np.argmax(quad2edge[quad2adj][quad2adj_valid] == quad2edge[connections[:, 0], shared_edge_idx_0][:, None], axis=-1)  # [C]
    # Smoothness is the squared difference of the two quads' directions at the shared edge.
    valid_smoothness = np.abs(quads_direction[connections[:, 0], shared_edge_idx_0] - quads_direction[connections[:, 1], shared_edge_idx_1])**2  # [C]
    smoothness = np.zeros([Q, 8], dtype=np.float32)
    smoothness[quad2adj_valid] = valid_smoothness
    return smoothness


def sovle_quad(
    face2edge: np.ndarray,
    edge2face: np.ndarray,
    quad2adj: np.ndarray,
    quads_distortion: np.ndarray,
    quads_smoothness: np.ndarray,
    quads_valid: np.ndarray,
):
    """
    Solve the quad mesh from the candidate quad faces via iterated linear programming.

    Args:
        face2edge (np.ndarray): [T, 3] face to edge relation
        edge2face (np.ndarray): [E, 2] edge to face relation
        quad2adj (np.ndarray): [Q, 8] adjacent quad faces of each quad face (-1 for none)
        quads_distortion (np.ndarray): [Q] distortion of each quad face
        quads_smoothness (np.ndarray): [Q, 8] smoothness of each quad face connection
        quads_valid (np.ndarray): [E] whether the quad corresponding to the edge is valid

    Returns:
        quads_weight (np.ndarray): [Q] weight of each valid quad face
        conn_min_weight (np.ndarray): [C] minimum weight of each quad face connection
        conn_max_weight (np.ndarray): [C] maximum weight of each quad face connection

    Raises:
        RuntimeError: if the LP solver fails on the very first iteration.
    """
    T = face2edge.shape[0]
    E = edge2face.shape[0]
    Q = quads_distortion.shape[0]
    edge_valid = -np.ones(E, dtype=np.int32)
    edge_valid[quads_valid] = np.arange(Q)

    # Deduplicate connections (each adjacency is seen from both sides).
    quads_connection = np.stack([
        np.arange(Q)[:, None].repeat(8, axis=1),
        quad2adj,
    ], axis=-1)[quad2adj != -1]     # [C, 2]
    quads_connection = np.sort(quads_connection, axis=-1)   # [C, 2]
    quads_connection, quads_connection_idx = np.unique(quads_connection, axis=0, return_index=True)     # [C, 2], [C]
    quads_smoothness = quads_smoothness[quad2adj != -1]     # [C]
    quads_smoothness = quads_smoothness[quads_connection_idx]   # [C]
    C = quads_connection.shape[0]

    # Construct the linear programming problem
    #
    # Variables (Q + 2C in total):
    #   quads_weight:    [Q] weight of each quad face
    #   conn_min_weight: [C] minimum weight of each quad face connection
    #   conn_max_weight: [C] maximum weight of each quad face connection
    # Objective:
    #   minimize total distortion of selected quads minus smoothness rewards;
    #   the constants (-3, -2) bias the solver towards selecting quads.

    c = np.concatenate([
        quads_distortion - 3,
        quads_smoothness * 4 - 2,
        quads_smoothness * 4,
    ], axis=0)  # [Q+2C]

    # Inequality constraints: each triangle is covered by at most one quad;
    # conn_min/conn_max bound the pairwise min/max of connected quad weights.
    A_ub_triplet = np.concatenate([
        np.stack([np.arange(T), edge_valid[face2edge[:, 0]], np.ones(T)], axis=1),  # [T, 3]
        np.stack([np.arange(T), edge_valid[face2edge[:, 1]], np.ones(T)], axis=1),  # [T, 3]
        np.stack([np.arange(T), edge_valid[face2edge[:, 2]], np.ones(T)], axis=1),  # [T, 3]
        np.stack([np.arange(T, T+C), np.arange(Q, Q+C), np.ones(C)], axis=1),   # [C, 3]
        np.stack([np.arange(T, T+C), quads_connection[:, 0], -np.ones(C)], axis=1),     # [C, 3]
        np.stack([np.arange(T, T+C), quads_connection[:, 1], -np.ones(C)], axis=1),     # [C, 3]
        np.stack([np.arange(T+C, T+2*C), np.arange(Q+C, Q+2*C), -np.ones(C)], axis=1),  # [C, 3]
        np.stack([np.arange(T+C, T+2*C), quads_connection[:, 0], np.ones(C)], axis=1),  # [C, 3]
        np.stack([np.arange(T+C, T+2*C), quads_connection[:, 1], np.ones(C)], axis=1),  # [C, 3]
    ], axis=0)  # [3T+6C, 3]
    A_ub_triplet = A_ub_triplet[A_ub_triplet[:, 1] != -1]   # drop edges with no candidate quad
    A_ub = sp.sparse.coo_matrix((A_ub_triplet[:, 2], (A_ub_triplet[:, 0], A_ub_triplet[:, 1])), shape=[T+2*C, Q+2*C])
    b_ub = np.concatenate([np.ones(T), -np.ones(C), np.ones(C)], axis=0)    # [T+2C]
    bound = np.stack([
        np.concatenate([np.zeros(Q), -np.ones(C), np.zeros(C)], axis=0),
        np.concatenate([np.ones(Q), np.ones(C), np.ones(C)], axis=0),
    ], axis=1)  # [Q+2C, 2]
    A_eq = None
    b_eq = None

    print('Solver statistics:')
    print(f'    #T = {T}')
    print(f'    #Q = {Q}')
    print(f'    #C = {C}')

    # Iteratively solve, freezing quads selected in the previous round via
    # equality constraints, until the selection stops growing.
    res = None
    last_num_valid = 0
    for i in range(100):
        res_ = spopt.linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bound)
        if not res_.success:
            print(f'    Iter {i} | Failed with {res_.message}')
            break
        res = res_
        weights = res.x[:Q]
        valid = (weights > 0.5)
        num_valid = valid.sum()
        print(f'    Iter {i} | #Q_valid = {num_valid}')
        if num_valid == last_num_valid:
            break
        last_num_valid = num_valid
        A_eq_triplet = np.stack([
            np.arange(num_valid),
            np.arange(Q)[valid],
            np.ones(num_valid),
        ], axis=1)  # [num_valid, 3]
        A_eq = sp.sparse.coo_matrix((A_eq_triplet[:, 2], (A_eq_triplet[:, 0], A_eq_triplet[:, 1])), shape=[num_valid, Q+2*C])
        b_eq = np.where(weights[valid] > 0.5, 1, 0)     # [num_valid]
    if res is None:
        raise RuntimeError('sovle_quad: linear programming failed on the first iteration')

    # Return the result
    quads_weight = res.x[:Q]
    conn_min_weight = res.x[Q:Q+C]
    conn_max_weight = res.x[Q+C:Q+2*C]
    return quads_weight, conn_min_weight, conn_max_weight


# Correctly spelled alias; the misspelled name is kept for backward compatibility.
solve_quad = sovle_quad
def sovle_quad_qp(
    face2edge: np.ndarray,
    edge2face: np.ndarray,
    quad2adj: np.ndarray,
    quads_distortion: np.ndarray,
    quads_smoothness: np.ndarray,
    quads_valid: np.ndarray,
):
    """
    Solve the quad mesh from the candidate quad faces as a single quadratic program
    (alternative to the iterated LP in `sovle_quad`). Requires the third-party
    `piqp` solver, imported lazily below.

    Args:
        face2edge (np.ndarray): [T, 3] face to edge relation
        edge2face (np.ndarray): [E, 2] edge to face relation
        quad2adj (np.ndarray): [Q, 8] adjacent quad faces of each quad face (-1 for none)
        quads_distortion (np.ndarray): [Q] distortion of each quad face
        quads_smoothness (np.ndarray): [Q, 8] smoothness of each quad face connection
        quads_valid (np.ndarray): [E] whether the quad corresponding to the edge is valid

    Returns:
        weights (np.ndarray): [Q] weight of each valid quad face
    """
    T = face2edge.shape[0]
    E = edge2face.shape[0]
    Q = quads_distortion.shape[0]
    edge_valid = -np.ones(E, dtype=np.int32)
    edge_valid[quads_valid] = np.arange(Q)

    # Quadratic term: pairwise smoothness between adjacent quads.
    C_smoothness_triplet = np.stack([
        np.arange(Q)[:, None].repeat(8, axis=1)[quad2adj != -1],
        quad2adj[quad2adj != -1],
        5 * quads_smoothness[quad2adj != -1],
    ], axis=-1)     # [C, 3]
    C_smoothness = sp.sparse.coo_matrix((C_smoothness_triplet[:, 2], (C_smoothness_triplet[:, 0], C_smoothness_triplet[:, 1])), shape=[Q, Q])   # [Q, Q]
    C_smoothness = C_smoothness.tocsc()
    # Linear term: distortion minus a constant bias towards selecting quads.
    C_dist = quads_distortion - 20  # [Q]

    # Dummy (all-zero) equality constraint: piqp's setup requires one.
    A_eq = sp.sparse.coo_matrix((np.zeros(Q), (np.zeros(Q), np.arange(Q))), shape=[1, Q])   # [1, Q]
    A_eq = A_eq.tocsc()
    b_eq = np.array([0])

    # Inequality: each triangle covered by at most one quad.
    A_ub_triplet = np.concatenate([
        np.stack([np.arange(T), edge_valid[face2edge[:, 0]], np.ones(T)], axis=1),  # [T, 3]
        np.stack([np.arange(T), edge_valid[face2edge[:, 1]], np.ones(T)], axis=1),  # [T, 3]
        np.stack([np.arange(T), edge_valid[face2edge[:, 2]], np.ones(T)], axis=1),  # [T, 3]
    ], axis=0)  # [3T, 3]
    A_ub_triplet = A_ub_triplet[A_ub_triplet[:, 1] != -1]   # drop edges with no candidate quad
    A_ub = sp.sparse.coo_matrix((A_ub_triplet[:, 2], (A_ub_triplet[:, 0], A_ub_triplet[:, 1])), shape=[T, Q])   # [T, Q]
    A_ub = A_ub.tocsc()
    b_ub = np.ones(T)

    lb = np.zeros(Q)
    ub = np.ones(Q)

    import piqp  # third-party QP solver; imported lazily so the module works without it
    solver = piqp.SparseSolver()
    solver.settings.verbose = True
    solver.settings.compute_timings = True
    solver.setup(C_smoothness, C_dist, A_eq, b_eq, A_ub, b_ub, lb, ub)

    status = solver.solve()  # status retained for debugging; result read from solver.result

    # Return the result
    weights = solver.result.x
    return weights


# Correctly spelled alias; the misspelled name is kept for backward compatibility.
solve_quad_qp = sovle_quad_qp


def tri_to_quad(
    vertices: np.ndarray,
    faces: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Convert a triangle mesh to a quad mesh.
    NOTE: The input mesh must be a manifold mesh.

    Args:
        vertices (np.ndarray): [N, 3] 3-dimensional vertices
        faces (np.ndarray): [T, 3] triangular face indices

    Returns:
        vertices (np.ndarray): [N_, 3] 3-dimensional vertices
        faces (np.ndarray): [Q, 4] quad face indices

    Raises:
        NotImplementedError: always; the end-to-end pipeline is not wired up yet.
    """
    raise NotImplementedError


if __name__ == '__main__':
    # Demo / visual inspection script: quadrangulate a test mesh and dump
    # intermediate quantities as colored PLY files.
    import os
    import sys
    sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
    import utils3d
    import numpy as np
    import cv2
    from vis import vis_edge_color

    file = 'miku'

    vertices, faces = utils3d.io.read_ply(f'test/assets/{file}.ply')
    edges, edge2face, face2edge, face2face = calc_relations(faces)
    quad_cands, quad2edge, quad2adj, quad_valid = calc_quad_candidates(edges, face2edge, edge2face)
    distortion = calc_quad_distortion(vertices, quad_cands)
    direction = calc_quad_direction(vertices, quad_cands)
    smoothness = calc_quad_smoothness(quad2edge, quad2adj, direction)
    boundary_edges = edges[edge2face[:, 1] == -1]
    quads_weight, conn_min_weight, conn_max_weight = sovle_quad(face2edge, edge2face, quad2adj, distortion, smoothness, quad_valid)
    quads = quad_cands[quads_weight > 0.5]
    print('Mesh statistics')
    print(f'    #V = {vertices.shape[0]}')
    print(f'    #F = {faces.shape[0]}')
    print(f'    #E = {edges.shape[0]}')
    print(f'    #B = {boundary_edges.shape[0]}')
    print(f'    #Q_cand = {quad_cands.shape[0]}')
    print(f'    #Q = {quads.shape[0]}')

    utils3d.io.write_ply(f'test/assets/{file}_boundary_edges.ply', vertices=vertices, edges=boundary_edges)
    utils3d.io.write_ply(f'test/assets/{file}_quad_candidates.ply', vertices=vertices, faces=quads)

    edge_colors = np.zeros([edges.shape[0], 3], dtype=np.uint8)
    distortion = (distortion - distortion.min()) / (distortion.max() - distortion.min())
    distortion = (distortion * 255).astype(np.uint8)
    edge_colors[quad_valid] = cv2.cvtColor(cv2.applyColorMap(distortion, cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB).reshape(-1, 3)
    utils3d.io.write_ply(f'test/assets/{file}_quad_candidates_distortion.ply', **vis_edge_color(vertices, edges, edge_colors))

    edge_colors = np.zeros([edges.shape[0], 3], dtype=np.uint8)
    edge_colors[quad_valid] = cv2.cvtColor(cv2.applyColorMap((quads_weight * 255).astype(np.uint8), cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB).reshape(-1, 3)
    utils3d.io.write_ply(f'test/assets/{file}_quad_candidates_weights.ply', **vis_edge_color(vertices, edges, edge_colors))
    utils3d.io.write_ply(f'test/assets/{file}_quad.ply', vertices=vertices, faces=quads)

    quad_centers = vertices[quad_cands].mean(axis=1)
    conns = np.stack([
        np.arange(quad_cands.shape[0])[:, None].repeat(8, axis=1),
        quad2adj,
    ], axis=-1)[quad2adj != -1]     # [C, 2]
    conns, conns_idx = np.unique(np.sort(conns, axis=-1), axis=0, return_index=True)    # [C, 2], [C]
    smoothness = smoothness[quad2adj != -1][conns_idx]  # [C]
    conns_color = cv2.cvtColor(cv2.applyColorMap((smoothness * 255).astype(np.uint8), cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB).reshape(-1, 3)
utils3d.io.write_ply(f'test/assets/{file}_quad_conn_smoothness.ply', **vis_edge_color(quad_centers, conns, conns_color)) + conns_color = cv2.cvtColor(cv2.applyColorMap((conn_min_weight * 255).astype(np.uint8), cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB).reshape(-1, 3) + utils3d.io.write_ply(f'test/assets/{file}_quad_conn_min.ply', **vis_edge_color(quad_centers, conns, conns_color)) + conns_color = cv2.cvtColor(cv2.applyColorMap((conn_max_weight * 255).astype(np.uint8), cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB).reshape(-1, 3) + utils3d.io.write_ply(f'test/assets/{file}_quad_conn_max.ply', **vis_edge_color(quad_centers, conns, conns_color)) + + \ No newline at end of file diff --git a/utils3d/utils3d/numpy/rasterization.py b/utils3d/utils3d/numpy/rasterization.py new file mode 100644 index 0000000000000000000000000000000000000000..e893049bb3050443bb396fe33566b619979641c6 --- /dev/null +++ b/utils3d/utils3d/numpy/rasterization.py @@ -0,0 +1,469 @@ +import os +from typing import * + +import numpy as np +import moderngl + +from . import transforms, utils, mesh + + +__all__ = [ + 'RastContext', + 'rasterize_triangle_faces', + 'rasterize_edges', + 'texture', + 'test_rasterization', + 'warp_image_by_depth', +] + + +def map_np_dtype(dtype) -> str: + if dtype == int: + return 'i4' + elif dtype == np.uint8: + return 'u1' + elif dtype == np.uint32: + return 'u2' + elif dtype == np.float16: + return 'f2' + elif dtype == np.float32: + return 'f4' + + +def one_value(dtype): + if dtype == 'u1': + return 255 + elif dtype == 'u2': + return 65535 + else: + return 1 + + +class RastContext: + def __init__(self, *args, **kwargs): + """ + Create a moderngl context. 
+ + Args: + See moderngl.create_context + """ + if len(args) == 1 and isinstance(args[0], moderngl.Context): + self.mgl_ctx = args[0] + else: + self.mgl_ctx = moderngl.create_context(*args, **kwargs) + self.__prog_src = {} + self.__prog = {} + + def program_vertex_attribute(self, n: int) -> moderngl.Program: + assert n in [1, 2, 3, 4], 'vertex attribute only supports channels 1, 2, 3, 4' + + if 'vertex_attribute_vsh' not in self.__prog_src: + with open(os.path.join(os.path.dirname(__file__), 'shaders', 'vertex_attribute.vsh'), 'r') as f: + self.__prog_src['vertex_attribute_vsh'] = f.read() + if 'vertex_attribute_fsh' not in self.__prog_src: + with open(os.path.join(os.path.dirname(__file__), 'shaders', 'vertex_attribute.fsh'), 'r') as f: + self.__prog_src['vertex_attribute_fsh'] = f.read() + + if f'vertex_attribute_{n}' not in self.__prog: + vsh = self.__prog_src['vertex_attribute_vsh'].replace('vecN', f'vec{n}') + fsh = self.__prog_src['vertex_attribute_fsh'].replace('vecN', f'vec{n}') + self.__prog[f'vertex_attribute_{n}'] = self.mgl_ctx.program(vertex_shader=vsh, fragment_shader=fsh) + + return self.__prog[f'vertex_attribute_{n}'] + + def program_texture(self, n: int) -> moderngl.Program: + assert n in [1, 2, 3, 4], 'texture only supports channels 1, 2, 3, 4' + + if 'texture_vsh' not in self.__prog_src: + with open(os.path.join(os.path.dirname(__file__), 'shaders', 'texture.vsh'), 'r') as f: + self.__prog_src['texture_vsh'] = f.read() + if 'texture_fsh' not in self.__prog_src: + with open(os.path.join(os.path.dirname(__file__), 'shaders', 'texture.fsh'), 'r') as f: + self.__prog_src['texture_fsh'] = f.read() + + if f'texture_{n}' not in self.__prog: + vsh = self.__prog_src['texture_vsh'].replace('vecN', f'vec{n}') + fsh = self.__prog_src['texture_fsh'].replace('vecN', f'vec{n}') + self.__prog[f'texture_{n}'] = self.mgl_ctx.program(vertex_shader=vsh, fragment_shader=fsh) + self.__prog[f'texture_{n}']['tex'] = 0 + self.__prog[f'texture_{n}']['uv'] = 1 + + return 
def rasterize_triangle_faces(
    ctx: RastContext,
    vertices: np.ndarray,
    faces: np.ndarray,
    attr: np.ndarray,
    width: int,
    height: int,
    transform: np.ndarray = None,
    cull_backface: bool = True,
    return_depth: bool = False,
    image: np.ndarray = None,
    depth: np.ndarray = None
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Rasterize triangle faces, interpolating a per-vertex attribute.

    Args:
        ctx (RastContext): rasterization context
        vertices (np.ndarray): [N, 3] float32 vertex positions
        faces (np.ndarray): [T, 3] int32/uint32 triangle indices
        attr (np.ndarray): [N, C] float32 vertex attribute, C in {1, 2, 3, 4}
        width (int): width of rendered image
        height (int): height of rendered image
        transform (np.ndarray): [4, 4] float32 model-view-projection matrix. Defaults to identity.
        cull_backface (bool): whether to cull backfaces
        return_depth (bool): whether to also read back the depth buffer
        image (np.ndarray): [H, W, C] optional float32 background image
        depth (np.ndarray): [H, W] optional float32 background depth

    Returns:
        image (np.ndarray): [H, W, C] rendered image
        depth (np.ndarray): [H, W] screen space depth, ranging from 0 to 1. If return_depth is False, it is None.
    """
    assert vertices.ndim == 2 and vertices.shape[1] == 3
    assert faces.ndim == 2 and faces.shape[1] == 3, f"Faces should be a 2D array with shape (T, 3), but got {faces.shape}"
    assert attr.ndim == 2 and attr.shape[1] in [1, 2, 3, 4], f'Vertex attribute only supports channels 1, 2, 3, 4, but got {attr.shape}'
    assert vertices.shape[0] == attr.shape[0]
    assert vertices.dtype == np.float32
    assert faces.dtype == np.uint32 or faces.dtype == np.int32
    assert attr.dtype == np.float32, "Attribute should be float32"
    assert transform is None or transform.shape == (4, 4), f"Transform should be a 4x4 matrix, but got {transform.shape}"
    assert transform is None or transform.dtype == np.float32, f"Transform should be float32, but got {transform.dtype}"
    if image is not None:
        assert image.ndim == 3 and image.shape == (height, width, attr.shape[1]), f"Image should be a 3D array with shape (H, W, {attr.shape[1]}), but got {image.shape}"
        assert image.dtype == np.float32, f"Image should be float32, but got {image.dtype}"
    if depth is not None:
        assert depth.ndim == 2 and depth.shape == (height, width), f"Depth should be a 2D array with shape (H, W), but got {depth.shape}"
        assert depth.dtype == np.float32, f"Depth should be float32, but got {depth.dtype}"

    C = attr.shape[1]
    prog = ctx.program_vertex_attribute(C)

    # BUGFIX: np.eye(4, np.float32) passed the dtype where the column count goes.
    transform = np.eye(4, dtype=np.float32) if transform is None else transform

    # Upload geometry.
    ibo = ctx.mgl_ctx.buffer(np.ascontiguousarray(faces, dtype='i4'))
    vbo_vertices = ctx.mgl_ctx.buffer(np.ascontiguousarray(vertices, dtype='f4'))
    vbo_attr = ctx.mgl_ctx.buffer(np.ascontiguousarray(attr, dtype='f4'))
    vao = ctx.mgl_ctx.vertex_array(
        prog,
        [
            (vbo_vertices, '3f', 'i_position'),
            (vbo_attr, f'{C}f', 'i_attr'),
        ],
        ibo,
        mode=moderngl.TRIANGLES,
    )

    # Create framebuffer (GL textures are stored bottom-up, hence the vertical flips).
    image_tex = ctx.mgl_ctx.texture((width, height), C, dtype='f4', data=np.ascontiguousarray(image[::-1, :, :]) if image is not None else None)
    depth_tex = ctx.mgl_ctx.depth_texture((width, height), data=np.ascontiguousarray(depth[::-1, :]) if depth is not None else None)
    fbo = ctx.mgl_ctx.framebuffer(
        color_attachments=[image_tex],
        depth_attachment=depth_tex,
    )

    # Render
    prog['u_mvp'].write(transform.transpose().copy().astype('f4'))
    fbo.use()
    fbo.viewport = (0, 0, width, height)
    ctx.mgl_ctx.depth_func = '<'
    if depth is None:
        ctx.mgl_ctx.clear(depth=1.0)
    ctx.mgl_ctx.enable(ctx.mgl_ctx.DEPTH_TEST)
    if cull_backface:
        ctx.mgl_ctx.enable(ctx.mgl_ctx.CULL_FACE)
    else:
        ctx.mgl_ctx.disable(ctx.mgl_ctx.CULL_FACE)
    vao.render()
    ctx.mgl_ctx.disable(ctx.mgl_ctx.DEPTH_TEST)

    # Read back, flipping to top-down row order.
    image = np.zeros((height, width, C), dtype='f4')
    image_tex.read_into(image)
    image = image[::-1, :, :]
    if return_depth:
        depth = np.zeros((height, width), dtype='f4')
        depth_tex.read_into(depth)
        depth = depth[::-1, :]
    else:
        depth = None

    # Release all GL objects.
    vao.release()
    ibo.release()
    vbo_vertices.release()
    vbo_attr.release()
    fbo.release()
    image_tex.release()
    depth_tex.release()

    return image, depth


def rasterize_edges(
    ctx: RastContext,
    vertices: np.ndarray,
    edges: np.ndarray,
    attr: np.ndarray,
    width: int,
    height: int,
    transform: np.ndarray = None,
    line_width: float = 1.0,
    return_depth: bool = False,
    image: np.ndarray = None,
    depth: np.ndarray = None
) -> Tuple[np.ndarray, ...]:
    """
    Rasterize line segments, interpolating a per-vertex attribute.

    Args:
        ctx (RastContext): rasterization context
        vertices (np.ndarray): [N, 3] float32 vertex positions
        edges (np.ndarray): [E, 2] int32/uint32 edge indices
        attr (np.ndarray): [N, C] float32 vertex attribute, C in {1, 2, 3, 4}
        width (int): width of rendered image
        height (int): height of rendered image
        transform (np.ndarray): [4, 4] model-view-projection matrix. Defaults to identity.
        line_width (float): width of line. Defaults to 1.0. NOTE: Values other than 1.0 may not work across all platforms.
        return_depth (bool): whether to also read back the depth buffer
        image (np.ndarray): [H, W, C] optional float32 background image
        depth (np.ndarray): [H, W] optional float32 background depth

    Returns:
        image (np.ndarray): [H, W, C] rendered image
        depth (np.ndarray): [H, W] screen space depth, ranging from 0 to 1. If return_depth is False, it is None.
    """
    assert vertices.ndim == 2 and vertices.shape[1] == 3
    assert edges.ndim == 2 and edges.shape[1] == 2, f"Edges should be a 2D array with shape (T, 2), but got {edges.shape}"
    assert attr.ndim == 2 and attr.shape[1] in [1, 2, 3, 4], f'Vertex attribute only supports channels 1, 2, 3, 4, but got {attr.shape}'
    assert vertices.shape[0] == attr.shape[0]
    assert vertices.dtype == np.float32
    assert edges.dtype == np.uint32 or edges.dtype == np.int32
    assert attr.dtype == np.float32, "Attribute should be float32"

    C = attr.shape[1]
    prog = ctx.program_vertex_attribute(C)

    # BUGFIX: np.eye(4, np.float32) passed the dtype where the column count goes.
    transform = transform if transform is not None else np.eye(4, dtype=np.float32)

    # Upload geometry.
    ibo = ctx.mgl_ctx.buffer(np.ascontiguousarray(edges, dtype='i4'))
    vbo_vertices = ctx.mgl_ctx.buffer(np.ascontiguousarray(vertices, dtype='f4'))
    vbo_attr = ctx.mgl_ctx.buffer(np.ascontiguousarray(attr, dtype='f4'))
    vao = ctx.mgl_ctx.vertex_array(
        prog,
        [
            (vbo_vertices, '3f', 'i_position'),
            (vbo_attr, f'{C}f', 'i_attr'),
        ],
        ibo,
        mode=moderngl.LINES,
    )

    # Create framebuffer (GL textures are stored bottom-up, hence the vertical flips).
    image_tex = ctx.mgl_ctx.texture((width, height), C, dtype='f4', data=np.ascontiguousarray(image[::-1, :, :]) if image is not None else None)
    depth_tex = ctx.mgl_ctx.depth_texture((width, height), data=np.ascontiguousarray(depth[::-1, :]) if depth is not None else None)
    fbo = ctx.mgl_ctx.framebuffer(
        color_attachments=[image_tex],
        depth_attachment=depth_tex,
    )

    # Render
    prog['u_mvp'].write(transform.transpose().copy().astype('f4'))
    fbo.use()
    fbo.viewport = (0, 0, width, height)
    if depth is None:
        ctx.mgl_ctx.clear(depth=1.0)
    ctx.mgl_ctx.depth_func = '<'
    ctx.mgl_ctx.enable(ctx.mgl_ctx.DEPTH_TEST)
    ctx.mgl_ctx.line_width = line_width
    vao.render()
    ctx.mgl_ctx.disable(ctx.mgl_ctx.DEPTH_TEST)

    # Read back, flipping to top-down row order.
    image = np.zeros((height, width, C), dtype='f4')
    image_tex.read_into(image)
    image = image[::-1, :, :]
    if return_depth:
        depth = np.zeros((height, width), dtype='f4')
        depth_tex.read_into(depth)
        depth = depth[::-1, :]
    else:
        depth = None

    # Release all GL objects.
    vao.release()
    ibo.release()
    vbo_vertices.release()
    vbo_attr.release()
    fbo.release()
    image_tex.release()
    depth_tex.release()

    return image, depth


def texture(
    ctx: RastContext,
    uv: np.ndarray,
    texture: np.ndarray,
    interpolation: str = 'linear',
    wrap: str = 'clamp'
) -> np.ndarray:
    """
    Given an UV image, sample colors from the texture map.

    Args:
        ctx (RastContext): rasterization context
        uv (np.ndarray): [H, W, 2] UV coordinates
        texture (np.ndarray): [Ht, Wt, C] texture map, C in {1, 2, 3, 4}
        interpolation (str): 'linear' or 'nearest'
        wrap (str): wrap mode -- currently unused; TODO confirm intended behavior

    Returns:
        np.ndarray: [H, W, C] sampled image
    """
    assert len(texture.shape) == 3 and 1 <= texture.shape[2] <= 4
    assert uv.shape[2] == 2
    height, width = uv.shape[:2]
    texture_dtype = map_np_dtype(texture.dtype)

    # Fullscreen quad VAO.
    screen_quad_vbo = ctx.mgl_ctx.buffer(np.array([[-1, -1], [1, -1], [1, 1], [-1, 1]], dtype='f4'))
    screen_quad_ibo = ctx.mgl_ctx.buffer(np.array([0, 1, 2, 0, 2, 3], dtype=np.int32))
    screen_quad_vao = ctx.mgl_ctx.vertex_array(ctx.program_texture(texture.shape[2]), [(screen_quad_vbo, '2f4', 'in_vert')], index_buffer=screen_quad_ibo, index_element_size=4)

    # Create texture, set filter and bind. TODO: min mag filter, mipmap
    texture_tex = ctx.mgl_ctx.texture((texture.shape[1], texture.shape[0]), texture.shape[2], dtype=texture_dtype, data=np.ascontiguousarray(texture))
    if interpolation == 'linear':
        texture_tex.filter = (moderngl.LINEAR, moderngl.LINEAR)
    elif interpolation == 'nearest':
        texture_tex.filter = (moderngl.NEAREST, moderngl.NEAREST)
    texture_tex.use(location=0)
    texture_uv = ctx.mgl_ctx.texture((width, height), 2, dtype='f4', data=np.ascontiguousarray(uv.astype('f4', copy=False)))
    texture_uv.filter = (moderngl.NEAREST, moderngl.NEAREST)
    texture_uv.use(location=1)

    # Create render buffer and frame buffer
    rb = ctx.mgl_ctx.renderbuffer((uv.shape[1], uv.shape[0]), texture.shape[2], dtype=texture_dtype)
    fbo = ctx.mgl_ctx.framebuffer(color_attachments=[rb])

    # Render
    fbo.use()
    fbo.viewport = (0, 0, width, height)
    ctx.mgl_ctx.disable(ctx.mgl_ctx.BLEND)
    screen_quad_vao.render()

    # Read buffer
    image_buffer = np.frombuffer(fbo.read(components=texture.shape[2], attachment=0, dtype=texture_dtype), dtype=texture_dtype).reshape((height, width, texture.shape[2]))

    # Release all GL objects.
    # BUGFIX: texture_uv and the screen-quad VAO/VBO/IBO previously leaked.
    screen_quad_vao.release()
    screen_quad_vbo.release()
    screen_quad_ibo.release()
    texture_tex.release()
    texture_uv.release()
    rb.release()
    fbo.release()

    return image_buffer


def warp_image_by_depth(
    ctx: RastContext,
    src_depth: np.ndarray,
    src_image: np.ndarray = None,
    width: int = None,
    height: int = None,
    *,
    extrinsics_src: np.ndarray = None,
    extrinsics_tgt: np.ndarray = None,
    intrinsics_src: np.ndarray = None,
    intrinsics_tgt: np.ndarray = None,
    near: float = 0.1,
    far: float = 100.0,
    cull_backface: bool = True,
    ssaa: int = 1,
    return_depth: bool = False,
) -> Tuple[np.ndarray, ...]:
    """
    Warp image by depth map.

    Args:
        ctx (RastContext): rasterizer context
        src_depth (np.ndarray): [H, W]
        src_image (np.ndarray, optional): [H, W, C]. The image to warp. Defaults to None (use uv coordinates).
        width (int, optional): width of the output image. None to use depth map width. Defaults to None.
        height (int, optional): height of the output image. None to use depth map height. Defaults to None.
        extrinsics_src (np.ndarray, optional): extrinsics matrix of the source camera. Defaults to None (identity).
        extrinsics_tgt (np.ndarray, optional): extrinsics matrix of the target camera. Defaults to None (identity).
        intrinsics_src (np.ndarray, optional): intrinsics matrix of the source camera. Defaults to None (use the same as intrinsics_tgt).
        intrinsics_tgt (np.ndarray, optional): intrinsics matrix of the target camera. Defaults to None (use the same as intrinsics_src).
        near (float, optional): near clipping plane. Defaults to 0.1.
        far (float, optional): far clipping plane. Defaults to 100.0.
        cull_backface (bool, optional): whether to cull backface. Defaults to True.
        ssaa (int, optional): super sampling anti-aliasing. Defaults to 1.
        return_depth (bool, optional): whether to also return the target depth. Defaults to False.

    Returns:
        tgt_image (np.ndarray): [H, W, C] warped image (or uv coordinates if image is None).
        tgt_depth (np.ndarray): [H, W] screen space depth, ranging from 0 to 1. If return_depth is False, it is None.
    """
    assert src_depth.ndim == 2

    if width is None:
        width = src_depth.shape[1]
    if height is None:
        height = src_depth.shape[0]
    if src_image is not None:
        # BUGFIX: compare the spatial (H, W) dimensions; shape[-2:] of an
        # (H, W, C) image is (W, C) and never matched the depth shape.
        assert src_image.shape[:2] == src_depth.shape[:2], f'Shape of source image {src_image.shape} does not match shape of source depth {src_depth.shape}'

    # set up default camera parameters
    extrinsics_src = np.eye(4) if extrinsics_src is None else extrinsics_src
    extrinsics_tgt = np.eye(4) if extrinsics_tgt is None else extrinsics_tgt
    intrinsics_src = intrinsics_tgt if intrinsics_src is None else intrinsics_src
    intrinsics_tgt = intrinsics_src if intrinsics_tgt is None else intrinsics_tgt

    assert all(x is not None for x in [extrinsics_src, extrinsics_tgt, intrinsics_src, intrinsics_tgt]), "Make sure you have provided all the necessary camera parameters."

    # check shapes
    assert extrinsics_src.shape == (4, 4) and extrinsics_tgt.shape == (4, 4)
    assert intrinsics_src.shape == (3, 3) and intrinsics_tgt.shape == (3, 3)

    # convert to view and perspective matrices
    view_tgt = transforms.extrinsics_to_view(extrinsics_tgt)
    perspective_tgt = transforms.intrinsics_to_perspective(intrinsics_tgt, near=near, far=far)

    # unproject depth map to a screen-space mesh
    uv, faces = utils.image_mesh(*src_depth.shape[-2:])
    pts = transforms.unproject_cv(uv, src_depth.reshape(-1), extrinsics_src, intrinsics_src)
    faces = mesh.triangulate(faces, vertices=pts)

    # rasterize attributes (uv coordinates if no image given)
    if src_image is not None:
        attr = src_image.reshape(-1, src_image.shape[-1])
    else:
        attr = uv

    tgt_image, tgt_depth = rasterize_triangle_faces(
        ctx,
        pts,
        faces,
        attr,
        width * ssaa,
        height * ssaa,
        # BUGFIX: cast to float32 -- the default np.eye extrinsics are float64
        # and rasterize_triangle_faces asserts a float32 transform.
        transform=(perspective_tgt @ view_tgt).astype(np.float32),
        cull_backface=cull_backface,
        return_depth=return_depth,
    )

    # downsample the supersampled result
    if ssaa > 1:
        tgt_image = tgt_image.reshape(height, ssaa, width, ssaa, -1).mean(axis=(1, 3))
        # BUGFIX: depth is 2D; the previous trailing -1 axis left a (H, W, 1) result.
        tgt_depth = tgt_depth.reshape(height, ssaa, width, ssaa).mean(axis=(1, 3)) if return_depth else None

    return tgt_image, tgt_depth
+ """ + vertices, faces = utils.cube(tri=True) + attr = np.random.rand(len(vertices), 3).astype(np.float32) + perspective = transforms.perspective(np.deg2rad(60), 1, 0.01, 100) + view = transforms.view_look_at(np.array([2, 2, 2]), np.array([0, 0, 0]), np.array([0, 1, 0])) + image, depth = rasterize_triangle_faces( + ctx, + vertices, + faces, + attr, + 512, 512, + transform=(perspective @ view).astype(np.float32), + cull_backface=False, + return_depth=True, + ) + import cv2 + cv2.imwrite('CHECKME.png', cv2.cvtColor((image.clip(0, 1) * 255).astype(np.uint8), cv2.COLOR_RGB2BGR)) + \ No newline at end of file diff --git a/utils3d/utils3d/numpy/shaders/texture.fsh b/utils3d/utils3d/numpy/shaders/texture.fsh new file mode 100644 index 0000000000000000000000000000000000000000..db6f0299ce72428af75f8c2a69e8b825481778c2 --- /dev/null +++ b/utils3d/utils3d/numpy/shaders/texture.fsh @@ -0,0 +1,11 @@ +#version 330 + +uniform sampler2D tex; +uniform sampler2D uv; + +in vec2 scr_coord; +out vecN tex_color; + +void main() { + tex_color = vecN(texture(tex, texture(uv, scr_coord).xy)); +} \ No newline at end of file diff --git a/utils3d/utils3d/numpy/shaders/texture.vsh b/utils3d/utils3d/numpy/shaders/texture.vsh new file mode 100644 index 0000000000000000000000000000000000000000..b177dde06fa91597c773db3f53c9002c392dbb25 --- /dev/null +++ b/utils3d/utils3d/numpy/shaders/texture.vsh @@ -0,0 +1,9 @@ + #version 330 core + +in vec2 in_vert; +out vec2 scr_coord; + +void main() { + scr_coord = in_vert * 0.5 + 0.5; + gl_Position = vec4(in_vert, 0., 1.); +} \ No newline at end of file diff --git a/utils3d/utils3d/numpy/shaders/vertex_attribute.fsh b/utils3d/utils3d/numpy/shaders/vertex_attribute.fsh new file mode 100644 index 0000000000000000000000000000000000000000..917cbd559680c288d9ea2eda7f91417a36b514bf --- /dev/null +++ b/utils3d/utils3d/numpy/shaders/vertex_attribute.fsh @@ -0,0 +1,9 @@ +#version 330 + +in vecN v_attr; + +out vecN f_attr; + +void main() { + f_attr = v_attr; +} diff 
--git a/utils3d/utils3d/numpy/shaders/vertex_attribute.vsh b/utils3d/utils3d/numpy/shaders/vertex_attribute.vsh new file mode 100644 index 0000000000000000000000000000000000000000..ecf69a084d3b391e1ceb24677dea429546e9bbc3 --- /dev/null +++ b/utils3d/utils3d/numpy/shaders/vertex_attribute.vsh @@ -0,0 +1,13 @@ +#version 330 + +uniform mat4 u_mvp; + +in vec3 i_position; +in vecN i_attr; + +out vecN v_attr; + +void main() { + gl_Position = u_mvp * vec4(i_position, 1.0); + v_attr = i_attr; +} diff --git a/utils3d/utils3d/numpy/spline.py b/utils3d/utils3d/numpy/spline.py new file mode 100644 index 0000000000000000000000000000000000000000..9a785c03e656d67cf1bcc8f30de341b7043669a8 --- /dev/null +++ b/utils3d/utils3d/numpy/spline.py @@ -0,0 +1,82 @@ +from typing import * + +import numpy as np + + +__all__ = ['linear_spline_interpolate'] + + +def linear_spline_interpolate(x: np.ndarray, t: np.ndarray, s: np.ndarray, extrapolation_mode: Literal['constant', 'linear'] = 'constant') -> np.ndarray: + """ + Linear spline interpolation. + + ### Parameters: + - `x`: np.ndarray, shape (n, d): the values of data points. + - `t`: np.ndarray, shape (n,): the times of the data points. + - `s`: np.ndarray, shape (m,): the times to be interpolated. + - `extrapolation_mode`: str, the mode of extrapolation. 'constant' means extrapolate the boundary values, 'linear' means extrapolate linearly. + + ### Returns: + - `y`: np.ndarray, shape (..., m, d): the interpolated values. 
+ """ + i = np.searchsorted(t, s, side='left') + if extrapolation_mode == 'constant': + prev = np.clip(i - 1, 0, len(t) - 1) + suc = np.clip(i, 0, len(t) - 1) + elif extrapolation_mode == 'linear': + prev = np.clip(i - 1, 0, len(t) - 2) + suc = np.clip(i, 1, len(t) - 1) + else: + raise ValueError(f'Invalid extrapolation_mode: {extrapolation_mode}') + + u = (s - t[prev]) / np.maximum(t[suc] - t[prev], 1e-12) + y = u * x[suc] + (1 - u) * x[prev] + + return y + + + +def _solve_tridiagonal(a: np.ndarray, b: np.ndarray, c: np.ndarray, d: np.ndarray) -> np.ndarray: + n = b.shape[-1] + cc = np.zeros_like(b) + dd = np.zeros_like(b) + cc[..., 0] = c[..., 0] / b[..., 0] + dd[..., 0] = d[..., 0] / b[..., 0] + for i in range(1, n): + cc[..., i] = c[..., i] / (b[..., i] - a[..., i - 1] * cc[..., i - 1]) + dd[..., i] = (d[..., i] - a[..., i - 1] * dd[..., i - 1]) / (b[..., i] - a[..., i - 1] * cc[..., i - 1]) + x = np.zeros_like(b) + x[..., -1] = dd[..., -1] + for i in range(n - 2, -1, -1): + x[..., i] = dd[..., i] - cc[..., i] * x[..., i + 1] + return x + + +def cubic_spline_interpolate(x: np.ndarray, t: np.ndarray, s: np.ndarray, v0: np.ndarray = None, vn: np.ndarray = None) -> np.ndarray: + """ + Cubic spline interpolation. + + ### Parameters: + - `x`: np.ndarray, shape (..., n,): the x-coordinates of the data points. + - `t`: np.ndarray, shape (n,): the knot vector. NOTE: t must be sorted in ascending order. + - `s`: np.ndarray, shape (..., m,): the y-coordinates of the data points. + - `v0`: np.ndarray, shape (...,): the value of the derivative at the first knot, as the boundary condition. If None, it is set to zero. + - `vn`: np.ndarray, shape (...,): the value of the derivative at the last knot, as the boundary condition. If None, it is set to zero. + + ### Returns: + - `y`: np.ndarray, shape (..., m): the interpolated values. 
+ """ + h = t[..., 1:] - t[..., :-1] + mu = h[..., :-1] / (h[..., :-1] + h[..., 1:]) + la = 1 - mu + d = (x[..., 1:] - x[..., :-1]) / h + d = 6 * (d[..., 1:] - d[..., :-1]) / (t[..., 2:] - t[..., :-2]) + + mu = np.concatenate([mu, np.ones_like(mu[..., :1])], axis=-1) + la = np.concatenate([np.ones_like(la[..., :1]), la], axis=-1) + d = np.concatenate([(((x[..., 1] - x[..., 0]) / h[0] - v0) / h[0])[..., None], d, ((vn - (x[..., -1] - x[..., -2]) / h[-1]) / h[-1])[..., None]], axis=-1) + + M = _solve_tridiagonal(mu, np.full_like(d, fill_value=2), la, d) + + i = np.searchsorted(t, s, side='left') + diff --git a/utils3d/utils3d/numpy/transforms.py b/utils3d/utils3d/numpy/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..b01f1680b42dc0fee36fa4ac50362f2010b81f77 --- /dev/null +++ b/utils3d/utils3d/numpy/transforms.py @@ -0,0 +1,1104 @@ +import numpy as np +from typing import * +from numbers import Number +from ._helpers import batched +from .._helpers import no_warnings + + +__all__ = [ + 'perspective', + 'perspective_from_fov', + 'perspective_from_fov_xy', + 'intrinsics_from_focal_center', + 'intrinsics_from_fov', + 'fov_to_focal', + 'focal_to_fov', + 'intrinsics_to_fov', + 'view_look_at', + 'extrinsics_look_at', + 'perspective_to_intrinsics', + 'perspective_to_near_far', + 'intrinsics_to_perspective', + 'extrinsics_to_view', + 'view_to_extrinsics', + 'normalize_intrinsics', + 'crop_intrinsics', + 'pixel_to_uv', + 'pixel_to_ndc', + 'uv_to_pixel', + 'project_depth', + 'depth_buffer_to_linear', + 'unproject_cv', + 'unproject_gl', + 'project_cv', + 'project_gl', + 'quaternion_to_matrix', + 'axis_angle_to_matrix', + 'matrix_to_quaternion', + 'extrinsics_to_essential', + 'euler_axis_angle_rotation', + 'euler_angles_to_matrix', + 'skew_symmetric', + 'rotation_matrix_from_vectors', + 'ray_intersection', + 'se3_matrix', + 'slerp_quaternion', + 'slerp_vector', + 'lerp', + 'lerp_se3_matrix', + 'piecewise_lerp', + 'piecewise_lerp_se3_matrix', + 
'apply_transform' +] + + +@batched(0,0,0,0) +def perspective( + fov_y: Union[float, np.ndarray], + aspect: Union[float, np.ndarray], + near: Union[float, np.ndarray], + far: Union[float, np.ndarray] +) -> np.ndarray: + """ + Get OpenGL perspective matrix + + Args: + fov_y (float | np.ndarray): field of view in y axis + aspect (float | np.ndarray): aspect ratio + near (float | np.ndarray): near plane to clip + far (float | np.ndarray): far plane to clip + + Returns: + (np.ndarray): [..., 4, 4] perspective matrix + """ + N = fov_y.shape[0] + ret = np.zeros((N, 4, 4), dtype=fov_y.dtype) + ret[:, 0, 0] = 1. / (np.tan(fov_y / 2) * aspect) + ret[:, 1, 1] = 1. / (np.tan(fov_y / 2)) + ret[:, 2, 2] = (near + far) / (near - far) + ret[:, 2, 3] = 2. * near * far / (near - far) + ret[:, 3, 2] = -1. + return ret + + +def perspective_from_fov( + fov: Union[float, np.ndarray], + width: Union[int, np.ndarray], + height: Union[int, np.ndarray], + near: Union[float, np.ndarray], + far: Union[float, np.ndarray] +) -> np.ndarray: + """ + Get OpenGL perspective matrix from field of view in largest dimension + + Args: + fov (float | np.ndarray): field of view in largest dimension + width (int | np.ndarray): image width + height (int | np.ndarray): image height + near (float | np.ndarray): near plane to clip + far (float | np.ndarray): far plane to clip + + Returns: + (np.ndarray): [..., 4, 4] perspective matrix + """ + fov_y = 2 * np.arctan(np.tan(fov / 2) * height / np.maximum(width, height)) + aspect = width / height + return perspective(fov_y, aspect, near, far) + + +def perspective_from_fov_xy( + fov_x: Union[float, np.ndarray], + fov_y: Union[float, np.ndarray], + near: Union[float, np.ndarray], + far: Union[float, np.ndarray] +) -> np.ndarray: + """ + Get OpenGL perspective matrix from field of view in x and y axis + + Args: + fov_x (float | np.ndarray): field of view in x axis + fov_y (float | np.ndarray): field of view in y axis + near (float | np.ndarray): near plane to clip + 
far (float | np.ndarray): far plane to clip + + Returns: + (np.ndarray): [..., 4, 4] perspective matrix + """ + aspect = np.tan(fov_x / 2) / np.tan(fov_y / 2) + return perspective(fov_y, aspect, near, far) + + +def intrinsics_from_focal_center( + fx: Union[float, np.ndarray], + fy: Union[float, np.ndarray], + cx: Union[float, np.ndarray], + cy: Union[float, np.ndarray], + dtype: Optional[np.dtype] = np.float32 +) -> np.ndarray: + """ + Get OpenCV intrinsics matrix + + Returns: + (np.ndarray): [..., 3, 3] OpenCV intrinsics matrix + """ + if any(isinstance(x, np.ndarray) for x in (fx, fy, cx, cy)): + dtype = np.result_type(fx, fy, cx, cy) + fx, fy, cx, cy = np.broadcast_arrays(fx, fy, cx, cy) + ret = np.zeros((*fx.shape, 3, 3), dtype=dtype) + ret[..., 0, 0] = fx + ret[..., 1, 1] = fy + ret[..., 0, 2] = cx + ret[..., 1, 2] = cy + ret[..., 2, 2] = 1. + return ret + + +def intrinsics_from_fov( + fov_max: Union[float, np.ndarray] = None, + fov_min: Union[float, np.ndarray] = None, + fov_x: Union[float, np.ndarray] = None, + fov_y: Union[float, np.ndarray] = None, + width: Union[int, np.ndarray] = None, + height: Union[int, np.ndarray] = None, +) -> np.ndarray: + """ + Get normalized OpenCV intrinsics matrix from given field of view. 
+ You can provide either fov_max, fov_min, fov_x or fov_y + + Args: + width (int | np.ndarray): image width + height (int | np.ndarray): image height + fov_max (float | np.ndarray): field of view in largest dimension + fov_min (float | np.ndarray): field of view in smallest dimension + fov_x (float | np.ndarray): field of view in x axis + fov_y (float | np.ndarray): field of view in y axis + + Returns: + (np.ndarray): [..., 3, 3] OpenCV intrinsics matrix + """ + if fov_max is not None: + fx = np.maximum(width, height) / width / (2 * np.tan(fov_max / 2)) + fy = np.maximum(width, height) / height / (2 * np.tan(fov_max / 2)) + elif fov_min is not None: + fx = np.minimum(width, height) / width / (2 * np.tan(fov_min / 2)) + fy = np.minimum(width, height) / height / (2 * np.tan(fov_min / 2)) + elif fov_x is not None and fov_y is not None: + fx = 1 / (2 * np.tan(fov_x / 2)) + fy = 1 / (2 * np.tan(fov_y / 2)) + elif fov_x is not None: + fx = 1 / (2 * np.tan(fov_x / 2)) + fy = fx * width / height + elif fov_y is not None: + fy = 1 / (2 * np.tan(fov_y / 2)) + fx = fy * height / width + cx = 0.5 + cy = 0.5 + ret = intrinsics_from_focal_center(fx, fy, cx, cy) + return ret + + +def focal_to_fov(focal: np.ndarray): + return 2 * np.arctan(0.5 / focal) + + +def fov_to_focal(fov: np.ndarray): + return 0.5 / np.tan(fov / 2) + + +def intrinsics_to_fov(intrinsics: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + fov_x = focal_to_fov(intrinsics[..., 0, 0]) + fov_y = focal_to_fov(intrinsics[..., 1, 1]) + return fov_x, fov_y + + +@batched(1,1,1) +def view_look_at( + eye: np.ndarray, + look_at: np.ndarray, + up: np.ndarray + ) -> np.ndarray: + """ + Get OpenGL view matrix looking at something + + Args: + eye (np.ndarray): [..., 3] the eye position + look_at (np.ndarray): [..., 3] the position to look at + up (np.ndarray): [..., 3] head up direction (y axis in screen space). 
Not necessarily othogonal to view direction + + Returns: + (np.ndarray): [..., 4, 4], view matrix + """ + z = eye - look_at + x = np.cross(up, z) + y = np.cross(z, x) + # x = np.cross(y, z) + x = x / np.linalg.norm(x, axis=-1, keepdims=True) + y = y / np.linalg.norm(y, axis=-1, keepdims=True) + z = z / np.linalg.norm(z, axis=-1, keepdims=True) + R = np.stack([x, y, z], axis=-2) + t = -np.matmul(R, eye[..., None]) + return np.concatenate([ + np.concatenate([R, t], axis=-1), + np.array([[[0., 0., 0., 1.]]]).repeat(eye.shape[0], axis=0) + ], axis=-2) + + +@batched(1,1,1) +def extrinsics_look_at( + eye: np.ndarray, + look_at: np.ndarray, + up: np.ndarray +) -> np.ndarray: + """ + Get OpenCV extrinsics matrix looking at something + + Args: + eye (np.ndarray): [..., 3] the eye position + look_at (np.ndarray): [..., 3] the position to look at + up (np.ndarray): [..., 3] head up direction (-y axis in screen space). Not necessarily othogonal to view direction + + Returns: + (np.ndarray): [..., 4, 4], extrinsics matrix + """ + z = look_at - eye + x = np.cross(-up, z) + y = np.cross(z, x) + # x = np.cross(y, z) + x = x / np.linalg.norm(x, axis=-1, keepdims=True) + y = y / np.linalg.norm(y, axis=-1, keepdims=True) + z = z / np.linalg.norm(z, axis=-1, keepdims=True) + R = np.stack([x, y, z], axis=-2) + t = -np.matmul(R, eye[..., None]) + return np.concatenate([ + np.concatenate([R, t], axis=-1), + np.array([[[0., 0., 0., 1.]]], dtype=eye.dtype).repeat(eye.shape[0], axis=0) + ], axis=-2) + + +def perspective_to_intrinsics( + perspective: np.ndarray +) -> np.ndarray: + """ + OpenGL perspective matrix to OpenCV intrinsics + + Args: + perspective (np.ndarray): [..., 4, 4] OpenGL perspective matrix + + Returns: + (np.ndarray): shape [..., 3, 3] OpenCV intrinsics + """ + ret = np.array([[0.5, 0., 0.5], [0., -0.5, 0.5], [0., 0., 1.]], dtype=perspective.dtype) \ + @ perspective[..., [0, 1, 3], :3] \ + @ np.diag(np.array([1, -1, -1], dtype=perspective.dtype)) + return ret + + +def 
perspective_to_near_far(perspective: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """ + Get near and far planes from OpenGL perspective matrix + + Args: + """ + a, b = perspective[..., 2, 2], perspective[..., 2, 3] + near, far = b / (a - 1), b / (a + 1) + return near, far + + +@batched(2,0,0) +def intrinsics_to_perspective( + intrinsics: np.ndarray, + near: Union[float, np.ndarray], + far: Union[float, np.ndarray], +) -> np.ndarray: + """ + OpenCV intrinsics to OpenGL perspective matrix + NOTE: not work for tile-shifting intrinsics currently + + Args: + intrinsics (np.ndarray): [..., 3, 3] OpenCV intrinsics matrix + near (float | np.ndarray): [...] near plane to clip + far (float | np.ndarray): [...] far plane to clip + Returns: + (np.ndarray): [..., 4, 4] OpenGL perspective matrix + """ + N = intrinsics.shape[0] + fx, fy = intrinsics[:, 0, 0], intrinsics[:, 1, 1] + cx, cy = intrinsics[:, 0, 2], intrinsics[:, 1, 2] + ret = np.zeros((N, 4, 4), dtype=intrinsics.dtype) + ret[:, 0, 0] = 2 * fx + ret[:, 1, 1] = 2 * fy + ret[:, 0, 2] = -2 * cx + 1 + ret[:, 1, 2] = 2 * cy - 1 + ret[:, 2, 2] = (near + far) / (near - far) + ret[:, 2, 3] = 2. * near * far / (near - far) + ret[:, 3, 2] = -1. 
+ return ret + + +@batched(2) +def extrinsics_to_view( + extrinsics: np.ndarray + ) -> np.ndarray: + """ + OpenCV camera extrinsics to OpenGL view matrix + + Args: + extrinsics (np.ndarray): [..., 4, 4] OpenCV camera extrinsics matrix + + Returns: + (np.ndarray): [..., 4, 4] OpenGL view matrix + """ + return extrinsics * np.array([1, -1, -1, 1], dtype=extrinsics.dtype)[:, None] + + +@batched(2) +def view_to_extrinsics( + view: np.ndarray + ) -> np.ndarray: + """ + OpenGL view matrix to OpenCV camera extrinsics + + Args: + view (np.ndarray): [..., 4, 4] OpenGL view matrix + + Returns: + (np.ndarray): [..., 4, 4] OpenCV camera extrinsics matrix + """ + return view * np.array([1, -1, -1, 1], dtype=view.dtype)[:, None] + + +@batched(2, 0, 0, None) +def normalize_intrinsics( + intrinsics: np.ndarray, + width: Union[int, np.ndarray], + height: Union[int, np.ndarray], + integer_pixel_centers: bool = True +) -> np.ndarray: + """ + Normalize intrinsics from pixel cooridnates to uv coordinates + + Args: + intrinsics (np.ndarray): [..., 3, 3] camera intrinsics(s) to normalize + width (int | np.ndarray): [...] image width(s) + height (int | np.ndarray): [...] image height(s) + integer_pixel_centers (bool): whether the integer pixel coordinates are at the center of the pixel. If False, the integer coordinates are at the left-top corner of the pixel. 
+ + Returns: + (np.ndarray): [..., 3, 3] normalized camera intrinsics(s) + """ + zeros = np.zeros_like(width) + ones = np.ones_like(width) + if integer_pixel_centers: + transform = np.stack([ + 1 / width, zeros, 0.5 / width, + zeros, 1 / height, 0.5 / height, + zeros, zeros, ones + ]).reshape(*zeros.shape, 3, 3) + else: + transform = np.stack([ + 1 / width, zeros, zeros, + zeros, 1 / height, zeros, + zeros, zeros, ones + ]).reshape(*zeros.shape, 3, 3) + return transform @ intrinsics + + +@batched(2,0,0,0,0,0,0) +def crop_intrinsics( + intrinsics: np.ndarray, + width: Union[int, np.ndarray], + height: Union[int, np.ndarray], + left: Union[int, np.ndarray], + top: Union[int, np.ndarray], + crop_width: Union[int, np.ndarray], + crop_height: Union[int, np.ndarray] +) -> np.ndarray: + """ + Evaluate the new intrinsics(s) after crop the image: cropped_img = img[top:top+crop_height, left:left+crop_width] + + Args: + intrinsics (np.ndarray): [..., 3, 3] camera intrinsics(s) to crop + width (int | np.ndarray): [...] image width(s) + height (int | np.ndarray): [...] image height(s) + left (int | np.ndarray): [...] left crop boundary + top (int | np.ndarray): [...] top crop boundary + crop_width (int | np.ndarray): [...] crop width + crop_height (int | np.ndarray): [...] crop height + + Returns: + (np.ndarray): [..., 3, 3] cropped camera intrinsics(s) + """ + zeros = np.zeros_like(width) + ones = np.ones_like(width) + transform = np.stack([ + width / crop_width, zeros, -left / crop_width, + zeros, height / crop_height, -top / crop_height, + zeros, zeros, ones + ]).reshape(*zeros.shape, 3, 3) + return transform @ intrinsics + + +@batched(1,0,0) +def pixel_to_uv( + pixel: np.ndarray, + width: Union[int, np.ndarray], + height: Union[int, np.ndarray] +) -> np.ndarray: + """ + Args: + pixel (np.ndarray): [..., 2] pixel coordinrates defined in image space, x range is (0, W - 1), y range is (0, H - 1) + width (int | np.ndarray): [...] 
image width(s) + height (int | np.ndarray): [...] image height(s) + + Returns: + (np.ndarray): [..., 2] pixel coordinrates defined in uv space, the range is (0, 1) + """ + if not np.issubdtype(pixel.dtype, np.floating): + pixel = pixel.astype(np.float32) + dtype = pixel.dtype + uv = (pixel + np.array(0.5, dtype=dtype)) / np.stack([width, height], axis=-1) + return uv + + +@batched(1,0,0) +def uv_to_pixel( + uv: np.ndarray, + width: Union[int, np.ndarray], + height: Union[int, np.ndarray] +) -> np.ndarray: + """ + Args: + pixel (np.ndarray): [..., 2] pixel coordinrates defined in image space, x range is (0, W - 1), y range is (0, H - 1) + width (int | np.ndarray): [...] image width(s) + height (int | np.ndarray): [...] image height(s) + + Returns: + (np.ndarray): [..., 2] pixel coordinrates defined in uv space, the range is (0, 1) + """ + pixel = uv * np.stack([width, height], axis=-1).astype(uv.dtype) - 0.5 + return pixel + + +@batched(1,0,0) +def pixel_to_ndc( + pixel: np.ndarray, + width: Union[int, np.ndarray], + height: Union[int, np.ndarray] +) -> np.ndarray: + """ + Args: + pixel (np.ndarray): [..., 2] pixel coordinrates defined in image space, x range is (0, W - 1), y range is (0, H - 1) + width (int | np.ndarray): [...] image width(s) + height (int | np.ndarray): [...] image height(s) + + Returns: + (np.ndarray): [..., 2] pixel coordinrates defined in ndc space, the range is (-1, 1) + """ + if not np.issubdtype(pixel.dtype, np.floating): + pixel = pixel.astype(np.float32) + dtype = pixel.dtype + ndc = (pixel + np.array(0.5, dtype=dtype)) / (np.stack([width, height], dim=-1) * np.array([2, -2], dtype=dtype)) \ + + np.array([-1, 1], dtype=dtype) + return ndc + + +@batched(0,0,0) +def project_depth( + depth: np.ndarray, + near: Union[float, np.ndarray], + far: Union[float, np.ndarray] +) -> np.ndarray: + """ + Project linear depth to depth value in screen space + + Args: + depth (np.ndarray): [...] depth value + near (float | np.ndarray): [...] 
near plane to clip + far (float | np.ndarray): [...] far plane to clip + + Returns: + (np.ndarray): [..., 1] depth value in screen space, value ranging in [0, 1] + """ + return (far - near * far / depth) / (far - near) + + +@batched(0,0,0) +def depth_buffer_to_linear( + depth_buffer: np.ndarray, + near: Union[float, np.ndarray], + far: Union[float, np.ndarray] +) -> np.ndarray: + """ + OpenGL depth buffer to linear depth + + Args: + depth_buffer (np.ndarray): [...] depth value + near (float | np.ndarray): [...] near plane to clip + far (float | np.ndarray): [...] far plane to clip + + Returns: + (np.ndarray): [..., 1] linear depth + """ + return near * far / (far - (far - near) * depth_buffer) + + +@batched(2,2,2,2) +def project_gl( + points: np.ndarray, + model: np.ndarray = None, + view: np.ndarray = None, + perspective: np.ndarray = None + ) -> Tuple[np.ndarray, np.ndarray]: + """ + Project 3D points to 2D following the OpenGL convention (except for row major matrice) + + Args: + points (np.ndarray): [..., N, 3] or [..., N, 4] 3D points to project, if the last + dimension is 4, the points are assumed to be in homogeneous coordinates + model (np.ndarray): [..., 4, 4] model matrix + view (np.ndarray): [..., 4, 4] view matrix + perspective (np.ndarray): [..., 4, 4] perspective matrix + + Returns: + scr_coord (np.ndarray): [..., N, 3] screen space coordinates, value ranging in [0, 1]. + The origin (0., 0., 0.) 
is corresponding to the left & bottom & nearest + linear_depth (np.ndarray): [..., N] linear depth + """ + assert perspective is not None, "perspective matrix is required" + if points.shape[-1] == 3: + points = np.concatenate([points, np.ones_like(points[..., :1])], axis=-1) + if model is not None: + points = points @ model.swapaxes(-1, -2) + if view is not None: + points = points @ view.swapaxes(-1, -2) + clip_coord = points @ perspective.swapaxes(-1, -2) + ndc_coord = clip_coord[..., :3] / clip_coord[..., 3:] + scr_coord = ndc_coord * 0.5 + 0.5 + linear_depth = clip_coord[..., 3] + return scr_coord, linear_depth + + +@batched(2,2,2) +def project_cv( + points: np.ndarray, + extrinsics: np.ndarray = None, + intrinsics: np.ndarray = None + ) -> Tuple[np.ndarray, np.ndarray]: + """ + Project 3D points to 2D following the OpenCV convention + + Args: + points (np.ndarray): [..., N, 3] or [..., N, 4] 3D points to project, if the last + dimension is 4, the points are assumed to be in homogeneous coordinates + extrinsics (np.ndarray): [..., 4, 4] extrinsics matrix + intrinsics (np.ndarray): [..., 3, 3] intrinsics matrix + + Returns: + uv_coord (np.ndarray): [..., N, 2] uv coordinates, value ranging in [0, 1]. + The origin (0., 0.) 
is corresponding to the left & top + linear_depth (np.ndarray): [..., N] linear depth + """ + assert intrinsics is not None, "intrinsics matrix is required" + if points.shape[-1] == 3: + points = np.concatenate([points, np.ones_like(points[..., :1])], axis=-1) + if extrinsics is not None: + points = points @ extrinsics.swapaxes(-1, -2) + points = points[..., :3] @ intrinsics.swapaxes(-1, -2) + with no_warnings(): + uv_coord = points[..., :2] / points[..., 2:] + linear_depth = points[..., 2] + return uv_coord, linear_depth + + +@batched(2,2,2,2) +def unproject_gl( + screen_coord: np.ndarray, + model: np.ndarray = None, + view: np.ndarray = None, + perspective: np.ndarray = None + ) -> np.ndarray: + """ + Unproject screen space coordinates to 3D view space following the OpenGL convention (except for row major matrice) + + Args: + screen_coord (np.ndarray): [..., N, 3] screen space coordinates, value ranging in [0, 1]. + The origin (0., 0., 0.) is corresponding to the left & bottom & nearest + model (np.ndarray): [..., 4, 4] model matrix + view (np.ndarray): [..., 4, 4] view matrix + perspective (np.ndarray): [..., 4, 4] perspective matrix + + Returns: + points (np.ndarray): [..., N, 3] 3d points + """ + assert perspective is not None, "perspective matrix is required" + ndc_xy = screen_coord * 2 - 1 + clip_coord = np.concatenate([ndc_xy, np.ones_like(ndc_xy[..., :1])], axis=-1) + transform = perspective + if view is not None: + transform = transform @ view + if model is not None: + transform = transform @ model + transform = np.linalg.inv(transform) + points = clip_coord @ transform.swapaxes(-1, -2) + points = points[..., :3] / points[..., 3:] + return points + + +@batched(2,1,2,2) +def unproject_cv( + uv_coord: np.ndarray, + depth: np.ndarray = None, + extrinsics: np.ndarray = None, + intrinsics: np.ndarray = None +) -> np.ndarray: + """ + Unproject uv coordinates to 3D view space following the OpenCV convention + + Args: + uv_coord (np.ndarray): [..., N, 2] uv 
coordinates, value ranging in [0, 1]. + The origin (0., 0.) is corresponding to the left & top + depth (np.ndarray): [..., N] depth value + extrinsics (np.ndarray): [..., 4, 4] extrinsics matrix + intrinsics (np.ndarray): [..., 3, 3] intrinsics matrix + + Returns: + points (np.ndarray): [..., N, 3] 3d points + """ + assert intrinsics is not None, "intrinsics matrix is required" + points = np.concatenate([uv_coord, np.ones_like(uv_coord[..., :1])], axis=-1) + points = points @ np.linalg.inv(intrinsics).swapaxes(-1, -2) + if depth is not None: + points = points * depth[..., None] + if extrinsics is not None: + points = np.concatenate([points, np.ones_like(points[..., :1])], axis=-1) + points = (points @ np.linalg.inv(extrinsics).swapaxes(-1, -2))[..., :3] + return points + + +def quaternion_to_matrix(quaternion: np.ndarray, eps: float = 1e-12) -> np.ndarray: + """Converts a batch of quaternions (w, x, y, z) to rotation matrices + + Args: + quaternion (np.ndarray): shape (..., 4), the quaternions to convert + + Returns: + np.ndarray: shape (..., 3, 3), the rotation matrices corresponding to the given quaternions + """ + assert quaternion.shape[-1] == 4 + quaternion = quaternion / np.linalg.norm(quaternion, axis=-1, keepdims=True).clip(min=eps) + w, x, y, z = quaternion[..., 0], quaternion[..., 1], quaternion[..., 2], quaternion[..., 3] + zeros = np.zeros_like(w) + I = np.eye(3, dtype=quaternion.dtype) + xyz = quaternion[..., 1:] + A = xyz[..., :, None] * xyz[..., None, :] - I * (xyz ** 2).sum(axis=-1)[..., None, None] + B = np.stack([ + zeros, -z, y, + z, zeros, -x, + -y, x, zeros + ], axis=-1).reshape(*quaternion.shape[:-1], 3, 3) + rot_mat = I + 2 * (A + w[..., None, None] * B) + return rot_mat + + +def matrix_to_quaternion(rot_mat: np.ndarray, eps: float = 1e-12) -> np.ndarray: + """Convert 3x3 rotation matrix to quaternion (w, x, y, z) + + Args: + rot_mat (np.ndarray): shape (..., 3, 3), the rotation matrices to convert + + Returns: + np.ndarray: shape (..., 4), 
the quaternions corresponding to the given rotation matrices + """ + # Extract the diagonal and off-diagonal elements of the rotation matrix + m00, m01, m02, m10, m11, m12, m20, m21, m22 = [rot_mat[..., i, j] for i in range(3) for j in range(3)] + + diag = np.diagonal(rot_mat, axis1=-2, axis2=-1) + M = np.array([ + [1, 1, 1], + [1, -1, -1], + [-1, 1, -1], + [-1, -1, 1] + ], dtype=rot_mat.dtype) + wxyz = 0.5 * np.clip(1 + diag @ M.T, 0.0, None) ** 0.5 + max_idx = np.argmax(wxyz, axis=-1) + xw = np.sign(m21 - m12) + yw = np.sign(m02 - m20) + zw = np.sign(m10 - m01) + yz = np.sign(m21 + m12) + xz = np.sign(m02 + m20) + xy = np.sign(m01 + m10) + ones = np.ones_like(xw) + sign = np.where( + max_idx[..., None] == 0, + np.stack([ones, xw, yw, zw], axis=-1), + np.where( + max_idx[..., None] == 1, + np.stack([xw, ones, xy, xz], axis=-1), + np.where( + max_idx[..., None] == 2, + np.stack([yw, xy, ones, yz], axis=-1), + np.stack([zw, xz, yz, ones], axis=-1) + ) + ) + ) + quat = sign * wxyz + quat = quat / np.linalg.norm(quat, axis=-1, keepdims=True).clip(min=eps) + return quat + + +def extrinsics_to_essential(extrinsics: np.ndarray): + """ + extrinsics matrix `[[R, t] [0, 0, 0, 1]]` such that `x' = R (x - t)` to essential matrix such that `x' E x = 0` + + Args: + extrinsics (np.ndaray): [..., 4, 4] extrinsics matrix + + Returns: + (np.ndaray): [..., 3, 3] essential matrix + """ + assert extrinsics.shape[-2:] == (4, 4) + R = extrinsics[..., :3, :3] + t = extrinsics[..., :3, 3] + zeros = np.zeros_like(t[..., 0]) + t_x = np.stack([ + zeros, -t[..., 2], t[..., 1], + t[..., 2], zeros, -t[..., 0], + -t[..., 1], t[..., 0], zeros + ]).reshape(*t.shape[:-1], 3, 3) + return t_x @ R + + +def euler_axis_angle_rotation(axis: str, angle: np.ndarray) -> np.ndarray: + """ + Return the rotation matrices for one of the rotations about an axis + of which Euler angles describe, for each value of the angle given. + + Args: + axis: Axis label "X" or "Y or "Z". 
+ angle: any shape tensor of Euler angles in radians + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + + cos = np.cos(angle) + sin = np.sin(angle) + one = np.ones_like(angle) + zero = np.zeros_like(angle) + + if axis == "X": + R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos) + elif axis == "Y": + R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos) + elif axis == "Z": + R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one) + else: + raise ValueError("letter must be either X, Y or Z.") + + return np.stack(R_flat, -1).reshape(angle.shape + (3, 3)) + + +def euler_angles_to_matrix(euler_angles: np.ndarray, convention: str = 'XYZ') -> np.ndarray: + """ + Convert rotations given as Euler angles in radians to rotation matrices. + + Args: + euler_angles: Euler angles in radians as ndarray of shape (..., 3), XYZ + convention: permutation of "X", "Y" or "Z", representing the order of Euler rotations to apply. + + Returns: + Rotation matrices as ndarray of shape (..., 3, 3). 
+ """ + if euler_angles.shape[-1] != 3: + raise ValueError("Invalid input euler angles.") + if len(convention) != 3: + raise ValueError("Convention must have 3 letters.") + if convention[1] in (convention[0], convention[2]): + raise ValueError(f"Invalid convention {convention}.") + for letter in convention: + if letter not in ("X", "Y", "Z"): + raise ValueError(f"Invalid letter {letter} in convention string.") + matrices = [ + euler_axis_angle_rotation(c, euler_angles[..., 'XYZ'.index(c)]) + for c in convention + ] + return matrices[2] @ matrices[1] @ matrices[0] + + +def skew_symmetric(v: np.ndarray): + "Skew symmetric matrix from a 3D vector" + assert v.shape[-1] == 3, "v must be 3D" + x, y, z = v[..., 0], v[..., 1], v[..., 2] + zeros = np.zeros_like(x) + return np.stack([ + zeros, -z, y, + z, zeros, -x, + -y, x, zeros, + ], axis=-1).reshape(*v.shape[:-1], 3, 3) + + +def rotation_matrix_from_vectors(v1: np.ndarray, v2: np.ndarray): + "Rotation matrix that rotates v1 to v2" + I = np.eye(3, dtype=v1.dtype) + v1 = v1 / np.linalg.norm(v1, axis=-1) + v2 = v2 / np.linalg.norm(v2, axis=-1) + v = np.cross(v1, v2, axis=-1) + c = np.sum(v1 * v2, axis=-1) + K = skew_symmetric(v) + R = I + K + (1 / (1 + c)).astype(v1.dtype)[None, None] * (K @ K) # Avoid numpy's default type casting for scalars + return R + + +def axis_angle_to_matrix(axis_angle: np.ndarray, eps: float = 1e-12) -> np.ndarray: + """Convert axis-angle representation (rotation vector) to rotation matrix, whose direction is the axis of rotation and length is the angle of rotation + + Args: + axis_angle (np.ndarray): shape (..., 3), axis-angle vcetors + + Returns: + np.ndarray: shape (..., 3, 3) The rotation matrices for the given axis-angle parameters + """ + batch_shape = axis_angle.shape[:-1] + dtype = axis_angle.dtype + + angle = np.linalg.norm(axis_angle, axis=-1, keepdims=True) + axis = axis_angle / (angle + eps) + + cos = np.cos(angle)[..., None, :] + sin = np.sin(angle)[..., None, :] + + rx, ry, rz = 
np.split(axis, 3, axis=-1) + zeros = np.zeros((*batch_shape, 1), dtype=dtype) + K = np.concatenate([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], axis=-1).reshape((*batch_shape, 3, 3)) + + ident = np.eye(3, dtype=dtype) + rot_mat = ident + sin * K + (1 - cos) * (K @ K) + return rot_mat + + +def ray_intersection(p1: np.ndarray, d1: np.ndarray, p2: np.ndarray, d2: np.ndarray): + """ + Compute the intersection/closest point of two D-dimensional rays + If the rays are intersecting, the closest point is the intersection point. + + Args: + p1 (np.ndarray): (..., D) origin of ray 1 + d1 (np.ndarray): (..., D) direction of ray 1 + p2 (np.ndarray): (..., D) origin of ray 2 + d2 (np.ndarray): (..., D) direction of ray 2 + + Returns: + (np.ndarray): (..., N) intersection point + """ + p1, d1, p2, d2 = np.broadcast_arrays(p1, d1, p2, d2) + dtype = p1.dtype + dim = p1.shape[-1] + d = np.stack([d1, d2], axis=-2) # (..., 2, D) + p = np.stack([p1, p2], axis=-2) # (..., 2, D) + A = np.concatenate([ + (np.eye(dim, dtype=dtype) * np.ones((*p.shape[:-2], 2, 1, 1))).reshape(*d.shape[:-2], 2 * dim, dim), # (..., 2 * D, D) + -(np.eye(2, dtype=dtype)[..., None] * d[..., None, :]).swapaxes(-2, -1).reshape(*d.shape[:-2], 2 * dim, 2) # (..., 2 * D, 2) + ], axis=-1) # (..., 2 * D, D + 2) + b = p.reshape(*p.shape[:-2], 2 * dim) # (..., 2 * D) + x = np.linalg.solve(A.swapaxes(-1, -2) @ A + 1e-12 * np.eye(dim + 2, dtype=dtype), (A.swapaxes(-1, -2) @ b[..., :, None]))[..., 0] + return x[..., :dim], (x[..., dim], x[..., dim + 1]) + + +def se3_matrix(R: np.ndarray, t: np.ndarray) -> np.ndarray: + """ + Convert rotation matrix and translation vector to 4x4 transformation matrix. 
+ + Args: + R (np.ndarray): [..., 3, 3] rotation matrix + t (np.ndarray): [..., 3] translation vector + + Returns: + np.ndarray: [..., 4, 4] transformation matrix + """ + assert R.shape[:-2] == t.shape[:-1] + assert R.shape[-1] == 3 and R.shape[-2] == 3 + return np.concatenate([ + np.concatenate([R, t[..., None]], axis=-1), + np.concatenate([np.zeros_like(t), np.ones_like(t[..., :1])], axis=-1)[..., None, :] + ], axis=-2) + + +def slerp_quaternion(q1: np.ndarray, q2: np.ndarray, t: np.ndarray) -> np.ndarray: + """ + Spherical linear interpolation between two unit quaternions. + + Args: + q1 (np.ndarray): [..., d] unit vector 1 + q2 (np.ndarray): [..., d] unit vector 2 + t (np.ndarray): [...] interpolation parameter in [0, 1] + + Returns: + np.ndarray: [..., 3] interpolated unit vector + """ + q1 = q1 / np.linalg.norm(q1, axis=-1, keepdims=True) + q2 = q2 / np.linalg.norm(q2, axis=-1, keepdims=True) + dot = np.sum(q1 * q2, axis=-1, keepdims=True) + + dot = np.where(dot < 0, -dot, dot) # handle negative dot product + + dot = np.minimum(dot, 1.) + theta = np.arccos(dot) * t + + q_ortho = q2 - q1 * dot + q_ortho = q_ortho / np.maximum(np.linalg.norm(q_ortho, axis=-1, keepdims=True), 1e-12) + q = q1 * np.cos(theta) + q_ortho * np.sin(theta) + return q + + +def slerp_rotation_matrix(R1: np.ndarray, R2: np.ndarray, t: np.ndarray) -> np.ndarray: + """ + Spherical linear interpolation between two rotation matrices. + + Args: + R1 (np.ndarray): [..., 3, 3] rotation matrix 1 + R2 (np.ndarray): [..., 3, 3] rotation matrix 2 + t (np.ndarray): [...] interpolation parameter in [0, 1] + + Returns: + np.ndarray: [..., 3, 3] interpolated rotation matrix + """ + quat1 = matrix_to_quaternion(R1) + quat2 = matrix_to_quaternion(R2) + quat = slerp_quaternion(quat1, quat2, t) + return quaternion_to_matrix(quat) + + +def slerp_vector(v1: np.ndarray, v2: np.ndarray, t: np.ndarray) -> np.ndarray: + """ + Spherical linear interpolation between two unit vectors. 
The vectors are assumed to be normalized. + + Args: + v1 (np.ndarray): [..., d] unit vector 1 + v2 (np.ndarray): [..., d] unit vector 2 + t (np.ndarray): [...] interpolation parameter in [0, 1] + + Returns: + np.ndarray: [..., d] interpolated unit vector + """ + dot = np.sum(v1 * v2, axis=-1, keepdims=True) + + dot = np.minimum(dot, 1.) + theta = np.arccos(dot) * t + + v_ortho = v2 - v1 * dot + v_ortho = v_ortho / np.maximum(np.linalg.norm(v_ortho, axis=-1, keepdims=True), 1e-12) + v = v1 * np.cos(theta) + v_ortho * np.sin(theta) + return v + + +def lerp(x1: np.ndarray, x2: np.ndarray, t: np.ndarray) -> np.ndarray: + """ + Linear interpolation between two vectors. + + Args: + x1 (np.ndarray): [..., d] vector 1 + x2 (np.ndarray): [..., d] vector 2 + t (np.ndarray): [...] interpolation parameter. [0, 1] for interpolation between x1 and x2, otherwise for extrapolation. + + Returns: + np.ndarray: [..., d] interpolated vector + """ + return x1 + np.asarray(t)[..., None] * (x2 - x1) + + +def lerp_se3_matrix(T1: np.ndarray, T2: np.ndarray, t: np.ndarray) -> np.ndarray: + """ + Linear interpolation between two SE(3) matrices. + + Args: + T1 (np.ndarray): [..., 4, 4] SE(3) matrix 1 + T2 (np.ndarray): [..., 4, 4] SE(3) matrix 2 + t (np.ndarray): [...] interpolation parameter in [0, 1] + + Returns: + np.ndarray: [..., 4, 4] interpolated SE(3) matrix + """ + R1 = T1[..., :3, :3] + R2 = T2[..., :3, :3] + trans1 = T1[..., :3, 3] + trans2 = T2[..., :3, 3] + R = slerp_rotation_matrix(R1, R2, t) + trans = lerp(trans1, trans2, t) + return se3_matrix(R, trans) + + +def piecewise_lerp(x: np.ndarray, t: np.ndarray, s: np.ndarray, extrapolation_mode: Literal['constant', 'linear'] = 'constant') -> np.ndarray: + """ + Linear spline interpolation. + + ### Parameters: + - `x`: np.ndarray, shape (n, d): the values of data points. + - `t`: np.ndarray, shape (n,): the times of the data points. + - `s`: np.ndarray, shape (m,): the times to be interpolated. 
+ - `extrapolation_mode`: str, the mode of extrapolation. 'constant' means extrapolate the boundary values, 'linear' means extrapolate linearly. + + ### Returns: + - `y`: np.ndarray, shape (..., m, d): the interpolated values. + """ + i = np.searchsorted(t, s, side='left') + if extrapolation_mode == 'constant': + prev = np.clip(i - 1, 0, len(t) - 1) + suc = np.clip(i, 0, len(t) - 1) + elif extrapolation_mode == 'linear': + prev = np.clip(i - 1, 0, len(t) - 2) + suc = np.clip(i, 1, len(t) - 1) + else: + raise ValueError(f'Invalid extrapolation_mode: {extrapolation_mode}') + + u = (s - t[prev]) / np.maximum(t[suc] - t[prev], 1e-12) + y = lerp(x[prev], x[suc], u) + + return y + + +def piecewise_lerp_se3_matrix(T: np.ndarray, t: np.ndarray, s: np.ndarray, extrapolation_mode: Literal['constant', 'linear'] = 'constant') -> np.ndarray: + """ + Linear spline interpolation for SE(3) matrices. + + ### Parameters: + - `T`: np.ndarray, shape (n, 4, 4): the SE(3) matrices. + - `t`: np.ndarray, shape (n,): the times of the data points. + - `s`: np.ndarray, shape (m,): the times to be interpolated. + - `extrapolation_mode`: str, the mode of extrapolation. 'constant' means extrapolate the boundary values, 'linear' means extrapolate linearly. + + ### Returns: + - `T_interp`: np.ndarray, shape (..., m, 4, 4): the interpolated SE(3) matrices. + """ + i = np.searchsorted(t, s, side='left') + if extrapolation_mode == 'constant': + prev = np.clip(i - 1, 0, len(t) - 1) + suc = np.clip(i, 0, len(t) - 1) + elif extrapolation_mode == 'linear': + prev = np.clip(i - 1, 0, len(t) - 2) + suc = np.clip(i, 1, len(t) - 1) + else: + raise ValueError(f'Invalid extrapolation_mode: {extrapolation_mode}') + + u = (s - t[prev]) / np.maximum(t[suc] - t[prev], 1e-12) + T = lerp_se3_matrix(T[prev], T[suc], u) + + return T + + +def apply_transform(T: np.ndarray, x: np.ndarray) -> np.ndarray: + """ + Apply SE(3) transformation to a point or a set of points. 
+ + ### Parameters: + - `T`: np.ndarray, shape (..., 4, 4): the SE(3) matrix. + - `x`: np.ndarray, shape (..., 3): the point or a set of points to be transformed. + + ### Returns: + - `x_transformed`: np.ndarray, shape (..., 3): the transformed point or a set of points. + """ + x = np.asarray(x) + assert x.shape[-1] == 3 + T = np.asarray(T) + assert T.shape[-2:] == (4, 4) + x_transformed = (T[..., :3, :3] @ x[..., :, None]) + T[..., :3, 3][..., None] + return x_transformed[..., 0] \ No newline at end of file diff --git a/utils3d/utils3d/numpy/utils.py b/utils3d/utils3d/numpy/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6dde6b936220122cc0c633edaec560d589daeb22 --- /dev/null +++ b/utils3d/utils3d/numpy/utils.py @@ -0,0 +1,652 @@ +import numpy as np +from typing import * +from numbers import Number +import warnings +import functools + +from ._helpers import batched +from .._helpers import no_warnings +from . import transforms +from . import mesh + +__all__ = [ + 'sliding_window_1d', + 'sliding_window_nd', + 'sliding_window_2d', + 'max_pool_1d', + 'max_pool_2d', + 'max_pool_nd', + 'depth_edge', + 'normals_edge', + 'depth_aliasing', + 'interpolate', + 'image_scrcoord', + 'image_uv', + 'image_pixel_center', + 'image_pixel', + 'image_mesh', + 'image_mesh_from_depth', + 'points_to_normals', + 'points_to_normals', + 'depth_to_points', + 'depth_to_normals', + 'chessboard', + 'cube', + 'icosahedron', + 'square', + 'camera_frustum', + 'to4x4' +] + + + +def sliding_window_1d(x: np.ndarray, window_size: int, stride: int, axis: int = -1): + """ + Return x view of the input array with x sliding window of the given kernel size and stride. + The sliding window is performed over the given axis, and the window dimension is append to the end of the output array's shape. + + Args: + x (np.ndarray): input array with shape (..., axis_size, ...) 
+ kernel_size (int): size of the sliding window + stride (int): stride of the sliding window + axis (int): axis to perform sliding window over + + Returns: + a_sliding (np.ndarray): view of the input array with shape (..., n_windows, ..., kernel_size), where n_windows = (axis_size - kernel_size + 1) // stride + """ + assert x.shape[axis] >= window_size, f"kernel_size ({window_size}) is larger than axis_size ({x.shape[axis]})" + axis = axis % x.ndim + shape = (*x.shape[:axis], (x.shape[axis] - window_size + 1) // stride, *x.shape[axis + 1:], window_size) + strides = (*x.strides[:axis], stride * x.strides[axis], *x.strides[axis + 1:], x.strides[axis]) + x_sliding = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides) + return x_sliding + + +def sliding_window_nd(x: np.ndarray, window_size: Tuple[int,...], stride: Tuple[int,...], axis: Tuple[int,...]) -> np.ndarray: + axis = [axis[i] % x.ndim for i in range(len(axis))] + for i in range(len(axis)): + x = sliding_window_1d(x, window_size[i], stride[i], axis[i]) + return x + + +def sliding_window_2d(x: np.ndarray, window_size: Union[int, Tuple[int, int]], stride: Union[int, Tuple[int, int]], axis: Tuple[int, int] = (-2, -1)) -> np.ndarray: + if isinstance(window_size, int): + window_size = (window_size, window_size) + if isinstance(stride, int): + stride = (stride, stride) + return sliding_window_nd(x, window_size, stride, axis) + + +def max_pool_1d(x: np.ndarray, kernel_size: int, stride: int, padding: int = 0, axis: int = -1): + axis = axis % x.ndim + if padding > 0: + fill_value = np.nan if x.dtype.kind == 'f' else np.iinfo(x.dtype).min + padding_arr = np.full((*x.shape[:axis], padding, *x.shape[axis + 1:]), fill_value=fill_value, dtype=x.dtype) + x = np.concatenate([padding_arr, x, padding_arr], axis=axis) + a_sliding = sliding_window_1d(x, kernel_size, stride, axis) + max_pool = np.nanmax(a_sliding, axis=-1) + return max_pool + + +def max_pool_nd(x: np.ndarray, kernel_size: Tuple[int,...], stride: 
Tuple[int,...], padding: Tuple[int,...], axis: Tuple[int,...]) -> np.ndarray:
    # Apply 1D max pooling along each requested axis in turn.
    for i in range(len(axis)):
        x = max_pool_1d(x, kernel_size[i], stride[i], padding[i], axis[i])
    return x


def max_pool_2d(x: np.ndarray, kernel_size: Union[int, Tuple[int, int]], stride: Union[int, Tuple[int, int]], padding: Union[int, Tuple[int, int]], axis: Tuple[int, int] = (-2, -1)):
    # Normalize scalar arguments to (h, w) pairs, then pool over both axes.
    if isinstance(kernel_size, Number):
        kernel_size = (kernel_size, kernel_size)
    if isinstance(stride, Number):
        stride = (stride, stride)
    if isinstance(padding, Number):
        padding = (padding, padding)
    axis = tuple(axis)
    return max_pool_nd(x, kernel_size, stride, padding, axis)

@no_warnings(category=RuntimeWarning)
def depth_edge(depth: np.ndarray, atol: float = None, rtol: float = None, kernel_size: int = 3, mask: np.ndarray = None) -> np.ndarray:
    """
    Compute the edge mask from depth map. The edge is defined as the pixels whose neighbors have large difference in depth.

    Args:
        depth (np.ndarray): shape (..., height, width), linear depth map
        atol (float): absolute tolerance on the local depth range
        rtol (float): relative tolerance (range / depth)
        kernel_size (int): size of the local neighborhood window
        mask (np.ndarray): optional boolean validity mask; invalid pixels are excluded

    Returns:
        edge (np.ndarray): shape (..., height, width) of dtype bool
    """
    # max(depth) + max(-depth) over the window equals the local (max - min)
    # depth range. With a mask, invalid pixels are set to -inf so they never
    # win the max.
    if mask is None:
        diff = (max_pool_2d(depth, kernel_size, stride=1, padding=kernel_size // 2) + max_pool_2d(-depth, kernel_size, stride=1, padding=kernel_size // 2))
    else:
        diff = (max_pool_2d(np.where(mask, depth, -np.inf), kernel_size, stride=1, padding=kernel_size // 2) + max_pool_2d(np.where(mask, -depth, -np.inf), kernel_size, stride=1, padding=kernel_size // 2))

    edge = np.zeros_like(depth, dtype=bool)
    if atol is not None:
        edge |= diff > atol

    if rtol is not None:
        edge |= diff / depth > rtol
    return edge


def depth_aliasing(depth: np.ndarray, atol: float = None, rtol: float = None, kernel_size: int = 3, mask: np.ndarray = None) -> np.ndarray:
    """
    Compute the map that indicates the aliasing of x depth map.
The aliasing is defined as the pixels which neither close to the maximum nor the minimum of its neighbors. + Args: + depth (np.ndarray): shape (..., height, width), linear depth map + atol (float): absolute tolerance + rtol (float): relative tolerance + + Returns: + edge (np.ndarray): shape (..., height, width) of dtype torch.bool + """ + if mask is None: + diff_max = max_pool_2d(depth, kernel_size, stride=1, padding=kernel_size // 2) - depth + diff_min = max_pool_2d(-depth, kernel_size, stride=1, padding=kernel_size // 2) + depth + else: + diff_max = max_pool_2d(np.where(mask, depth, -np.inf), kernel_size, stride=1, padding=kernel_size // 2) - depth + diff_min = max_pool_2d(np.where(mask, -depth, -np.inf), kernel_size, stride=1, padding=kernel_size // 2) + depth + diff = np.minimum(diff_max, diff_min) + + edge = np.zeros_like(depth, dtype=bool) + if atol is not None: + edge |= diff > atol + if rtol is not None: + edge |= diff / depth > rtol + return edge + +@no_warnings(category=RuntimeWarning) +def normals_edge(normals: np.ndarray, tol: float, kernel_size: int = 3, mask: np.ndarray = None) -> np.ndarray: + """ + Compute the edge mask from normal map. 
+ + Args: + normal (np.ndarray): shape (..., height, width, 3), normal map + tol (float): tolerance in degrees + + Returns: + edge (np.ndarray): shape (..., height, width) of dtype torch.bool + """ + assert normals.ndim >= 3 and normals.shape[-1] == 3, "normal should be of shape (..., height, width, 3)" + normals = normals / (np.linalg.norm(normals, axis=-1, keepdims=True) + 1e-12) + + padding = kernel_size // 2 + normals_window = sliding_window_2d( + np.pad(normals, (*([(0, 0)] * (normals.ndim - 3)), (padding, padding), (padding, padding), (0, 0)), mode='edge'), + window_size=kernel_size, + stride=1, + axis=(-3, -2) + ) + if mask is None: + angle_diff = np.arccos((normals[..., None, None] * normals_window).sum(axis=-3)).max(axis=(-2, -1)) + else: + mask_window = sliding_window_2d( + np.pad(mask, (*([(0, 0)] * (mask.ndim - 3)), (padding, padding), (padding, padding)), mode='edge'), + window_size=kernel_size, + stride=1, + axis=(-3, -2) + ) + angle_diff = np.where(mask_window, np.arccos((normals[..., None, None] * normals_window).sum(axis=-3)), 0).max(axis=(-2, -1)) + + angle_diff = max_pool_2d(angle_diff, kernel_size, stride=1, padding=kernel_size // 2) + edge = angle_diff > np.deg2rad(tol) + return edge + + +@no_warnings(category=RuntimeWarning) +def points_to_normals(point: np.ndarray, mask: np.ndarray = None) -> np.ndarray: + """ + Calculate normal map from point map. Value range is [-1, 1]. Normal direction in OpenGL identity camera's coordinate system. + + Args: + point (np.ndarray): shape (height, width, 3), point map + Returns: + normal (np.ndarray): shape (height, width, 3), normal map. 
+ """ + height, width = point.shape[-3:-1] + has_mask = mask is not None + + if mask is None: + mask = np.ones_like(point[..., 0], dtype=bool) + mask_pad = np.zeros((height + 2, width + 2), dtype=bool) + mask_pad[1:-1, 1:-1] = mask + mask = mask_pad + + pts = np.zeros((height + 2, width + 2, 3), dtype=point.dtype) + pts[1:-1, 1:-1, :] = point + up = pts[:-2, 1:-1, :] - pts[1:-1, 1:-1, :] + left = pts[1:-1, :-2, :] - pts[1:-1, 1:-1, :] + down = pts[2:, 1:-1, :] - pts[1:-1, 1:-1, :] + right = pts[1:-1, 2:, :] - pts[1:-1, 1:-1, :] + normal = np.stack([ + np.cross(up, left, axis=-1), + np.cross(left, down, axis=-1), + np.cross(down, right, axis=-1), + np.cross(right, up, axis=-1), + ]) + normal = normal / (np.linalg.norm(normal, axis=-1, keepdims=True) + 1e-12) + valid = np.stack([ + mask[:-2, 1:-1] & mask[1:-1, :-2], + mask[1:-1, :-2] & mask[2:, 1:-1], + mask[2:, 1:-1] & mask[1:-1, 2:], + mask[1:-1, 2:] & mask[:-2, 1:-1], + ]) & mask[None, 1:-1, 1:-1] + normal = (normal * valid[..., None]).sum(axis=0) + normal = normal / (np.linalg.norm(normal, axis=-1, keepdims=True) + 1e-12) + + if has_mask: + normal_mask = valid.any(axis=0) + normal = np.where(normal_mask[..., None], normal, 0) + return normal, normal_mask + else: + return normal + + +def depth_to_normals(depth: np.ndarray, intrinsics: np.ndarray, mask: np.ndarray = None) -> np.ndarray: + """ + Calculate normal map from depth map. Value range is [-1, 1]. Normal direction in OpenGL identity camera's coordinate system. + + Args: + depth (np.ndarray): shape (height, width), linear depth map + intrinsics (np.ndarray): shape (3, 3), intrinsics matrix + Returns: + normal (np.ndarray): shape (height, width, 3), normal map. 
+ """ + height, width = depth.shape[-2:] + + uv = image_uv(width=width, height=height, dtype=np.float32) + pts = transforms.unproject_cv(uv, depth, intrinsics=intrinsics, extrinsics=None) + + return points_to_normals(pts, mask) + + +def depth_to_points( + depth: np.ndarray, + extrinsics: np.ndarray = None, + intrinsics: np.ndarray = None +) -> np.ndarray: + """ + Unproject depth map to 3D points. + + Args: + depth (np.ndarray): [..., H, W] depth value + extrinsics (optional, np.ndarray): [..., 4, 4] extrinsics matrix + intrinsics ( np.ndarray): [..., 3, 3] intrinsics matrix + + Returns: + points (np.ndarray): [..., N, 3] 3d points + """ + assert intrinsics is not None, "intrinsics matrix is required" + uv = image_uv(width=depth.shape[-1], height=depth.shape[-2], dtype=depth.dtype) + points = transforms.unproject_cv( + uv, + depth, + intrinsics=intrinsics[..., None, :, :], + extrinsics=extrinsics[..., None, :, :] if extrinsics is not None else None + ) + return points + + +def interpolate(bary: np.ndarray, tri_id: np.ndarray, attr: np.ndarray, faces: np.ndarray) -> np.ndarray: + """Interpolate with given barycentric coordinates and triangle indices + + Args: + bary (np.ndarray): shape (..., 3), barycentric coordinates + tri_id (np.ndarray): int array of shape (...), triangle indices + attr (np.ndarray): shape (N, M), vertices attributes + faces (np.ndarray): int array of shape (T, 3), face vertex indices + + Returns: + np.ndarray: shape (..., M) interpolated result + """ + faces_ = np.concatenate([np.zeros((1, 3), dtype=faces.dtype), faces + 1], axis=0) + attr_ = np.concatenate([np.zeros((1, attr.shape[1]), dtype=attr.dtype), attr], axis=0) + return np.sum(bary[..., None] * attr_[faces_[tri_id + 1]], axis=-2) + + +def image_scrcoord( + width: int, + height: int, +) -> np.ndarray: + """ + Get OpenGL's screen space coordinates, ranging in [0, 1]. + [0, 0] is the bottom-left corner of the image. 
+ + Args: + width (int): image width + height (int): image height + + Returns: + (np.ndarray): shape (height, width, 2) + """ + x, y = np.meshgrid( + np.linspace(0.5 / width, 1 - 0.5 / width, width, dtype=np.float32), + np.linspace(1 - 0.5 / height, 0.5 / height, height, dtype=np.float32), + indexing='xy' + ) + return np.stack([x, y], axis=2) + + +def image_uv( + height: int, + width: int, + left: int = None, + top: int = None, + right: int = None, + bottom: int = None, + dtype: np.dtype = np.float32 +) -> np.ndarray: + """ + Get image space UV grid, ranging in [0, 1]. + + >>> image_uv(10, 10): + [[[0.05, 0.05], [0.15, 0.05], ..., [0.95, 0.05]], + [[0.05, 0.15], [0.15, 0.15], ..., [0.95, 0.15]], + ... ... ... + [[0.05, 0.95], [0.15, 0.95], ..., [0.95, 0.95]]] + + Args: + width (int): image width + height (int): image height + + Returns: + np.ndarray: shape (height, width, 2) + """ + if left is None: left = 0 + if top is None: top = 0 + if right is None: right = width + if bottom is None: bottom = height + u = np.linspace((left + 0.5) / width, (right - 0.5) / width, right - left, dtype=dtype) + v = np.linspace((top + 0.5) / height, (bottom - 0.5) / height, bottom - top, dtype=dtype) + u, v = np.meshgrid(u, v, indexing='xy') + return np.stack([u, v], axis=2) + + +def image_pixel_center( + height: int, + width: int, + left: int = None, + top: int = None, + right: int = None, + bottom: int = None, + dtype: np.dtype = np.float32 +) -> np.ndarray: + """ + Get image pixel center coordinates, ranging in [0, width] and [0, height]. + `image[i, j]` has pixel center coordinates `(j + 0.5, i + 0.5)`. + + >>> image_pixel_center(10, 10): + [[[0.5, 0.5], [1.5, 0.5], ..., [9.5, 0.5]], + [[0.5, 1.5], [1.5, 1.5], ..., [9.5, 1.5]], + ... ... ... 
+ [[0.5, 9.5], [1.5, 9.5], ..., [9.5, 9.5]]] + + Args: + width (int): image width + height (int): image height + + Returns: + np.ndarray: shape (height, width, 2) + """ + if left is None: left = 0 + if top is None: top = 0 + if right is None: right = width + if bottom is None: bottom = height + u = np.linspace(left + 0.5, right - 0.5, right - left, dtype=dtype) + v = np.linspace(top + 0.5, bottom - 0.5, bottom - top, dtype=dtype) + u, v = np.meshgrid(u, v, indexing='xy') + return np.stack([u, v], axis=2) + +def image_pixel( + height: int, + width: int, + left: int = None, + top: int = None, + right: int = None, + bottom: int = None, + dtype: np.dtype = np.int32 +) -> np.ndarray: + """ + Get image pixel coordinates grid, ranging in [0, width - 1] and [0, height - 1]. + `image[i, j]` has pixel center coordinates `(j, i)`. + + >>> image_pixel_center(10, 10): + [[[0, 0], [1, 0], ..., [9, 0]], + [[0, 1.5], [1, 1], ..., [9, 1]], + ... ... ... + [[0, 9.5], [1, 9], ..., [9, 9 ]]] + + Args: + width (int): image width + height (int): image height + + Returns: + np.ndarray: shape (height, width, 2) + """ + if left is None: left = 0 + if top is None: top = 0 + if right is None: right = width + if bottom is None: bottom = height + u = np.arange(left, right, dtype=dtype) + v = np.arange(top, bottom, dtype=dtype) + u, v = np.meshgrid(u, v, indexing='xy') + return np.stack([u, v], axis=2) + + +def image_mesh( + *image_attrs: np.ndarray, + mask: np.ndarray = None, + tri: bool = False, + return_indices: bool = False +) -> Tuple[np.ndarray, ...]: + """ + Get a mesh regarding image pixel uv coordinates as vertices and image grid as faces. + + Args: + *image_attrs (np.ndarray): image attributes in shape (height, width, [channels]) + mask (np.ndarray, optional): binary mask of shape (height, width), dtype=bool. Defaults to None. + + Returns: + faces (np.ndarray): faces connecting neighboring pixels. 
shape (T, 4) if tri is False, else (T, 3) + *vertex_attrs (np.ndarray): vertex attributes in corresponding order with input image_attrs + indices (np.ndarray, optional): indices of vertices in the original mesh + """ + assert (len(image_attrs) > 0) or (mask is not None), "At least one of image_attrs or mask should be provided" + height, width = next(image_attrs).shape[:2] if mask is None else mask.shape + assert all(img.shape[:2] == (height, width) for img in image_attrs), "All image_attrs should have the same shape" + + row_faces = np.stack([np.arange(0, width - 1, dtype=np.int32), np.arange(width, 2 * width - 1, dtype=np.int32), np.arange(1 + width, 2 * width, dtype=np.int32), np.arange(1, width, dtype=np.int32)], axis=1) + faces = (np.arange(0, (height - 1) * width, width, dtype=np.int32)[:, None, None] + row_faces[None, :, :]).reshape((-1, 4)) + if mask is None: + if tri: + faces = mesh.triangulate(faces) + ret = [faces, *(img.reshape(-1, *img.shape[2:]) for img in image_attrs)] + if return_indices: + ret.append(np.arange(height * width, dtype=np.int32)) + return tuple(ret) + else: + quad_mask = (mask[:-1, :-1] & mask[1:, :-1] & mask[1:, 1:] & mask[:-1, 1:]).ravel() + faces = faces[quad_mask] + if tri: + faces = mesh.triangulate(faces) + return mesh.remove_unreferenced_vertices( + faces, + *(x.reshape(-1, *x.shape[2:]) for x in image_attrs), + return_indices=return_indices + ) + + +def image_mesh_from_depth( + depth: np.ndarray, + extrinsics: np.ndarray = None, + intrinsics: np.ndarray = None, + *vertice_attrs: np.ndarray, + atol: float = None, + rtol: float = None, + remove_by_depth: bool = False, + return_uv: bool = False, + return_indices: bool = False +) -> Tuple[np.ndarray, ...]: + """ + Get x triangle mesh by lifting depth map to 3D. + + Args: + depth (np.ndarray): [H, W] depth map + extrinsics (np.ndarray, optional): [4, 4] extrinsics matrix. Defaults to None. + intrinsics (np.ndarray, optional): [3, 3] intrinsics matrix. Defaults to None. 
+ *vertice_attrs (np.ndarray): [H, W, C] vertex attributes. Defaults to None. + atol (float, optional): absolute tolerance. Defaults to None. + rtol (float, optional): relative tolerance. Defaults to None. + triangles with vertices having depth difference larger than atol + rtol * depth will be marked. + remove_by_depth (bool, optional): whether to remove triangles with large depth difference. Defaults to True. + return_uv (bool, optional): whether to return uv coordinates. Defaults to False. + return_indices (bool, optional): whether to return indices of vertices in the original mesh. Defaults to False. + + Returns: + vertices (np.ndarray): [N, 3] vertices + faces (np.ndarray): [T, 3] faces + *vertice_attrs (np.ndarray): [N, C] vertex attributes + image_uv (np.ndarray, optional): [N, 2] uv coordinates + ref_indices (np.ndarray, optional): [N] indices of vertices in the original mesh + """ + height, width = depth.shape + image_uv, image_face = image_mesh(height, width) + depth = depth.reshape(-1) + pts = transforms.unproject_cv(image_uv, depth, extrinsics, intrinsics) + image_face = mesh.triangulate(image_face, vertices=pts) + ref_indices = None + ret = [] + if atol is not None or rtol is not None: + atol = 0 if atol is None else atol + rtol = 0 if rtol is None else rtol + mean = depth[image_face].mean(axis=1) + diff = np.max(np.abs(depth[image_face] - depth[image_face[:, [1, 2, 0]]]), axis=1) + mask = (diff <= atol + rtol * mean) + image_face_ = image_face[mask] + image_face_, ref_indices = mesh.remove_unreferenced_vertices(image_face_, return_indices=True) + + remove = remove_by_depth and ref_indices is not None + if remove: + pts = pts[ref_indices] + image_face = image_face_ + ret += [pts, image_face] + for attr in vertice_attrs: + ret.append(attr.reshape(-1, attr.shape[-1]) if not remove else attr.reshape(-1, attr.shape[-1])[ref_indices]) + if return_uv: + ret.append(image_uv if not remove else image_uv[ref_indices]) + if return_indices and ref_indices is not 
None: + ret.append(ref_indices) + return tuple(ret) + + +def chessboard(width: int, height: int, grid_size: int, color_a: np.ndarray, color_b: np.ndarray) -> np.ndarray: + """get x chessboard image + + Args: + width (int): image width + height (int): image height + grid_size (int): size of chessboard grid + color_a (np.ndarray): color of the grid at the top-left corner + color_b (np.ndarray): color in complementary grid cells + + Returns: + image (np.ndarray): shape (height, width, channels), chessboard image + """ + x = np.arange(width) // grid_size + y = np.arange(height) // grid_size + mask = (x[None, :] + y[:, None]) % 2 + image = (1 - mask[..., None]) * color_a + mask[..., None] * color_b + return image + + +def square(tri: bool = False) -> Tuple[np.ndarray, np.ndarray]: + """ + Get a square mesh of area 1 centered at origin in the xy-plane. + + ### Returns + vertices (np.ndarray): shape (4, 3) + faces (np.ndarray): shape (1, 4) + """ + vertices = np.array([ + [-0.5, 0.5, 0], [0.5, 0.5, 0], [0.5, -0.5, 0], [-0.5, -0.5, 0] # v0-v1-v2-v3 + ], dtype=np.float32) + if tri: + faces = np.array([[0, 1, 2], [0, 2, 3]], dtype=np.int32) + else: + faces = np.array([[0, 1, 2, 3]], dtype=np.int32) + return vertices, faces + + +def cube(tri: bool = False) -> Tuple[np.ndarray, np.ndarray]: + """ + Get x cube mesh of size 1 centered at origin. + + ### Parameters + tri (bool, optional): return triangulated mesh. Defaults to False, which returns quad mesh. 
+ + ### Returns + vertices (np.ndarray): shape (8, 3) + faces (np.ndarray): shape (12, 3) + """ + vertices = np.array([ + [-0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [-0.5, -0.5, 0.5], # v0-v1-v2-v3 + [-0.5, 0.5, -0.5], [0.5, 0.5, -0.5], [0.5, -0.5, -0.5], [-0.5, -0.5, -0.5] # v4-v5-v6-v7 + ], dtype=np.float32).reshape((-1, 3)) + + faces = np.array([ + [0, 1, 2, 3], # v0-v1-v2-v3 (front) + [4, 5, 1, 0], # v4-v5-v1-v0 (top) + [3, 2, 6, 7], # v3-v2-v6-v7 (bottom) + [5, 4, 7, 6], # v5-v4-v7-v6 (back) + [1, 5, 6, 2], # v1-v5-v6-v2 (right) + [4, 0, 3, 7] # v4-v0-v3-v7 (left) + ], dtype=np.int32) + + if tri: + faces = mesh.triangulate(faces, vertices=vertices) + + return vertices, faces + + +def camera_frustum(extrinsics: np.ndarray, intrinsics: np.ndarray, depth: float = 1.0) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Get x triangle mesh of camera frustum. + """ + assert extrinsics.shape == (4, 4) and intrinsics.shape == (3, 3) + vertices = transforms.unproject_cv( + np.array([[0, 0], [0, 0], [0, 1], [1, 1], [1, 0]], dtype=np.float32), + np.array([0] + [depth] * 4, dtype=np.float32), + extrinsics, + intrinsics + ).astype(np.float32) + edges = np.array([ + [0, 1], [0, 2], [0, 3], [0, 4], + [1, 2], [2, 3], [3, 4], [4, 1] + ], dtype=np.int32) + faces = np.array([ + [0, 1, 2], + [0, 2, 3], + [0, 3, 4], + [0, 4, 1], + [1, 2, 3], + [1, 3, 4] + ], dtype=np.int32) + return vertices, edges, faces + + +def icosahedron(): + A = (1 + 5 ** 0.5) / 2 + vertices = np.array([ + [0, 1, A], [0, -1, A], [0, 1, -A], [0, -1, -A], + [1, A, 0], [-1, A, 0], [1, -A, 0], [-1, -A, 0], + [A, 0, 1], [A, 0, -1], [-A, 0, 1], [-A, 0, -1] + ], dtype=np.float32) + faces = np.array([ + [0, 1, 8], [0, 8, 4], [0, 4, 5], [0, 5, 10], [0, 10, 1], + [3, 2, 9], [3, 9, 6], [3, 6, 7], [3, 7, 11], [3, 11, 2], + [1, 6, 8], [8, 9, 4], [4, 2, 5], [5, 11, 10], [10, 7, 1], + [2, 4, 9], [9, 8, 6], [6, 1, 7], [7, 10, 11], [11, 5, 2] + ], dtype=np.int32) + return vertices, faces \ No newline at end of 
file diff --git a/utils3d/utils3d/torch/__init__.py b/utils3d/utils3d/torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..613b47900fc5d52ea47da36e90dd8e43940dadaa --- /dev/null +++ b/utils3d/utils3d/torch/__init__.py @@ -0,0 +1,146 @@ +import importlib +import itertools +import torch +from typing import TYPE_CHECKING + +__modules_all__ = { + 'mesh': [ + 'triangulate', + 'compute_face_normal', + 'compute_face_angles', + 'compute_vertex_normal', + 'compute_vertex_normal_weighted', + 'compute_edges', + 'compute_connected_components', + 'compute_edge_connected_components', + 'compute_boundarys', + 'compute_dual_graph', + 'remove_unreferenced_vertices', + 'remove_corrupted_faces', + 'remove_isolated_pieces', + 'merge_duplicate_vertices', + 'subdivide_mesh_simple', + 'compute_face_tbn', + 'compute_vertex_tbn', + 'laplacian', + 'laplacian_smooth_mesh', + 'taubin_smooth_mesh', + 'laplacian_hc_smooth_mesh', + ], + 'nerf': [ + 'get_rays', + 'get_image_rays', + 'get_mipnerf_cones', + 'volume_rendering', + 'bin_sample', + 'importance_sample', + 'nerf_render_rays', + 'mipnerf_render_rays', + 'nerf_render_view', + 'mipnerf_render_view', + 'InstantNGP', + ], + 'utils': [ + 'sliding_window_1d', + 'sliding_window_2d', + 'sliding_window_nd', + 'image_uv', + 'image_pixel_center', + 'image_mesh', + 'chessboard', + 'depth_edge', + 'depth_aliasing', + 'image_mesh_from_depth', + 'points_to_normals', + 'depth_to_points', + 'depth_to_normals', + 'masked_min', + 'masked_max', + 'bounding_rect' + ], + 'transforms': [ + 'perspective', + 'perspective_from_fov', + 'perspective_from_fov_xy', + 'intrinsics_from_focal_center', + 'intrinsics_from_fov', + 'intrinsics_from_fov_xy', + 'focal_to_fov', + 'fov_to_focal', + 'intrinsics_to_fov', + 'view_look_at', + 'extrinsics_look_at', + 'perspective_to_intrinsics', + 'intrinsics_to_perspective', + 'extrinsics_to_view', + 'view_to_extrinsics', + 'normalize_intrinsics', + 'crop_intrinsics', + 'pixel_to_uv', + 
'pixel_to_ndc', + 'uv_to_pixel', + 'project_depth', + 'depth_buffer_to_linear', + 'project_gl', + 'project_cv', + 'unproject_gl', + 'unproject_cv', + 'skew_symmetric', + 'rotation_matrix_from_vectors', + 'euler_axis_angle_rotation', + 'euler_angles_to_matrix', + 'matrix_to_euler_angles', + 'matrix_to_quaternion', + 'quaternion_to_matrix', + 'matrix_to_axis_angle', + 'axis_angle_to_matrix', + 'axis_angle_to_quaternion', + 'quaternion_to_axis_angle', + 'slerp', + 'interpolate_extrinsics', + 'interpolate_view', + 'extrinsics_to_essential', + 'to4x4', + 'rotation_matrix_2d', + 'rotate_2d', + 'translate_2d', + 'scale_2d', + 'apply_2d', + ], + 'rasterization': [ + 'RastContext', + 'rasterize_triangle_faces', + 'rasterize_triangle_faces_depth_peeling', + 'texture', + 'texture_composite', + 'warp_image_by_depth', + 'warp_image_by_forward_flow', + ], +} + + +__all__ = list(itertools.chain(*__modules_all__.values())) + +def __getattr__(name): + try: + return globals()[name] + except KeyError: + pass + + try: + module_name = next(m for m in __modules_all__ if name in __modules_all__[m]) + except StopIteration: + raise AttributeError(f"module '{__name__}' has no attribute '{name}'") + module = importlib.import_module(f'.{module_name}', __name__) + for key in __modules_all__[module_name]: + globals()[key] = getattr(module, key) + + return globals()[name] + + +if TYPE_CHECKING: + from .transforms import * + from .mesh import * + from .utils import * + from .nerf import * + from .rasterization import * \ No newline at end of file diff --git a/utils3d/utils3d/torch/_helpers.py b/utils3d/utils3d/torch/_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..a67ba5427d301ade21ea21da8f15e52b31db3e21 --- /dev/null +++ b/utils3d/utils3d/torch/_helpers.py @@ -0,0 +1,103 @@ +# decorator +import torch +from numbers import Number +import inspect +from functools import wraps +from .._helpers import suppress_traceback + + +def get_device(args, kwargs): + device = None 
def get_device(args, kwargs):
    """Return the common device of all tensor arguments.

    Args:
        args: positional arguments to scan.
        kwargs: keyword arguments to scan.

    Returns:
        torch.device or None: the shared device, or None if no tensor was found.

    Raises:
        ValueError: if tensor arguments live on different devices.
    """
    device = None
    for arg in (list(args) + list(kwargs.values())):
        if isinstance(arg, torch.Tensor):
            if device is None:
                device = arg.device
            elif device != arg.device:
                raise ValueError("All tensors must be on the same device.")
    return device


def get_args_order(func, args, kwargs):
    """
    Get the order of the arguments of a function.

    Returns:
        args_order (list[int]): declared parameter index of each positional argument.
        kwargs_order (dict[str, int]): declared parameter index of each keyword argument.
    """
    names = inspect.getfullargspec(func).args
    names_idx = {name: i for i, name in enumerate(names)}
    kwargs_order = {}
    # Keyword arguments claim their declared slots first...
    for name in kwargs:
        if name in names:
            kwargs_order[name] = names_idx[name]
            names.remove(name)
    # ...then positional arguments fill the remaining slots in order.
    args_order = [names_idx[names[i]] for i in range(min(len(args), len(names)))]
    return args_order, kwargs_order


def broadcast_args(args, kwargs, args_dim, kwargs_dim):
    """Broadcast the leading ("spatial"/batch) dimensions of all tensor arguments.

    Each argument with a non-None dim spec keeps its trailing `dim` core
    dimensions; everything before that is treated as batch shape and broadcast
    across arguments, numpy-style, aligned at the trailing end.

    Returns:
        (args, kwargs, spatial): broadcast arguments plus the resolved common
        batch shape.
    """
    spatial = []
    for arg, arg_dim in zip(args + list(kwargs.values()), args_dim + list(kwargs_dim.values())):
        if isinstance(arg, torch.Tensor) and arg_dim is not None:
            arg_spatial = arg.shape[:arg.ndim - arg_dim]
            if len(arg_spatial) > len(spatial):
                spatial = [1] * (len(arg_spatial) - len(spatial)) + spatial
            # FIX: align shapes at the trailing end. The previous loop started
            # at j = 0, and `spatial[-0]` is `spatial[0]`, which mis-aligned
            # the comparison whenever the two shapes had different lengths.
            for j in range(1, len(arg_spatial) + 1):
                if spatial[-j] < arg_spatial[-j]:
                    if spatial[-j] == 1:
                        spatial[-j] = arg_spatial[-j]
                    else:
                        raise ValueError("Cannot broadcast arguments.")
    for i, arg in enumerate(args):
        if isinstance(arg, torch.Tensor) and args_dim[i] is not None:
            args[i] = torch.broadcast_to(arg, [*spatial, *arg.shape[arg.ndim - args_dim[i]:]])
    for key, arg in kwargs.items():
        if isinstance(arg, torch.Tensor) and kwargs_dim[key] is not None:
            kwargs[key] = torch.broadcast_to(arg, [*spatial, *arg.shape[arg.ndim - kwargs_dim[key]:]])
    return args, kwargs, spatial


@suppress_traceback
def batched(*dims):
    """
    Decorator that allows a function to be called with batched arguments.

    `dims[k]` is the number of trailing "core" dimensions of the k-th declared
    parameter (None = leave the argument untouched). The wrapper broadcasts
    all batch dimensions, flattens them into a single leading dimension, calls
    the function, then restores the batch shape on every result.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, device=torch.device('cpu'), **kwargs):
            args = list(args)
            # Map each supplied argument to its declared core-dim count.
            args_order, kwargs_order = get_args_order(func, args, kwargs)
            args_dim = [dims[i] for i in args_order]
            kwargs_dim = {key: dims[i] for key, i in kwargs_order.items()}
            # Convert plain numbers / sequences to tensors on the shared device.
            device = get_device(args, kwargs) or device
            for i, arg in enumerate(args):
                if isinstance(arg, (Number, list, tuple)) and args_dim[i] is not None:
                    args[i] = torch.tensor(arg, device=device)
            for key, arg in kwargs.items():
                if isinstance(arg, (Number, list, tuple)) and kwargs_dim[key] is not None:
                    kwargs[key] = torch.tensor(arg, device=device)
            # Broadcast batch dims, then flatten them into one leading dim.
            args, kwargs, spatial = broadcast_args(args, kwargs, args_dim, kwargs_dim)
            for i, (arg, arg_dim) in enumerate(zip(args, args_dim)):
                if isinstance(arg, torch.Tensor) and arg_dim is not None:
                    args[i] = arg.reshape([-1, *arg.shape[arg.ndim - arg_dim:]])
            for key, arg in kwargs.items():
                if isinstance(arg, torch.Tensor) and kwargs_dim[key] is not None:
                    kwargs[key] = arg.reshape([-1, *arg.shape[arg.ndim - kwargs_dim[key]:]])
            # Call the wrapped function and restore the batch shape on results.
            results = func(*args, **kwargs)
            type_results = type(results)
            results = list(results) if isinstance(results, (tuple, list)) else [results]
            for i, result in enumerate(results):
                results[i] = result.reshape([*spatial, *result.shape[1:]])
            if type_results == tuple:
                results = tuple(results)
            elif type_results == list:
                results = list(results)
            else:
                results = results[0]
            return results
        return wrapper
    return decorator
import * +from ._helpers import batched + + +__all__ = [ + 'triangulate', + 'compute_face_normal', + 'compute_face_angles', + 'compute_vertex_normal', + 'compute_vertex_normal_weighted', + 'compute_edges', + 'compute_connected_components', + 'compute_edge_connected_components', + 'compute_boundarys', + 'compute_dual_graph', + 'remove_unreferenced_vertices', + 'remove_corrupted_faces', + 'remove_isolated_pieces', + 'merge_duplicate_vertices', + 'subdivide_mesh_simple', + 'compute_face_tbn', + 'compute_vertex_tbn', + 'laplacian', + 'laplacian_smooth_mesh', + 'taubin_smooth_mesh', + 'laplacian_hc_smooth_mesh', +] + + +def _group( + values: torch.Tensor, + required_group_size: Optional[int] = None, + return_values: bool = False +) -> Tuple[Union[List[torch.Tensor], torch.Tensor], Optional[torch.Tensor]]: + """ + Group values into groups with identical values. + + Args: + values (torch.Tensor): [N] values to group + required_group_size (int, optional): required group size. Defaults to None. + return_values (bool, optional): return values of groups. Defaults to False. + + Returns: + group (Union[List[torch.Tensor], torch.Tensor]): list of groups or group indices. It will be a list of groups if required_group_size is None, otherwise a tensor of group indices. + group_values (Optional[torch.Tensor]): values of groups. Only returned if return_values is True. 
+ """ + sorted_values, indices = torch.sort(values) + nondupe = torch.cat([torch.tensor([True], dtype=torch.bool, device=values.device), sorted_values[1:] != sorted_values[:-1]]) + nondupe_indices = torch.cumsum(nondupe, dim=0) - 1 + counts = torch.bincount(nondupe_indices) + if required_group_size is None: + groups = torch.split(indices, counts.tolist()) + if return_values: + group_values = sorted_values[nondupe] + return groups, group_values + else: + return groups + else: + counts = counts[nondupe_indices] + groups = indices[counts == required_group_size].reshape(-1, required_group_size) + if return_values: + group_values = sorted_values[nondupe][counts[nondupe] == required_group_size] + return groups, group_values + else: + return groups + +def triangulate( + faces: torch.Tensor, + vertices: torch.Tensor = None, + backslash: bool = None +) -> torch.Tensor: + """ + Triangulate a polygonal mesh. + + Args: + faces (torch.Tensor): [..., L, P] polygonal faces + vertices (torch.Tensor, optional): [..., N, 3] 3-dimensional vertices. + If given, the triangulation is performed according to the distance + between vertices. Defaults to None. + backslash (torch.Tensor, optional): [..., L] boolean array indicating + how to triangulate the quad faces. Defaults to None. 
@batched(2, None)
def compute_face_normal(
    vertices: torch.Tensor,
    faces: torch.Tensor
) -> torch.Tensor:
    """
    Compute face normals of a triangular mesh

    Args:
        vertices (torch.Tensor): [..., N, 3] 3-dimensional vertices
        faces (torch.Tensor): [..., T, 3] triangular face indices

    Returns:
        normals (torch.Tensor): [..., T, 3] face normals
    """
    N = vertices.shape[0]
    # FIX: allocate the batch index on the vertices' device; a bare
    # torch.arange(N) lives on CPU and breaks CUDA inputs.
    index = torch.arange(N, device=vertices.device)[:, None]
    normal = torch.cross(
        vertices[index, faces[..., 1].long()] - vertices[index, faces[..., 0].long()],
        vertices[index, faces[..., 2].long()] - vertices[index, faces[..., 0].long()],
        dim=-1
    )
    return F.normalize(normal, p=2, dim=-1)


@batched(2, None)
def compute_face_angles(
    vertices: torch.Tensor,
    faces: torch.Tensor
) -> torch.Tensor:
    """
    Compute face angles of a triangular mesh

    Args:
        vertices (torch.Tensor): [..., N, 3] 3-dimensional vertices
        faces (torch.Tensor): [T, 3] triangular face indices

    Returns:
        angles (torch.Tensor): [..., T, 3] face angles
    """
    face_angles = []
    for i in range(3):
        # Angle at corner i between the two edges leaving it.
        edge1 = torch.index_select(vertices, dim=-2, index=faces[:, (i + 1) % 3]) - torch.index_select(vertices, dim=-2, index=faces[:, i])
        edge2 = torch.index_select(vertices, dim=-2, index=faces[:, (i + 2) % 3]) - torch.index_select(vertices, dim=-2, index=faces[:, i])
        face_angle = torch.arccos(torch.sum(F.normalize(edge1, p=2, dim=-1) * F.normalize(edge2, p=2, dim=-1), dim=-1))
        face_angles.append(face_angle)
    face_angles = torch.stack(face_angles, dim=-1)
    return face_angles


@batched(2, None, 2)
def compute_vertex_normal(
    vertices: torch.Tensor,
    faces: torch.Tensor,
    face_normal: torch.Tensor = None
) -> torch.Tensor:
    """
    Compute vertex normals of a triangular mesh by averaging neighboring face normals

    Args:
        vertices (torch.Tensor): [..., N, 3] 3-dimensional vertices
        faces (torch.Tensor): [T, 3] triangular face indices
        face_normal (torch.Tensor, optional): [..., T, 3] face normals.
            None to compute face normals from vertices and faces. Defaults to None.

    Returns:
        normals (torch.Tensor): [..., N, 3] vertex normals
    """
    N = vertices.shape[0]
    assert faces.shape[-1] == 3, "Only support triangular mesh"
    if face_normal is None:
        face_normal = compute_face_normal(vertices, faces)
    # Repeat each face normal once per corner so it can be scattered to the
    # three vertices of the face.
    face_normal = face_normal[:, :, None, :].expand(-1, -1, 3, -1).flatten(-3, -2)
    faces = faces.flatten()
    # FIX: index on the vertices' device (CPU arange broke CUDA inputs).
    vertex_normal = torch.index_put(
        torch.zeros_like(vertices),
        (torch.arange(N, device=vertices.device)[:, None], faces[None, :]),
        face_normal,
        accumulate=True
    )
    vertex_normal = F.normalize(vertex_normal, p=2, dim=-1)
    return vertex_normal


@batched(2, None, 2)
def compute_vertex_normal_weighted(
    vertices: torch.Tensor,
    faces: torch.Tensor,
    face_normal: torch.Tensor = None
) -> torch.Tensor:
    """
    Compute vertex normals of a triangular mesh by weighted sum of neighboring face normals
    according to the angles

    Args:
        vertices (torch.Tensor): [..., N, 3] 3-dimensional vertices
        faces (torch.Tensor): [T, 3] triangular face indices
        face_normal (torch.Tensor, optional): [..., T, 3] face normals.
            None to compute face normals from vertices and faces. Defaults to None.

    Returns:
        normals (torch.Tensor): [..., N, 3] vertex normals
    """
    N = vertices.shape[0]
    if face_normal is None:
        face_normal = compute_face_normal(vertices, faces)
    face_angle = compute_face_angles(vertices, faces)
    # [N, T, 3, 3]: one angle-weighted copy of the face normal per corner.
    face_normal = face_normal[:, :, None, :].expand(-1, -1, 3, -1) * face_angle[..., None]
    # FIX: `faces` is not batched under @batched(2, None, 2) (shape [T, 3]),
    # so the previous `faces.view(N, -1)` only worked for N == 1. Use the
    # broadcasting index pattern of compute_vertex_normal instead.
    vertex_normal = torch.index_put(
        torch.zeros_like(vertices),
        (torch.arange(N, device=vertices.device)[:, None], faces.flatten()[None, :]),
        face_normal.reshape(N, -1, 3),
        accumulate=True
    )
    vertex_normal = F.normalize(vertex_normal, p=2, dim=-1)
    return vertex_normal
def compute_edges(
    faces: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Compute edges of a mesh.

    Args:
        faces (torch.Tensor): [T, 3] triangular face indices

    Returns:
        edges (torch.Tensor): [E, 2] unique undirected edges (each sorted low-high)
        face2edge (torch.Tensor): [T, 3] mapping from face to edge
        counts (torch.Tensor): [E] degree of each edge (number of incident faces)
    """
    T = faces.shape[0]
    # Each face contributes its three edges, canonicalized to (min, max).
    directed = torch.cat([faces[:, [0, 1]], faces[:, [1, 2]], faces[:, [2, 0]]], dim=0)  # [3T, 2]
    undirected = torch.sort(directed, dim=1).values
    edges, inverse, counts = torch.unique(undirected, return_inverse=True, return_counts=True, dim=0)
    # `inverse` is laid out as [edge01 of all faces, edge12 ..., edge20 ...].
    face2edge = inverse.view(3, T).T
    return edges, face2edge, counts


def compute_connected_components(
    faces: torch.Tensor,
    edges: torch.Tensor=None,
    face2edge: torch.Tensor=None
) -> List[torch.Tensor]:
    """
    Compute connected faces of a mesh.

    Args:
        faces (torch.Tensor): [T, 3] triangular face indices
        edges (torch.Tensor, optional): [E, 2] edge indices. Defaults to None.
        face2edge (torch.Tensor, optional): [T, 3] mapping from face to edge. Defaults to None.
            NOTE: If edges and face2edge are not provided, they will be computed.

    Returns:
        components (List[torch.Tensor]): list of connected faces
    """
    T = faces.shape[0]
    if edges is None or face2edge is None:
        edges, face2edge, _ = compute_edges(faces)
    E = edges.shape[0]

    # Min-label propagation: every face starts in its own component and
    # repeatedly adopts the smallest label reachable through a shared edge,
    # until a fixed point is reached.
    labels = torch.arange(T, dtype=torch.int32, device=faces.device)
    while True:
        edge_labels = torch.scatter_reduce(
            torch.zeros(E, dtype=torch.int32, device=faces.device),
            0,
            face2edge.flatten().long(),
            labels.view(-1, 1).expand(-1, 3).flatten(),
            reduce='amin',
            include_self=False
        )
        relabeled = torch.min(edge_labels[face2edge], dim=-1).values
        if torch.equal(labels, relabeled):
            break
        labels = relabeled

    return _group(labels)


def compute_edge_connected_components(
    edges: torch.Tensor,
) -> List[torch.Tensor]:
    """
    Compute connected edges of a mesh.

    Args:
        edges (torch.Tensor): [E, 2] edge indices

    Returns:
        components (List[torch.Tensor]): list of connected edges
    """
    E = edges.shape[0]

    # Compact the vertex ids so scatter buffers stay small.
    verts, edges = torch.unique(edges.flatten(), return_inverse=True)
    edges = edges.view(-1, 2)
    V = verts.shape[0]

    # Same min-label propagation as compute_connected_components, but the
    # labels flow through shared vertices instead of shared edges.
    labels = torch.arange(E, dtype=torch.int32, device=edges.device)
    while True:
        vertex_labels = torch.scatter_reduce(
            torch.zeros(V, dtype=torch.int32, device=edges.device),
            0,
            edges.flatten().long(),
            labels.view(-1, 1).expand(-1, 2).flatten(),
            reduce='amin',
            include_self=False
        )
        relabeled = torch.min(vertex_labels[edges], dim=-1).values
        if torch.equal(labels, relabeled):
            break
        labels = relabeled

    return _group(labels)
def compute_boundarys(
    faces: torch.Tensor,
    edges: torch.Tensor=None,
    face2edge: torch.Tensor=None,
    edge_degrees: torch.Tensor=None
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    """
    Compute boundary edges of a mesh, grouped into connected boundary components.

    Args:
        faces (torch.Tensor): [T, 3] triangular face indices
        edges (torch.Tensor, optional): [E, 2] edge indices. Computed from `faces` if omitted.
        face2edge (torch.Tensor, optional): [T, 3] mapping from face to edge. Computed if omitted.
        edge_degrees (torch.Tensor, optional): [E] degree of each edge. Computed if omitted.

    Returns:
        boundary_edge_indices (List[torch.Tensor]): one tensor of edge indices per boundary component
        boundary_face_indices (List[torch.Tensor]): faces touching each boundary component
    """
    # FIX: the optional parameters defaulted to None but were used
    # unconditionally, crashing whenever callers omitted them. Fall back to
    # computing the edge structure from `faces`.
    if edges is None or face2edge is None or edge_degrees is None:
        edges, face2edge, edge_degrees = compute_edges(faces)

    # Boundary edges are those incident to exactly one face.
    boundary_edges = edges[edge_degrees == 1]  # [BE, 2]
    boundary_edges_idx = torch.nonzero(edge_degrees == 1, as_tuple=False).flatten()  # [BE]
    E = edges.shape[0]
    BE = boundary_edges.shape[0]

    # Re-index boundary vertices compactly so scatter buffers stay small.
    boundary_vertices, boundary_edges = torch.unique(boundary_edges.flatten(), return_inverse=True)
    boundary_edges = boundary_edges.view(-1, 2)
    BV = boundary_vertices.shape[0]

    # Min-label propagation through shared boundary vertices until convergence.
    boundary_edge_labels = torch.arange(BE, dtype=torch.int32, device=faces.device)
    while True:
        boundary_vertex_labels = torch.scatter_reduce(
            torch.zeros(BV, dtype=torch.int32, device=faces.device),
            0,
            boundary_edges.flatten().long(),
            boundary_edge_labels.view(-1, 1).expand(-1, 2).flatten(),
            reduce='amin',
            include_self=False
        )
        new_boundary_edge_labels = torch.min(boundary_vertex_labels[boundary_edges], dim=-1).values
        if torch.equal(boundary_edge_labels, new_boundary_edge_labels):
            break
        boundary_edge_labels = new_boundary_edge_labels

    labels = torch.unique(boundary_edge_labels)
    boundary_edge_indices = [boundary_edges_idx[boundary_edge_labels == label] for label in labels]
    # A face belongs to a boundary component if any of its edges carries that label.
    edge_labels = torch.full((E,), -1, dtype=torch.int32, device=faces.device)
    edge_labels[boundary_edges_idx] = boundary_edge_labels
    boundary_face_indices = [torch.nonzero((edge_labels[face2edge] == label).any(dim=-1), as_tuple=False).flatten() for label in labels]

    return boundary_edge_indices, boundary_face_indices
def compute_dual_graph(
    face2edge: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute dual graph of a mesh.

    Args:
        face2edge (torch.Tensor): [T, 3] mapping from face to edge.

    Returns:
        dual_edges (torch.Tensor): [DE, 2] face indices of dual edges
        dual_edge2edge (torch.Tensor): [DE] mapping from dual edge to edge
    """
    flat_edge_ids = face2edge.flatten()  # [3T]
    # Interior edges occur exactly twice; each pair of incident faces becomes
    # one dual edge.
    dual_edges, dual_edge2edge = _group(flat_edge_ids, required_group_size=2, return_values=True)
    # Positions in the flattened array map back to face indices.
    dual_edges = dual_edges // face2edge.shape[1]
    return dual_edges, dual_edge2edge


def remove_unreferenced_vertices(
    faces: torch.Tensor,
    *vertice_attrs,
    return_indices: bool = False
) -> Tuple[torch.Tensor, ...]:
    """
    Remove unreferenced vertices of a mesh.
    Unreferenced vertices are removed, and the face indices are updated accordingly.

    Args:
        faces (torch.Tensor): [T, P] face indices
        *vertice_attrs: vertex attributes

    Returns:
        faces (torch.Tensor): [T, P] re-indexed face indices
        *vertice_attrs: vertex attributes restricted to referenced vertices
        indices (torch.Tensor, optional): [N] indices of vertices that are kept. Only if return_indices.
    """
    P = faces.shape[-1]
    referenced, remapped = torch.unique(faces, return_inverse=True)
    out = [remapped.to(torch.int32).reshape(-1, P)]
    out.extend(attr[referenced] for attr in vertice_attrs)
    if return_indices:
        out.append(referenced)
    return tuple(out)


def remove_corrupted_faces(
    faces: torch.Tensor
) -> torch.Tensor:
    """
    Remove corrupted faces (faces with duplicated vertices)

    Args:
        faces (torch.Tensor): [T, 3] triangular face indices

    Returns:
        torch.Tensor: [T_, 3] triangular face indices
    """
    dup01 = faces[:, 0] == faces[:, 1]
    dup12 = faces[:, 1] == faces[:, 2]
    dup20 = faces[:, 2] == faces[:, 0]
    return faces[~(dup01 | dup12 | dup20)]


def merge_duplicate_vertices(
    vertices: torch.Tensor,
    faces: torch.Tensor,
    tol: float = 1e-6
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Merge duplicate vertices of a triangular mesh.
    Duplicates are merged by selecting one of them, and face indices are updated accordingly.

    Args:
        vertices (torch.Tensor): [N, 3] 3-dimensional vertices
        faces (torch.Tensor): [T, 3] triangular face indices
        tol (float, optional): tolerance for merging. Defaults to 1e-6.

    Returns:
        vertices (torch.Tensor): [N_, 3] 3-dimensional vertices
        faces (torch.Tensor): [T, 3] triangular face indices
    """
    # Quantize to the tolerance grid to decide which vertices coincide.
    quantized = torch.round(vertices / tol)
    uni, uni_inv = torch.unique(quantized, dim=0, return_inverse=True)
    # Replace each representative with one of the original (un-quantized) vertices.
    uni[uni_inv] = vertices
    faces = uni_inv[faces]
    return uni, faces
def remove_isolated_pieces(
    vertices: torch.Tensor,
    faces: torch.Tensor,
    connected_components: List[torch.Tensor] = None,
    thresh_num_faces: int = None,
    thresh_radius: float = None,
    thresh_boundary_ratio: float = None,
    remove_unreferenced: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Remove isolated pieces of a mesh.
    Isolated pieces are removed, and the face indices are updated accordingly.
    If no face is left, will return the largest connected component.

    Args:
        vertices (torch.Tensor): [N, 3] 3-dimensional vertices
        faces (torch.Tensor): [T, 3] triangular face indices
        connected_components (List[torch.Tensor], optional): connected components of the mesh. If None, it will be computed. Defaults to None.
        thresh_num_faces (int, optional): threshold of number of faces for isolated pieces. Defaults to None.
        thresh_radius (float, optional): threshold of bounding radius for isolated pieces. Defaults to None.
        thresh_boundary_ratio (float, optional): threshold of boundary-edge-to-face ratio for isolated pieces. Defaults to None.
        remove_unreferenced (bool, optional): remove unreferenced vertices after removing isolated pieces. Defaults to True.

    Returns:
        vertices (torch.Tensor): [N_, 3] 3-dimensional vertices
        faces (torch.Tensor): [T, 3] triangular face indices
    """
    if connected_components is None:
        connected_components = compute_connected_components(faces)
    # Largest component first; it is always kept, so every filter below only
    # considers components[1:].
    connected_components = sorted(connected_components, key=len, reverse=True)

    if thresh_num_faces is not None:
        connected_components = [connected_components[0]] + [
            comp for comp in connected_components[1:] if len(comp) >= thresh_num_faces
        ]

    if thresh_radius is not None:
        kept = [connected_components[0]]
        for comp in connected_components[1:]:
            comp_vertices = vertices[faces[comp].flatten().unique()]
            center = comp_vertices.mean(dim=0)
            radius = (comp_vertices - center).norm(p=2, dim=-1).max()
            if radius >= thresh_radius:
                kept.append(comp)
        connected_components = kept

    if thresh_boundary_ratio is not None:
        kept = [connected_components[0]]
        for comp in connected_components[1:]:
            comp_faces = faces[comp]
            comp_edges = torch.cat(
                [comp_faces[:, [0, 1]], comp_faces[:, [1, 2]], comp_faces[:, [2, 0]]], dim=0
            )
            comp_edges = torch.sort(comp_edges, dim=1).values
            _, edge_counts = torch.unique(comp_edges, return_counts=True, dim=0)
            # Open (boundary) edges appear only once.
            num_boundary_edges = (edge_counts == 1).sum().item()
            if num_boundary_edges / len(comp) <= thresh_boundary_ratio:
                kept.append(comp)
        connected_components = kept

    # Concatenate the surviving components and optionally compact the vertices.
    faces = torch.cat([faces[comp] for comp in connected_components], dim=0)
    if remove_unreferenced:
        faces, vertices = remove_unreferenced_vertices(faces, vertices)
    return vertices, faces
def subdivide_mesh_simple(vertices: torch.Tensor, faces: torch.Tensor, n: int = 1) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Subdivide a triangular mesh by splitting each triangle into 4 smaller triangles.
    NOTE: All original vertices are kept, and new vertices are appended to the end of the vertex list.

    Args:
        vertices (torch.Tensor): [N, 3] 3-dimensional vertices
        faces (torch.Tensor): [T, 3] triangular face indices
        n (int, optional): number of subdivisions. Defaults to 1.

    Returns:
        vertices (torch.Tensor): [N_, 3] subdivided 3-dimensional vertices
        faces (torch.Tensor): [4 * T, 3] subdivided triangular face indices
    """
    for _ in range(n):
        edges = torch.stack([faces[:, [0, 1]], faces[:, [1, 2]], faces[:, [2, 0]]], dim=0)  # [3, T, 2]
        # FIX: torch.sort returns a (values, indices) named tuple — the
        # previous code dropped `.values` and fed the tuple into torch.unique.
        edges = torch.sort(edges, dim=2).values
        # FIX: deduplicate over the flattened [3T, 2] edge list; uniquifying
        # the [3, T, 2] stack along dim 0 compared whole [T, 2] slabs instead
        # of individual edges.
        uni_edges, uni_inv = torch.unique(edges.reshape(-1, 2), return_inverse=True, dim=0)
        uni_inv = uni_inv.reshape(3, -1)  # [3, T]: unique-edge id of edge k of each face
        midpoints = (vertices[uni_edges[:, 0]] + vertices[uni_edges[:, 1]]) / 2

        n_vertices = vertices.shape[0]
        vertices = torch.cat([vertices, midpoints], dim=0)
        # Each triangle (a, b, c) with edge midpoints m_ab, m_bc, m_ca becomes:
        # (a, m_ab, m_ca), (b, m_bc, m_ab), (c, m_ca, m_bc), (m_ab, m_bc, m_ca).
        faces = torch.cat([
            torch.stack([faces[:, 0], n_vertices + uni_inv[0], n_vertices + uni_inv[2]], dim=1),
            torch.stack([faces[:, 1], n_vertices + uni_inv[1], n_vertices + uni_inv[0]], dim=1),
            torch.stack([faces[:, 2], n_vertices + uni_inv[2], n_vertices + uni_inv[1]], dim=1),
            torch.stack([n_vertices + uni_inv[0], n_vertices + uni_inv[1], n_vertices + uni_inv[2]], dim=1),
        ], dim=0)
    return vertices, faces
def compute_face_tbn(pos: torch.Tensor, faces_pos: torch.Tensor, uv: torch.Tensor, faces_uv: torch.Tensor, eps: float = 1e-7) -> torch.Tensor:
    """compute TBN matrix for each face

    Args:
        pos (torch.Tensor): shape (..., N_pos, 3), positions
        faces_pos (torch.Tensor): shape(T, 3)
        uv (torch.Tensor): shape (..., N_uv, 2) uv coordinates
        faces_uv (torch.Tensor): shape(T, 3)
        eps (float): numerical floor added to column norms before normalization

    Returns:
        torch.Tensor: (..., T, 3, 3) TBN matrix for each face. Note TBN vectors
        are normalized but not necessarily orthogonal
    """
    e01 = torch.index_select(pos, dim=-2, index=faces_pos[:, 1]) - torch.index_select(pos, dim=-2, index=faces_pos[:, 0])
    e02 = torch.index_select(pos, dim=-2, index=faces_pos[:, 2]) - torch.index_select(pos, dim=-2, index=faces_pos[:, 0])
    uv01 = torch.index_select(uv, dim=-2, index=faces_uv[:, 1]) - torch.index_select(uv, dim=-2, index=faces_uv[:, 0])
    uv02 = torch.index_select(uv, dim=-2, index=faces_uv[:, 2]) - torch.index_select(uv, dim=-2, index=faces_uv[:, 0])
    # FIX: pass dim=-1 explicitly. Without it torch.cross uses the (deprecated)
    # default of the first dimension of size 3, which is wrong whenever a
    # batch or face dimension happens to be 3.
    normal = torch.cross(e01, e02, dim=-1)
    # Solve [T B] from [e01 e02] = [T B] @ [uv01 uv02].
    tangent_bitangent = torch.stack([e01, e02], dim=-1) @ torch.inverse(torch.stack([uv01, uv02], dim=-1))
    tbn = torch.cat([tangent_bitangent, normal.unsqueeze(-1)], dim=-1)
    tbn = tbn / (torch.norm(tbn, p=2, dim=-2, keepdim=True) + eps)
    return tbn


def compute_vertex_tbn(faces_topo: torch.Tensor, pos: torch.Tensor, faces_pos: torch.Tensor, uv: torch.Tensor, faces_uv: torch.Tensor) -> torch.Tensor:
    """compute TBN matrix for each vertex by accumulating incident face TBNs

    Args:
        faces_topo (torch.Tensor): (T, 3), face indices of topology
        pos (torch.Tensor): shape (..., N_pos, 3), positions
        faces_pos (torch.Tensor): shape(T, 3)
        uv (torch.Tensor): shape (..., N_uv, 2) uv coordinates
        faces_uv (torch.Tensor): shape(T, 3)

    Returns:
        torch.Tensor: (..., V, 3, 3) TBN matrix for each vertex. Note TBN
        vectors are normalized but not necessarily orthogonal
    """
    n_vertices = faces_topo.max().item() + 1
    n_tri = faces_topo.shape[-2]
    batch_shape = pos.shape[:-2]
    face_tbn = compute_face_tbn(pos, faces_pos, uv, faces_uv)  # (..., T, 3, 3)
    # One copy of the face TBN per corner so it can be scattered per vertex.
    face_tbn = face_tbn[..., :, None, :, :].repeat(*[1] * len(batch_shape), 1, 3, 1, 1).view(*batch_shape, n_tri * 3, 3, 3)  # (..., T * 3, 3, 3)
    vertex_tbn = torch.index_add(torch.zeros(*batch_shape, n_vertices, 3, 3).to(face_tbn), dim=-3, index=faces_topo.view(-1), source=face_tbn)
    vertex_tbn = vertex_tbn / (torch.norm(vertex_tbn, p=2, dim=-2, keepdim=True) + 1e-7)
    return vertex_tbn
def laplacian(vertices: torch.Tensor, faces: torch.Tensor, weight: str = 'uniform') -> torch.Tensor:
    """One Laplacian averaging pass over the mesh vertices.

    Args:
        vertices (torch.Tensor): shape (..., N, 3)
        faces (torch.Tensor): shape (T, 3)
        weight (str): 'uniform' or 'cotangent'

    Returns:
        torch.Tensor: shape (..., N, 3) weighted average of neighboring vertices
    """
    accum = torch.zeros_like(vertices)  # (..., N, 3) weighted neighbor sums
    accum_w = torch.zeros(*vertices.shape[:-1]).to(vertices)  # (..., N) weight totals
    corner = torch.index_select(vertices, -2, faces.view(-1)).view(*vertices.shape[:-2], *faces.shape, vertices.shape[-1])  # (..., T, 3, 3)
    if weight == 'cotangent':
        for i in range(3):
            # Cotangent of the angle at corner i weights the opposite edge.
            e1 = corner[..., (i + 1) % 3, :] - corner[..., i, :]
            e2 = corner[..., (i + 2) % 3, :] - corner[..., i, :]
            cot = (e1 * e2).sum(dim=-1) / torch.cross(e1, e2, dim=-1).norm(p=2, dim=-1)  # (..., T)
            accum = torch.index_add(accum, -2, faces[:, (i + 1) % 3], corner[..., (i + 2) % 3, :] * cot[..., None])
            accum_w = torch.index_add(accum_w, -1, faces[:, (i + 1) % 3], cot)
            accum = torch.index_add(accum, -2, faces[:, (i + 2) % 3], corner[..., (i + 1) % 3, :] * cot[..., None])
            accum_w = torch.index_add(accum_w, -1, faces[:, (i + 2) % 3], cot)
    elif weight == 'uniform':
        for i in range(3):
            accum = torch.index_add(accum, -2, faces[:, i], corner[..., (i + 1) % 3, :])
            accum_w = torch.index_add(accum_w, -1, faces[:, i], torch.ones_like(corner[..., i, 0]))
    else:
        raise NotImplementedError
    return accum / (accum_w[..., None] + 1e-7)


def laplacian_smooth_mesh(vertices: torch.Tensor, faces: torch.Tensor, weight: str = 'uniform', times: int = 5) -> torch.Tensor:
    """Apply `laplacian` repeatedly (`times` passes).

    Args:
        vertices (torch.Tensor): shape (..., N, 3)
        faces (torch.Tensor): shape (T, 3)
        weight (str): 'uniform' or 'cotangent'
        times (int): number of smoothing passes
    """
    smoothed = vertices
    for _ in range(times):
        smoothed = laplacian(smoothed, faces, weight)
    return smoothed


def taubin_smooth_mesh(vertices: torch.Tensor, faces: torch.Tensor, lambda_: float = 0.5, mu_: float = -0.51) -> torch.Tensor:
    """Taubin smoothing: a shrinking pass (lambda_) followed by an inflating pass (mu_).

    Args:
        vertices (torch.Tensor): shape (..., N, 3)
        faces (torch.Tensor): shape (T, 3)
        lambda_ (float, optional): positive smoothing step. Defaults to 0.5.
        mu_ (float, optional): negative (inflating) step. Defaults to -0.51.

    Returns:
        torch.Tensor: smoothed vertices, shape (..., N, 3)
    """
    shrunk = vertices + lambda_ * laplacian_smooth_mesh(vertices, faces)
    return shrunk + mu_ * laplacian_smooth_mesh(shrunk, faces)


def laplacian_hc_smooth_mesh(vertices: torch.Tensor, faces: torch.Tensor, times: int = 5, alpha: float = 0.5, beta: float = 0.5, weight: str = 'uniform'):
    """HC algorithm from Improved Laplacian Smoothing of Noisy Surface Meshes by J.Vollmer et al.
    """
    p = vertices
    for _ in range(times):
        q = p
        # NOTE(review): every pass re-smooths the ORIGINAL vertices rather than
        # the running estimate `p` — confirm against Vollmer et al.; behavior
        # intentionally kept as-is here.
        p = laplacian_smooth_mesh(vertices, faces, weight)
        b = p - (alpha * vertices + (1 - alpha) * q)
        p = p - (beta * b + (1 - beta) * laplacian_smooth_mesh(b, faces, weight)) * 0.8
    return p
+ """ + uvz = torch.cat([uv, torch.ones_like(uv[..., :1])], dim=-1).to(extrinsics) # (n_batch, n_views, n_rays, 3) + + with torch.cuda.amp.autocast(enabled=False): + inv_transformation = (intrinsics @ extrinsics[..., :3, :3]).inverse() + inv_extrinsics = extrinsics.inverse() + rays_d = uvz @ inv_transformation.transpose(-1, -2) + rays_o = inv_extrinsics[..., None, :3, 3] # (n_batch, n_views, 1, 3) + return rays_o, rays_d + + +def get_image_rays(extrinsics: Tensor, intrinsics: Tensor, width: int, height: int) -> Tuple[Tensor, Tensor]: + """ + Args: + extrinsics: (..., 4, 4) extrinsics matrices. + intrinsics: (..., 3, 3) intrinsics matrices. + width: width of the image. + height: height of the image. + + Returns: + rays_o: (..., 1, 1, 3) ray origins + rays_d: (..., height, width, 3) ray directions. + NOTE: ray directions are NOT normalized. They actuallys makes rays_o + rays_d * z = world coordinates, where z is the depth. + """ + uv = image_uv(height, width).to(extrinsics).flatten(0, 1) + rays_o, rays_d = get_rays(extrinsics, intrinsics, uv) + rays_o = rays_o.unflatten(-2, (1, 1)) + rays_d = rays_d.unflatten(-2, (height, width)) + return rays_o, rays_d + + +def get_mipnerf_cones(rays_o: Tensor, rays_d: Tensor, z_vals: Tensor, pixel_width: Tensor) -> Tuple[Tensor, Tensor]: + """ + Args: + rays_o: (..., n_rays, 3) ray origins + rays_d: (..., n_rays, 3) ray directions. + z_vals: (..., n_rays, n_samples) z values. + pixel_width: (...) pixel width. = 1 / (normalized focal length * width) + + Returns: + mu: (..., n_rays, n_samples, 3) cone mu. + sigma: (..., n_rays, n_samples, 3, 3) cone sigma. 
+ """ + t_mu = (z_vals[..., 1:] + z_vals[..., :-1]).mul_(0.5) + t_delta = (z_vals[..., 1:] - z_vals[..., :-1]).mul_(0.5) + t_mu_square = t_mu.square() + t_delta_square = t_delta.square() + t_delta_quad = t_delta_square.square() + mu_t = t_mu + 2.0 * t_mu * t_delta_square / (3.0 * t_mu_square + t_delta_square) + sigma_t = t_delta_square / 3.0 - (4.0 / 15.0) * t_delta_quad / (3.0 * t_mu_square + t_delta_square).square() * (12.0 * t_mu_square - t_delta_square) + sigma_r = (pixel_width[..., None, None].square() / 3.0) * (t_mu_square / 4.0 + (5.0 / 12.0) * t_delta_square - (4.0 / 15.0) * t_delta_quad / (3.0 * t_mu_square + t_delta_square)) + points_mu = rays_o[:, :, :, None, :] + rays_d[:, :, :, None, :] * mu_t[..., None] + d_dt = rays_d[..., :, None] * rays_d[..., None, :] # (..., n_rays, 3, 3) + points_sigma = sigma_t[..., None, None] * d_dt[..., None, :, :] + sigma_r[..., None, None] * (torch.eye(3).to(rays_o) - d_dt[..., None, :, :]) + return points_mu, points_sigma + + +def get_pixel_width(intrinsics: Tensor, width: int, height: int) -> Tensor: + """ + Args: + intrinsics: (..., 3, 3) intrinsics matrices. + width: width of the image. + height: height of the image. + + Returns: + pixel_width: (...) pixel width. = 1 / (normalized focal length * width) + """ + assert width == height, "Currently, only square images are supported." + pixel_width = torch.reciprocal((intrinsics[..., 0, 0] * intrinsics[..., 1, 1]).sqrt() * width) + return pixel_width + + +def volume_rendering(color: Tensor, sigma: Tensor, z_vals: Tensor, ray_length: Tensor, rgb: bool = True, depth: bool = True) -> Tuple[Tensor, Tensor, Tensor]: + """ + Given color, sigma and z_vals (linear depth of the sampling points), render the volume. + + NOTE: By default, color and sigma should have one less sample than z_vals, in correspondence with the average value in intervals. + If queried color are aligned with z_vals, we use trapezoidal rule to calculate the average values in intervals. 
+ + Args: + color: (..., n_samples or n_samples - 1, 3) color values. + sigma: (..., n_samples or n_samples - 1) density values. + z_vals: (..., n_samples) z values. + ray_length: (...) length of the ray + + Returns: + rgb: (..., 3) rendered color values. + depth: (...) rendered depth values. + weights (..., n_samples) weights. + """ + dists = (z_vals[..., 1:] - z_vals[..., :-1]) * ray_length[..., None] + if color.shape[-2] == z_vals.shape[-1]: + color = (color[..., 1:, :] + color[..., :-1, :]).mul_(0.5) + sigma = (sigma[..., 1:] + sigma[..., :-1]).mul_(0.5) + sigma_delta = sigma * dists + transparancy = (-torch.cat([torch.zeros_like(sigma_delta[..., :1]), sigma_delta[..., :-1]], dim=-1).cumsum(dim=-1)).exp_() # First cumsum then exp for numerical stability + alpha = 1.0 - (-sigma_delta).exp_() + weights = alpha * transparancy + if rgb: + rgb = torch.sum(weights[..., None] * color, dim=-2) if rgb else None + if depth: + z_vals = (z_vals[..., 1:] + z_vals[..., :-1]).mul_(0.5) + depth = torch.sum(weights * z_vals, dim=-1) / weights.sum(dim=-1).clamp_min_(1e-8) if depth else None + return rgb, depth, weights + + +def neus_volume_rendering(color: Tensor, sdf: Tensor, s: torch.Tensor, z_vals: Tensor = None, rgb: bool = True, depth: bool = True) -> Tuple[Tensor, Tensor, Tensor]: + """ + Given color, sdf values and z_vals (linear depth of the sampling points), do volume rendering. (NeuS) + + Args: + color: (..., n_samples or n_samples - 1, 3) color values. + sdf: (..., n_samples) sdf values. + s: (..., n_samples) S values of S-density function in NeuS. The standard deviation of such S-density distribution is 1 / s. + z_vals: (..., n_samples) z values. + ray_length: (...) length of the ray + + Returns: + rgb: (..., 3) rendered color values. + depth: (...) rendered depth values. + weights (..., n_samples) weights. 
+ """ + + if color.shape[-2] == z_vals.shape[-1]: + color = (color[..., 1:, :] + color[..., :-1, :]).mul_(0.5) + + sigmoid_sdf = torch.sigmoid(s * sdf) + alpha = F.relu(1 - sigmoid_sdf[..., :-1] / sigmoid_sdf[..., :-1]) + transparancy = torch.cumprod(torch.cat([torch.ones_like(alpha[..., :1]), alpha], dim=-1), dim=-1) + weights = alpha * transparancy + + if rgb: + rgb = torch.sum(weights[..., None] * color, dim=-2) if rgb else None + if depth: + z_vals = (z_vals[..., 1:] + z_vals[..., :-1]).mul_(0.5) + depth = torch.sum(weights * z_vals, dim=-1) / weights.sum(dim=-1).clamp_min_(1e-8) if depth else None + return rgb, depth, weights + + +def bin_sample(size: Union[torch.Size, Tuple[int, ...]], n_samples: int, min_value: Number, max_value: Number, spacing: Literal['linear', 'inverse_linear'], dtype: torch.dtype = None, device: torch.device = None) -> Tensor: + """ + Uniformly (or uniformly in inverse space) sample z values in `n_samples` bins in range [min_value, max_value]. + Args: + size: size of the rays + n_samples: number of samples to be sampled, also the number of bins + min_value: minimum value of the range + max_value: maximum value of the range + space: 'linear' or 'inverse_linear'. If 'inverse_linear', the sampling is uniform in inverse space. + + Returns: + z_rand: (*size, n_samples) sampled z values, sorted in ascending order. + """ + if spacing == 'linear': + pass + elif spacing == 'inverse_linear': + min_value = 1.0 / min_value + max_value = 1.0 / max_value + bin_length = (max_value - min_value) / n_samples + z_rand = (torch.rand(*size, n_samples, device=device, dtype=dtype) - 0.5) * bin_length + torch.linspace(min_value + bin_length * 0.5, max_value - bin_length * 0.5, n_samples, device=device, dtype=dtype) + if spacing == 'inverse_linear': + z_rand = 1.0 / z_rand + return z_rand + + +def importance_sample(z_vals: Tensor, weights: Tensor, n_samples: int) -> Tuple[Tensor, Tensor]: + """ + Importance sample z values. 
+ + NOTE: By default, weights should have one less sample than z_vals, in correspondence with the intervals. + If weights has the same number of samples as z_vals, we use trapezoidal rule to calculate the average weights in intervals. + + Args: + z_vals: (..., n_rays, n_input_samples) z values, sorted in ascending order. + weights: (..., n_rays, n_input_samples or n_input_samples - 1) weights. + n_samples: number of output samples for importance sampling. + + Returns: + z_importance: (..., n_rays, n_samples) importance sampled z values, unsorted. + """ + if weights.shape[-1] == z_vals.shape[-1]: + weights = (weights[..., 1:] + weights[..., :-1]).mul_(0.5) + weights = weights / torch.sum(weights, dim=-1, keepdim=True) # (..., n_rays, n_input_samples - 1) + bins_a, bins_b = z_vals[..., :-1], z_vals[..., 1:] + + pdf = weights / torch.sum(weights, dim=-1, keepdim=True) # (..., n_rays, n_input_samples - 1) + cdf = torch.cumsum(pdf, dim=-1) + u = torch.rand(*z_vals.shape[:-1], n_samples, device=z_vals.device, dtype=z_vals.dtype) + + inds = torch.searchsorted(cdf, u, right=True).clamp(0, cdf.shape[-1] - 1) # (..., n_rays, n_samples) + + bins_a = torch.gather(bins_a, dim=-1, index=inds) + bins_b = torch.gather(bins_b, dim=-1, index=inds) + z_importance = bins_a + (bins_b - bins_a) * torch.rand_like(u) + return z_importance + + +def nerf_render_rays( + nerf: Union[Callable[[Tensor, Tensor], Tuple[Tensor, Tensor]], Tuple[Callable[[Tensor], Tuple[Tensor, Tensor]], Callable[[Tensor], Tuple[Tensor, Tensor]]]], + rays_o: Tensor, rays_d: Tensor, + *, + return_dict: bool = False, + n_coarse: int = 64, n_fine: int = 64, + near: float = 0.1, far: float = 100.0, + z_spacing: Literal['linear', 'inverse_linear'] = 'linear', +): + """ + NeRF rendering of rays. Note that it supports arbitrary batch dimensions (denoted as `...`) + + Args: + nerf: nerf model, which takes (points, directions) as input and returns (color, density) as output. 
+ If nerf is a tuple, it should be (nerf_coarse, nerf_fine), where nerf_coarse and nerf_fine are two nerf models for coarse and fine stages respectively. + + nerf args: + points: (..., n_rays, n_samples, 3) + directions: (..., n_rays, n_samples, 3) + nerf returns: + color: (..., n_rays, n_samples, 3) color values. + density: (..., n_rays, n_samples) density values. + + rays_o: (..., n_rays, 3) ray origins + rays_d: (..., n_rays, 3) ray directions. + pixel_width: (..., n_rays) pixel width. How to compute? pixel_width = 1 / (normalized focal length * width) + + Returns + if return_dict is False, return rendered rgb and depth for short cut. (If there are separate coarse and fine results, return fine results) + rgb: (..., n_rays, 3) rendered color values. + depth: (..., n_rays) rendered depth values. + else, return a dict. If `n_fine == 0` or `nerf` is a single model, the dict only contains coarse results: + ``` + {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..} + ``` + If there are two models for coarse and fine stages, the dict contains both coarse and fine results: + ``` + { + "coarse": {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..}, + "fine": {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..} + } + ``` + """ + if isinstance(nerf, tuple): + nerf_coarse, nerf_fine = nerf + else: + nerf_coarse = nerf_fine = nerf + # 1. 
Coarse: bin sampling + z_coarse = bin_sample(rays_d.shape[:-1], n_coarse, near, far, device=rays_o.device, dtype=rays_o.dtype, spacing=z_spacing) # (n_batch, n_views, n_rays, n_samples) + points_coarse = rays_o[..., None, :] + rays_d[..., None, :] * z_coarse[..., None] # (n_batch, n_views, n_rays, n_samples, 3) + ray_length = rays_d.norm(dim=-1) + + # Query color and density + color_coarse, density_coarse = nerf_coarse(points_coarse, rays_d[..., None, :].expand_as(points_coarse)) # (n_batch, n_views, n_rays, n_samples, 3), (n_batch, n_views, n_rays, n_samples) + + # Volume rendering + with torch.no_grad(): + rgb_coarse, depth_coarse, weights = volume_rendering(color_coarse, density_coarse, z_coarse, ray_length) # (n_batch, n_views, n_rays, 3), (n_batch, n_views, n_rays, 1), (n_batch, n_views, n_rays, n_samples) + + if n_fine == 0: + if return_dict: + return {'rgb': rgb_coarse, 'depth': depth_coarse, 'weights': weights, 'z_vals': z_coarse, 'color': color_coarse, 'density': density_coarse} + else: + return rgb_coarse, depth_coarse + + # 2. Fine: Importance sampling + if nerf_coarse is nerf_fine: + # If coarse and fine stages share the same model, the points of coarse stage can be reused, + # and we only need to query the importance samples of fine stage. 
+ z_fine = importance_sample(z_coarse, weights, n_fine) + points_fine = rays_o[..., None, :] + rays_d[..., None, :] * z_fine[..., None] + color_fine, density_fine = nerf_fine(points_fine, rays_d[..., None, :].expand_as(points_fine)) + + # Merge & volume rendering + z_vals = torch.cat([z_coarse, z_fine], dim=-1) + color = torch.cat([color_coarse, color_fine], dim=-2) + density = torch.cat([density_coarse, density_fine], dim=-1) + z_vals, sort_inds = torch.sort(z_vals, dim=-1) + color = torch.gather(color, dim=-2, index=sort_inds[..., None].expand_as(color)) + density = torch.gather(density, dim=-1, index=sort_inds) + rgb, depth, weights = volume_rendering(color, density, z_vals, ray_length) + + if return_dict: + return {'rgb': rgb, 'depth': depth, 'weights': weights, 'z_vals': z_vals, 'color': color, 'density': density} + else: + return rgb, depth + else: + # If coarse and fine stages use different models, we need to query the importance samples of both stages. + z_fine = importance_sample(z_coarse, weights, n_fine) + z_vals = torch.cat([z_coarse, z_fine], dim=-1) + points = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., None] + color, density = nerf_fine(points) + rgb, depth, weights = volume_rendering(color, density, z_vals, ray_length) + + if return_dict: + return { + 'coarse': {'rgb': rgb_coarse, 'depth': depth_coarse, 'weights': weights, 'z_vals': z_coarse, 'color': color_coarse, 'density': density_coarse}, + 'fine': {'rgb': rgb, 'depth': depth, 'weights': weights, 'z_vals': z_vals, 'color': color, 'density': density} + } + else: + return rgb, depth + + +def mipnerf_render_rays( + mipnerf: Callable[[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]], + rays_o: Tensor, rays_d: Tensor, pixel_width: Tensor, + *, + return_dict: bool = False, + n_coarse: int = 64, n_fine: int = 64, uniform_ratio: float = 0.4, + near: float = 0.1, far: float = 100.0, + z_spacing: Literal['linear', 'inverse_linear'] = 'linear', +) -> Union[Tuple[Tensor, Tensor], Dict[str, 
Tensor]]: + """ + MipNeRF rendering. + + Args: + mipnerf: mipnerf model, which takes (points_mu, points_sigma) as input and returns (color, density) as output. + + mipnerf args: + points_mu: (..., n_rays, n_samples, 3) cone mu. + points_sigma: (..., n_rays, n_samples, 3, 3) cone sigma. + directions: (..., n_rays, n_samples, 3) + mipnerf returns: + color: (..., n_rays, n_samples, 3) color values. + density: (..., n_rays, n_samples) density values. + + rays_o: (..., n_rays, 3) ray origins + rays_d: (..., n_rays, 3) ray directions. + pixel_width: (..., n_rays) pixel width. How to compute? pixel_width = 1 / (normalized focal length * width) + + Returns + if return_dict is False, return rendered results only: (If `n_fine == 0`, return coarse results, otherwise return fine results) + rgb: (..., n_rays, 3) rendered color values. + depth: (..., n_rays) rendered depth values. + else, return a dict. If `n_fine == 0`, the dict only contains coarse results: + ``` + {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..} + ``` + If n_fine > 0, the dict contains both coarse and fine results : + ``` + { + "coarse": {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..}, + "fine": {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..} + } + ``` + """ + # 1. 
Coarse: bin sampling + z_coarse = bin_sample(rays_d.shape[:-1], n_coarse, near, far, spacing=z_spacing, device=rays_o.device, dtype=rays_o.dtype) + points_mu_coarse, points_sigma_coarse = get_mipnerf_cones(rays_o, rays_d, z_coarse, pixel_width) + ray_length = rays_d.norm(dim=-1) + + # Query color and density + color_coarse, density_coarse = mipnerf(points_mu_coarse, points_sigma_coarse, rays_d[..., None, :].expand_as(points_mu_coarse)) # (n_batch, n_views, n_rays, n_samples, 3), (n_batch, n_views, n_rays, n_samples) + + # Volume rendering + rgb_coarse, depth_coarse, weights_coarse = volume_rendering(color_coarse, density_coarse, z_coarse, ray_length) # (n_batch, n_views, n_rays, 3), (n_batch, n_views, n_rays, 1), (n_batch, n_views, n_rays, n_samples) + + if n_fine == 0: + if return_dict: + return {'rgb': rgb_coarse, 'depth': depth_coarse, 'weights': weights_coarse, 'z_vals': z_coarse, 'color': color_coarse, 'density': density_coarse} + else: + return rgb_coarse, depth_coarse + + # 2. Fine: Importance sampling. 
(NOTE: coarse stages and fine stages always share the same model, but coarse stage points can not be reused) + with torch.no_grad(): + weights_coarse = (1.0 - uniform_ratio) * weights_coarse + uniform_ratio / weights_coarse.shape[-1] + z_fine = importance_sample(z_coarse, weights_coarse, n_fine) + z_fine, _ = torch.sort(z_fine, dim=-2) + points_mu_fine, points_sigma_fine = get_mipnerf_cones(rays_o, rays_d, z_fine, pixel_width) + color_fine, density_fine = mipnerf(points_mu_fine, points_sigma_fine, rays_d[..., None, :].expand_as(points_mu_fine)) + + # Volume rendering + rgb_fine, depth_fine, weights_fine = volume_rendering(color_fine, density_fine, z_fine, ray_length) + + if return_dict: + return { + 'coarse': {'rgb': rgb_coarse, 'depth': depth_coarse, 'weights': weights_coarse, 'z_vals': z_coarse, 'color': color_coarse, 'density': density_coarse}, + 'fine': {'rgb': rgb_fine, 'depth': depth_fine, 'weights': weights_fine, 'z_vals': z_fine, 'color': color_fine, 'density': density_fine} + } + else: + return rgb_fine, depth_fine + + +def neus_render_rays( + neus: Callable[[Tensor, Tensor], Tuple[Tensor, Tensor]], + s: Union[Number, Tensor], + rays_o: Tensor, rays_d: Tensor, + *, + compute_normal: bool = True, + return_dict: bool = False, + n_coarse: int = 64, n_fine: int = 64, + near: float = 0.1, far: float = 100.0, + z_spacing: Literal['linear', 'inverse_linear'] = 'linear', +): + """ + TODO + NeuS rendering of rays. Note that it supports arbitrary batch dimensions (denoted as `...`) + + Args: + neus: neus model, which takes (points, directions) as input and returns (color, density) as output. + + nerf args: + points: (..., n_rays, n_samples, 3) + directions: (..., n_rays, n_samples, 3) + nerf returns: + color: (..., n_rays, n_samples, 3) color values. + density: (..., n_rays, n_samples) density values. + + rays_o: (..., n_rays, 3) ray origins + rays_d: (..., n_rays, 3) ray directions. + pixel_width: (..., n_rays) pixel width. How to compute? 
pixel_width = 1 / (normalized focal length * width) + + Returns + if return_dict is False, return rendered results only: (If `n_fine == 0`, return coarse results, otherwise return fine results) + rgb: (..., n_rays, 3) rendered color values. + depth: (..., n_rays) rendered depth values. + else, return a dict. If `n_fine == 0`, the dict only contains coarse results: + ``` + {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'sdf': ..., 'normal': ...} + ``` + If n_fine > 0, the dict contains both coarse and fine results: + ``` + { + "coarse": {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..}, + "fine": {'rgb': .., 'depth': .., 'weights': .., 'z_vals': .., 'color': .., 'density': ..} + } + ``` + """ + + # 1. Coarse: bin sampling + z_coarse = bin_sample(rays_d.shape[:-1], n_coarse, near, far, device=rays_o.device, dtype=rays_o.dtype, spacing=z_spacing) # (n_batch, n_views, n_rays, n_samples) + points_coarse = rays_o[..., None, :] + rays_d[..., None, :] * z_coarse[..., None] # (n_batch, n_views, n_rays, n_samples, 3) + + # Query color and density + color_coarse, sdf_coarse = neus(points_coarse, rays_d[..., None, :].expand_as(points_coarse)) # (n_batch, n_views, n_rays, n_samples, 3), (n_batch, n_views, n_rays, n_samples) + + # Volume rendering + with torch.no_grad(): + rgb_coarse, depth_coarse, weights = neus_volume_rendering(color_coarse, sdf_coarse, s, z_coarse) # (n_batch, n_views, n_rays, 3), (n_batch, n_views, n_rays, 1), (n_batch, n_views, n_rays, n_samples) + + if n_fine == 0: + if return_dict: + return {'rgb': rgb_coarse, 'depth': depth_coarse, 'weights': weights, 'z_vals': z_coarse, 'color': color_coarse, 'sdf': sdf_coarse} + else: + return rgb_coarse, depth_coarse + + # If coarse and fine stages share the same model, the points of coarse stage can be reused, + # and we only need to query the importance samples of fine stage. 
+ z_fine = importance_sample(z_coarse, weights, n_fine) + points_fine = rays_o[..., None, :] + rays_d[..., None, :] * z_fine[..., None] + color_fine, sdf_fine = neus(points_fine, rays_d[..., None, :].expand_as(points_fine)) + + # Merge & volume rendering + z_vals = torch.cat([z_coarse, z_fine], dim=-1) + color = torch.cat([color_coarse, color_fine], dim=-2) + sdf = torch.cat([sdf_coarse, sdf_fine], dim=-1) + z_vals, sort_inds = torch.sort(z_vals, dim=-1) + color = torch.gather(color, dim=-2, index=sort_inds[..., None].expand_as(color)) + sdf = torch.gather(sdf, dim=-1, index=sort_inds) + rgb, depth, weights = neus_volume_rendering(color, sdf, s, z_vals) + + if return_dict: + return { + 'coarse': {'rgb': rgb_coarse, 'depth': depth_coarse, 'weights': weights, 'z_vals': z_coarse, 'color': color_coarse, 'sdf': sdf_coarse}, + 'fine': {'rgb': rgb, 'depth': depth, 'weights': weights, 'z_vals': z_vals, 'color': color, 'sdf': sdf} + } + else: + return rgb, depth + + +def nerf_render_view( + nerf: Tensor, + extrinsics: Tensor, + intrinsics: Tensor, + width: int, + height: int, + *, + patchify: bool = False, + patch_size: Tuple[int, int] = (64, 64), + **options: Dict[str, Any] +) -> Tuple[Tensor, Tensor]: + """ + NeRF rendering of views. Note that it supports arbitrary batch dimensions (denoted as `...`) + + Args: + extrinsics: (..., 4, 4) extrinsics matrice of the rendered views + intrinsics (optional): (..., 3, 3) intrinsics matrice of the rendered views. + width (optional): image width of the rendered views. + height (optional): image height of the rendered views. + patchify (optional): If the image is too large, render it patch by patch + **options: rendering options. + + Returns: + rgb: (..., channels, height, width) rendered color values. + depth: (..., height, width) rendered depth values. 
+ """ + if patchify: + # Patchified rendering + max_patch_width, max_patch_height = patch_size + n_rows, n_columns = math.ceil(height / max_patch_height), math.ceil(width / max_patch_width) + + rgb_rows, depth_rows = [], [] + for i_row in range(n_rows): + rgb_row, depth_row = [], [] + for i_column in range(n_columns): + patch_shape = patch_height, patch_width = min(max_patch_height, height - i_row * max_patch_height), min(max_patch_width, width - i_column * max_patch_width) + uv = image_uv(height, width, i_column * max_patch_width, i_row * max_patch_height, i_column * max_patch_width + patch_width, i_row * max_patch_height + patch_height).to(extrinsics) + uv = uv.flatten(0, 1) # (patch_height * patch_width, 2) + ray_o_, ray_d_ = get_rays(extrinsics, intrinsics, uv) + rgb_, depth_ = nerf_render_rays(nerf, ray_o_, ray_d_, **options, return_dict=False) + rgb_ = rgb_.transpose(-1, -2).unflatten(-1, patch_shape) # (..., 3, patch_height, patch_width) + depth_ = depth_.unflatten(-1, patch_shape) # (..., patch_height, patch_width) + + rgb_row.append(rgb_) + depth_row.append(depth_) + rgb_rows.append(torch.cat(rgb_row, dim=-1)) + depth_rows.append(torch.cat(depth_row, dim=-1)) + rgb = torch.cat(rgb_rows, dim=-2) + depth = torch.cat(depth_rows, dim=-2) + + return rgb, depth + else: + # Full rendering + uv = image_uv(height, width).to(extrinsics) + uv = uv.flatten(0, 1) # (height * width, 2) + ray_o_, ray_d_ = get_rays(extrinsics, intrinsics, uv) + rgb, depth = nerf_render_rays(nerf, ray_o_, ray_d_, **options, return_dict=False) + rgb = rgb.transpose(-1, -2).unflatten(-1, (height, width)) # (..., 3, height, width) + depth = depth.unflatten(-1, (height, width)) # (..., height, width) + + return rgb, depth + + +def mipnerf_render_view( + mipnerf: Tensor, + extrinsics: Tensor, + intrinsics: Tensor, + width: int, + height: int, + *, + patchify: bool = False, + patch_size: Tuple[int, int] = (64, 64), + **options: Dict[str, Any] +) -> Tuple[Tensor, Tensor]: + """ + MipNeRF 
rendering of views. Note that it supports arbitrary batch dimensions (denoted as `...`) + + Args: + extrinsics: (..., 4, 4) extrinsics matrice of the rendered views + intrinsics (optional): (..., 3, 3) intrinsics matrice of the rendered views. + width (optional): image width of the rendered views. + height (optional): image height of the rendered views. + patchify (optional): If the image is too large, render it patch by patch + **options: rendering options. + + Returns: + rgb: (..., 3, height, width) rendered color values. + depth: (..., height, width) rendered depth values. + """ + pixel_width = get_pixel_width(intrinsics, width, height) + + if patchify: + # Patchified rendering + max_patch_width, max_patch_height = patch_size + n_rows, n_columns = math.ceil(height / max_patch_height), math.ceil(width / max_patch_width) + + rgb_rows, depth_rows = [], [] + for i_row in range(n_rows): + rgb_row, depth_row = [], [] + for i_column in range(n_columns): + patch_shape = patch_height, patch_width = min(max_patch_height, height - i_row * max_patch_height), min(max_patch_width, width - i_column * max_patch_width) + uv = image_uv(height, width, i_column * max_patch_width, i_row * max_patch_height, i_column * max_patch_width + patch_width, i_row * max_patch_height + patch_height).to(extrinsics) + uv = uv.flatten(0, 1) # (patch_height * patch_width, 2) + ray_o_, ray_d_ = get_rays(extrinsics, intrinsics, uv) + rgb_, depth_ = mipnerf_render_rays(mipnerf, ray_o_, ray_d_, pixel_width, **options) + rgb_ = rgb_.transpose(-1, -2).unflatten(-1, patch_shape) # (..., 3, patch_height, patch_width) + depth_ = depth_.unflatten(-1, patch_shape) # (..., patch_height, patch_width) + + rgb_row.append(rgb_) + depth_row.append(depth_) + rgb_rows.append(torch.cat(rgb_row, dim=-1)) + depth_rows.append(torch.cat(depth_row, dim=-1)) + rgb = torch.cat(rgb_rows, dim=-2) + depth = torch.cat(depth_rows, dim=-2) + + return rgb, depth + else: + # Full rendering + uv = image_uv(height, 
width).to(extrinsics) + uv = uv.flatten(0, 1) # (height * width, 2) + ray_o_, ray_d_ = get_rays(extrinsics, intrinsics, uv) + rgb, depth = mipnerf_render_rays(mipnerf, ray_o_, ray_d_, pixel_width, **options) + rgb = rgb.transpose(-1, -2).unflatten(-1, (height, width)) # (..., 3, height, width) + depth = depth.unflatten(-1, (height, width)) # (..., height, width) + + return rgb, depth + + +class InstantNGP(nn.Module): + """ + An implementation of InstantNGP, Müller et. al., https://nvlabs.github.io/instant-ngp/. + Requires `tinycudann` package. + Install it by: + ``` + pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch + ``` + """ + def __init__(self, + view_dependent: bool = True, + base_resolution: int = 16, + finest_resolution: int = 2048, + n_levels: int = 16, + num_layers_density: int = 2, + hidden_dim_density: int = 64, + num_layers_color: int = 3, + hidden_dim_color: int = 64, + log2_hashmap_size: int = 19, + bound: float = 1.0, + color_channels: int = 3, + ): + super().__init__() + import tinycudann + N_FEATURES_PER_LEVEL = 2 + GEO_FEAT_DIM = 15 + + self.bound = bound + self.color_channels = color_channels + + # density network + self.num_layers_density = num_layers_density + self.hidden_dim_density = hidden_dim_density + + per_level_scale = (finest_resolution / base_resolution) ** (1 / (n_levels - 1)) + + self.encoder = tinycudann.Encoding( + n_input_dims=3, + encoding_config={ + "otype": "HashGrid", + "n_levels": n_levels, + "n_features_per_level": N_FEATURES_PER_LEVEL, + "log2_hashmap_size": log2_hashmap_size, + "base_resolution": base_resolution, + "per_level_scale": per_level_scale, + }, + ) + + self.density_net = tinycudann.Network( + n_input_dims=N_FEATURES_PER_LEVEL * n_levels, + n_output_dims=1 + GEO_FEAT_DIM, + network_config={ + "otype": "FullyFusedMLP", + "activation": "ReLU", + "output_activation": "None", + "n_neurons": hidden_dim_density, + "n_hidden_layers": num_layers_density - 1, + }, + ) + + # color network 
+ self.num_layers_color = num_layers_color + self.hidden_dim_color = hidden_dim_color + + self.view_dependent = view_dependent + if view_dependent: + self.encoder_dir = tinycudann.Encoding( + n_input_dims=3, + encoding_config={ + "otype": "SphericalHarmonics", + "degree": 4, + }, + ) + self.in_dim_color = self.encoder_dir.n_output_dims + GEO_FEAT_DIM + else: + self.in_dim_color = GEO_FEAT_DIM + + self.color_net = tinycudann.Network( + n_input_dims=self.in_dim_color, + n_output_dims=color_channels, + network_config={ + "otype": "FullyFusedMLP", + "activation": "ReLU", + "output_activation": "None", + "n_neurons": hidden_dim_color, + "n_hidden_layers": num_layers_color - 1, + }, + ) + + def forward(self, x: torch.Tensor, d: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Args: + x: (..., 3) points + d: (..., 3) directions + Returns: + color: (..., 3) color values. + density: (..., 1) density values. + """ + batch_shape = x.shape[:-1] + x, d = x.reshape(-1, 3), d.reshape(-1, 3) + + # density + x = (x + self.bound) / (2 * self.bound) # to [0, 1] + x = self.encoder(x) + density, geo_feat = self.density_net(x).split([1, 15], dim=-1) + density = F.softplus(density).squeeze(-1) + + # color + if self.view_dependent: + d = (F.normalize(d, dim=-1) + 1) / 2 # tcnn SH encoding requires inputs to be in [0, 1] + d = self.encoder_dir(d) + h = torch.cat([d, geo_feat], dim=-1) + else: + h = geo_feat + color = self.color_net(h) + + return color.reshape(*batch_shape, self.color_channels), density.reshape(*batch_shape) + diff --git a/utils3d/utils3d/torch/rasterization.py b/utils3d/utils3d/torch/rasterization.py new file mode 100644 index 0000000000000000000000000000000000000000..446e05dee502ce1f837e2da1264314892fcfbfaa --- /dev/null +++ b/utils3d/utils3d/torch/rasterization.py @@ -0,0 +1,574 @@ +from typing import * + +import torch +import nvdiffrast.torch as dr + +from . 
import utils, transforms, mesh +from ._helpers import batched + + +__all__ = [ + 'RastContext', + 'rasterize_triangle_faces', + 'rasterize_triangle_faces_depth_peeling', + 'texture', + 'texture_composite', + 'warp_image_by_depth', + 'warp_image_by_forward_flow', +] + + +class RastContext: + """ + Create a rasterization context. Nothing but a wrapper of nvdiffrast.torch.RasterizeCudaContext or nvdiffrast.torch.RasterizeGLContext. + """ + def __init__(self, nvd_ctx: Union[dr.RasterizeCudaContext, dr.RasterizeGLContext] = None, *, backend: Literal['cuda', 'gl'] = 'gl', device: Union[str, torch.device] = None): + if nvd_ctx is not None: + self.nvd_ctx = nvd_ctx + return + + if backend == 'gl': + self.nvd_ctx = dr.RasterizeGLContext(device=device) + elif backend == 'cuda': + self.nvd_ctx = dr.RasterizeCudaContext(device=device) + else: + raise ValueError(f'Unknown backend: {backend}') + + +def rasterize_triangle_faces( + ctx: RastContext, + vertices: torch.Tensor, + faces: torch.Tensor, + width: int, + height: int, + attr: torch.Tensor = None, + uv: torch.Tensor = None, + texture: torch.Tensor = None, + model: torch.Tensor = None, + view: torch.Tensor = None, + projection: torch.Tensor = None, + antialiasing: Union[bool, List[int]] = True, + diff_attrs: Union[None, List[int]] = None, +) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: + """ + Rasterize a mesh with vertex attributes. + + Args: + ctx (GLContext): rasterizer context + vertices (np.ndarray): (B, N, 2 or 3 or 4) + faces (torch.Tensor): (T, 3) + width (int): width of the output image + height (int): height of the output image + attr (torch.Tensor, optional): (B, N, C) vertex attributes. Defaults to None. + uv (torch.Tensor, optional): (B, N, 2) uv coordinates. Defaults to None. + texture (torch.Tensor, optional): (B, C, H, W) texture. Defaults to None. + model (torch.Tensor, optional): ([B,] 4, 4) model matrix. Defaults to None (identity). + view (torch.Tensor, optional): ([B,] 4, 4) view matrix. 
Defaults to None (identity). + projection (torch.Tensor, optional): ([B,] 4, 4) projection matrix. Defaults to None (identity). + antialiasing (Union[bool, List[int]], optional): whether to perform antialiasing. Defaults to True. If a list of indices is provided, only those channels will be antialiased. + diff_attrs (Union[None, List[int]], optional): indices of attributes to compute screen-space derivatives. Defaults to None. + + Returns: + Dictionary containing: + - image: (torch.Tensor): (B, C, H, W) + - depth: (torch.Tensor): (B, H, W) screen space depth, ranging from 0 (near) to 1. (far) + NOTE: Empty pixels will have depth 1., i.e. far plane. + - mask: (torch.BoolTensor): (B, H, W) mask of valid pixels + - image_dr: (torch.Tensor): (B, *, H, W) screen space derivatives of the attributes + - face_id: (torch.Tensor): (B, H, W) face ids + - uv: (torch.Tensor): (B, H, W, 2) uv coordinates (if uv is not None) + - uv_dr: (torch.Tensor): (B, H, W, 4) uv derivatives (if uv is not None) + - texture: (torch.Tensor): (B, C, H, W) texture (if uv and texture are not None) + """ + assert vertices.ndim == 3 + assert faces.ndim == 2 + + if vertices.shape[-1] == 2: + vertices = torch.cat([vertices, torch.zeros_like(vertices[..., :1]), torch.ones_like(vertices[..., :1])], dim=-1) + elif vertices.shape[-1] == 3: + vertices = torch.cat([vertices, torch.ones_like(vertices[..., :1])], dim=-1) + elif vertices.shape[-1] == 4: + pass + else: + raise ValueError(f'Wrong shape of vertices: {vertices.shape}') + + mvp = projection if projection is not None else torch.eye(4).to(vertices) + if view is not None: + mvp = mvp @ view + if model is not None: + mvp = mvp @ model + + pos_clip = vertices @ mvp.transpose(-1, -2) + faces = faces.contiguous() + if attr is not None: + attr = attr.contiguous() + + rast_out, rast_db = dr.rasterize(ctx.nvd_ctx, pos_clip, faces, resolution=[height, width], grad_db=True) + face_id = rast_out[..., 3].flip(1) + depth = rast_out[..., 2].flip(1) + mask = 
(face_id > 0).float() + depth = (depth * 0.5 + 0.5) * mask + (1.0 - mask) + + ret = { + 'depth': depth, + 'mask': mask, + 'face_id': face_id, + } + + if attr is not None: + image, image_dr = dr.interpolate(attr, rast_out, faces, rast_db, diff_attrs=diff_attrs) + if antialiasing == True: + image = dr.antialias(image, rast_out, pos_clip, faces) + elif isinstance(antialiasing, list): + aa_image = dr.antialias(image[..., antialiasing], rast_out, pos_clip, faces) + image[..., antialiasing] = aa_image + image = image.flip(1).permute(0, 3, 1, 2) + ret['image'] = image + + if uv is not None: + uv_map, uv_map_dr = dr.interpolate(uv, rast_out, faces, rast_db, diff_attrs='all') + ret['uv'] = uv_map + ret['uv_dr'] = uv_map_dr + if texture is not None: + texture = texture.flip(1).permute(0, 2, 3, 1) + texture_map = dr.texture(texture, uv_map, uv_map_dr) + ret['texture'] = texture_map.flip(1).permute(0, 3, 1, 2) + + if diff_attrs is not None: + image_dr = image_dr.flip(1).permute(0, 3, 1, 2) + ret['image_dr'] = image_dr + + return ret + + +def rasterize_triangle_faces_depth_peeling( + ctx: RastContext, + vertices: torch.Tensor, + faces: torch.Tensor, + width: int, + height: int, + max_layers: int, + attr: torch.Tensor = None, + uv: torch.Tensor = None, + texture: torch.Tensor = None, + model: torch.Tensor = None, + view: torch.Tensor = None, + projection: torch.Tensor = None, + antialiasing: Union[bool, List[int]] = True, + diff_attrs: Union[None, List[int]] = None, +) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: + """ + Rasterize a mesh with vertex attributes using depth peeling. + + Args: + ctx (GLContext): rasterizer context + vertices (np.ndarray): (B, N, 2 or 3 or 4) + faces (torch.Tensor): (T, 3) + width (int): width of the output image + height (int): height of the output image + max_layers (int): maximum number of layers + NOTE: if the number of layers is less than max_layers, the output will contain less than max_layers images. 
+ attr (torch.Tensor, optional): (B, N, C) vertex attributes. Defaults to None. + uv (torch.Tensor, optional): (B, N, 2) uv coordinates. Defaults to None. + texture (torch.Tensor, optional): (B, C, H, W) texture. Defaults to None. + model (torch.Tensor, optional): ([B,] 4, 4) model matrix. Defaults to None (identity). + view (torch.Tensor, optional): ([B,] 4, 4) view matrix. Defaults to None (identity). + projection (torch.Tensor, optional): ([B,] 4, 4) projection matrix. Defaults to None (identity). + antialiasing (Union[bool, List[int]], optional): whether to perform antialiasing. Defaults to True. If a list of indices is provided, only those channels will be antialiased. + diff_attrs (Union[None, List[int]], optional): indices of attributes to compute screen-space derivatives. Defaults to None. + + Returns: + Dictionary containing: + - image: (List[torch.Tensor]): list of (B, C, H, W) rendered images + - depth: (List[torch.Tensor]): list of (B, H, W) screen space depth, ranging from 0 (near) to 1. (far) + NOTE: Empty pixels will have depth 1., i.e. far plane. 
+ - mask: (List[torch.BoolTensor]): list of (B, H, W) mask of valid pixels + - image_dr: (List[torch.Tensor]): list of (B, *, H, W) screen space derivatives of the attributes + - face_id: (List[torch.Tensor]): list of (B, H, W) face ids + - uv: (List[torch.Tensor]): list of (B, H, W, 2) uv coordinates (if uv is not None) + - uv_dr: (List[torch.Tensor]): list of (B, H, W, 4) uv derivatives (if uv is not None) + - texture: (List[torch.Tensor]): list of (B, C, H, W) texture (if uv and texture are not None) + """ + assert vertices.ndim == 3 + assert faces.ndim == 2 + + if vertices.shape[-1] == 2: + vertices = torch.cat([vertices, torch.zeros_like(vertices[..., :1]), torch.ones_like(vertices[..., :1])], dim=-1) + elif vertices.shape[-1] == 3: + vertices = torch.cat([vertices, torch.ones_like(vertices[..., :1])], dim=-1) + elif vertices.shape[-1] == 4: + pass + else: + raise ValueError(f'Wrong shape of vertices: {vertices.shape}') + + mvp = projection if projection is not None else torch.eye(4).to(vertices) + if view is not None: + mvp = mvp @ view + if model is not None: + mvp = mvp @ model + + pos_clip = vertices @ mvp.transpose(-1, -2) + faces = faces.contiguous() + if attr is not None: + attr = attr.contiguous() + + ret = { + 'depth': [], + 'mask': [], + 'face_id': [], + } + with dr.DepthPeeler(ctx.nvd_ctx, pos_clip, faces, resolution=[height, width]) as peeler: + for i in range(max_layers): + rast_out, rast_db = peeler.rasterize_next_layer() + face_id = rast_out[..., 3].flip(1) + depth = rast_out[..., 2].flip(1) + mask = (face_id > 0).float() + depth = (depth * 0.5 + 0.5) * mask + (1.0 - mask) + + if torch.all(mask == 0): + break + + ret['depth'].append(depth) + ret['mask'].append(mask) + ret['face_id'].append(face_id) + + if attr is not None: + image, image_dr = dr.interpolate(attr, rast_out, faces, rast_db, diff_attrs=diff_attrs) + if antialiasing == True: + image = dr.antialias(image, rast_out, pos_clip, faces) + elif isinstance(antialiasing, list): + aa_image = 
dr.antialias(image[..., antialiasing], rast_out, pos_clip, faces) + image[..., antialiasing] = aa_image + image = image.flip(1).permute(0, 3, 1, 2) + if 'image' not in ret: + ret['image'] = [] + ret['image'].append(image) + + if uv is not None: + uv_map, uv_map_dr = dr.interpolate(uv, rast_out, faces, rast_db, diff_attrs='all') + if 'uv' not in ret: + ret['uv'] = [] + ret['uv_dr'] = [] + ret['uv'].append(uv_map) + ret['uv_dr'].append(uv_map_dr) + if texture is not None: + texture = texture.flip(1).permute(0, 2, 3, 1) + texture_map = dr.texture(texture, uv_map, uv_map_dr) + if 'texture' not in ret: + ret['texture'] = [] + ret['texture'].append(texture_map.flip(1).permute(0, 3, 1, 2)) + + if diff_attrs is not None: + image_dr = image_dr.flip(1).permute(0, 3, 1, 2) + if 'image_dr' not in ret: + ret['image_dr'] = [] + ret['image_dr'].append(image_dr) + + return ret + + +def texture( + texture: torch.Tensor, + uv: torch.Tensor, + uv_da: torch.Tensor, +) -> torch.Tensor: + """ + Interpolate texture using uv coordinates. + + Args: + texture (torch.Tensor): (B, C, H, W) texture + uv (torch.Tensor): (B, H, W, 2) uv coordinates + uv_da (torch.Tensor): (B, H, W, 4) uv derivatives + + Returns: + torch.Tensor: (B, C, H, W) interpolated texture + """ + texture = texture.flip(2).permute(0, 2, 3, 1).contiguous() + return dr.texture(texture, uv, uv_da).flip(1).permute(0, 3, 1, 2) + + +def texture_composite( + texture: torch.Tensor, + uv: List[torch.Tensor], + uv_da: List[torch.Tensor], + background: torch.Tensor = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Composite textures with depth peeling output. + + Args: + texture (torch.Tensor): (B, C+1, H, W) texture + NOTE: the last channel is alpha channel + uv (List[torch.Tensor]): list of (B, H, W, 2) uv coordinates + uv_da (List[torch.Tensor]): list of (B, H, W, 4) uv derivatives + background (Optional[torch.Tensor], optional): (B, C, H, W) background image. Defaults to None (black). 
+ + Returns: + image: (torch.Tensor): (B, C, H, W) rendered image + alpha: (torch.Tensor): (B, H, W) alpha channel + """ + assert len(uv) == len(uv_da) + if background is not None: + assert texture.shape[1] == background.shape[1] + 1 + + C = texture.shape[1] - 1 + B, H, W = uv[0].shape[:3] + texture = texture.flip(2).permute(0, 2, 3, 1).contiguous() + alpha = torch.zeros(B, H, W, device=texture.device) + if background is None: + image = torch.zeros(B, H, W, C, device=texture.device) + else: + image = background.clone().permute(0, 2, 3, 1) # [B, H, W, C] + for i in range(len(uv)): + texture_map = dr.texture(texture, uv[i], uv_da[i]) # [B, H, W, C+1] + _alpha = texture_map[..., -1] # [B, H, W] + _weight = _alpha * (1 - alpha) # [B, H, W] + image = image + texture_map[..., :-1] * _weight.unsqueeze(-1) # [B, H, W, C] + alpha = alpha + _weight # [B, H, W] + return image.flip(1).permute(0, 3, 1, 2), alpha.flip(1) + + +def warp_image_by_depth( + ctx: RastContext, + depth: torch.FloatTensor, + image: torch.FloatTensor = None, + mask: torch.BoolTensor = None, + width: int = None, + height: int = None, + *, + extrinsics_src: torch.FloatTensor = None, + extrinsics_tgt: torch.FloatTensor = None, + intrinsics_src: torch.FloatTensor = None, + intrinsics_tgt: torch.FloatTensor = None, + near: float = 0.1, + far: float = 100.0, + antialiasing: bool = True, + backslash: bool = False, + padding: int = 0, + return_uv: bool = False, + return_dr: bool = False, +) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.BoolTensor, Optional[torch.FloatTensor], Optional[torch.FloatTensor]]: + """ + Warp image by depth. + NOTE: if batch size is 1, image mesh will be triangulated aware of the depth, yielding less distorted results. + Otherwise, image mesh will be triangulated simply for batch rendering. + + Args: + ctx (Union[dr.RasterizeCudaContext, dr.RasterizeGLContext]): rasterization context + depth (torch.Tensor): (B, H, W) linear depth + image (torch.Tensor): (B, C, H, W). 
None to use image space uv. Defaults to None. + width (int, optional): width of the output image. None to use the same as depth. Defaults to None. + height (int, optional): height of the output image. Defaults the same as depth.. + extrinsics_src (torch.Tensor, optional): (B, 4, 4) extrinsics matrix for source. None to use identity. Defaults to None. + extrinsics_tgt (torch.Tensor, optional): (B, 4, 4) extrinsics matrix for target. None to use identity. Defaults to None. + intrinsics_src (torch.Tensor, optional): (B, 3, 3) intrinsics matrix for source. None to use the same as target. Defaults to None. + intrinsics_tgt (torch.Tensor, optional): (B, 3, 3) intrinsics matrix for target. None to use the same as source. Defaults to None. + near (float, optional): near plane. Defaults to 0.1. + far (float, optional): far plane. Defaults to 100.0. + antialiasing (bool, optional): whether to perform antialiasing. Defaults to True. + backslash (bool, optional): whether to use backslash triangulation. Defaults to False. + padding (int, optional): padding of the image. Defaults to 0. + return_uv (bool, optional): whether to return the uv. Defaults to False. + return_dr (bool, optional): whether to return the image-space derivatives of uv. Defaults to False. 
+ + Returns: + image: (torch.FloatTensor): (B, C, H, W) rendered image + depth: (torch.FloatTensor): (B, H, W) linear depth, ranging from 0 to inf + mask: (torch.BoolTensor): (B, H, W) mask of valid pixels + uv: (torch.FloatTensor): (B, 2, H, W) image-space uv + dr: (torch.FloatTensor): (B, 4, H, W) image-space derivatives of uv + """ + assert depth.ndim == 3 + batch_size = depth.shape[0] + + if width is None: + width = depth.shape[-1] + if height is None: + height = depth.shape[-2] + if image is not None: + assert image.shape[-2:] == depth.shape[-2:], f'Shape of image {image.shape} does not match shape of depth {depth.shape}' + + if extrinsics_src is None: + extrinsics_src = torch.eye(4).to(depth) + if extrinsics_tgt is None: + extrinsics_tgt = torch.eye(4).to(depth) + if intrinsics_src is None: + intrinsics_src = intrinsics_tgt + if intrinsics_tgt is None: + intrinsics_tgt = intrinsics_src + + assert all(x is not None for x in [extrinsics_src, extrinsics_tgt, intrinsics_src, intrinsics_tgt]), "Make sure you have provided all the necessary camera parameters." 
+ + view_tgt = transforms.extrinsics_to_view(extrinsics_tgt) + perspective_tgt = transforms.intrinsics_to_perspective(intrinsics_tgt, near=near, far=far) + + if padding > 0: + uv, faces = utils.image_mesh(width=width+2, height=height+2) + uv = (uv - 1 / (width + 2)) * ((width + 2) / width) + uv_ = uv.clone().reshape(height+2, width+2, 2) + uv_[0, :, 1] -= padding / height + uv_[-1, :, 1] += padding / height + uv_[:, 0, 0] -= padding / width + uv_[:, -1, 0] += padding / width + uv_ = uv_.reshape(-1, 2) + depth = torch.nn.functional.pad(depth, [1, 1, 1, 1], mode='replicate') + if image is not None: + image = torch.nn.functional.pad(image, [1, 1, 1, 1], mode='replicate') + uv, uv_, faces = uv.to(depth.device), uv_.to(depth.device), faces.to(depth.device) + pts = transforms.unproject_cv( + uv_, + depth.flatten(-2, -1), + extrinsics_src, + intrinsics_src, + ) + else: + uv, faces = utils.image_mesh(width=depth.shape[-1], height=depth.shape[-2]) + if mask is not None: + depth = torch.where(mask, depth, torch.tensor(far, dtype=depth.dtype, device=depth.device)) + uv, faces = uv.to(depth.device), faces.to(depth.device) + pts = transforms.unproject_cv( + uv, + depth.flatten(-2, -1), + extrinsics_src, + intrinsics_src, + ) + + # triangulate + if batch_size == 1: + faces = mesh.triangulate(faces, vertices=pts[0]) + else: + faces = mesh.triangulate(faces, backslash=backslash) + + # rasterize attributes + diff_attrs = None + if image is not None: + attr = image.permute(0, 2, 3, 1).flatten(1, 2) + if return_dr or return_uv: + if return_dr: + diff_attrs = [image.shape[1], image.shape[1]+1] + if return_uv and antialiasing: + antialiasing = list(range(image.shape[1])) + attr = torch.cat([attr, uv.expand(batch_size, -1, -1)], dim=-1) + else: + attr = uv.expand(batch_size, -1, -1) + if antialiasing: + print("\033[93mWarning: you are performing antialiasing on uv. 
This may cause artifacts.\033[0m") + if return_uv: + return_uv = False + print("\033[93mWarning: image is None, return_uv is ignored.\033[0m") + if return_dr: + diff_attrs = [0, 1] + + if mask is not None: + attr = torch.cat([attr, mask.float().flatten(1, 2).unsqueeze(-1)], dim=-1) + + rast = rasterize_triangle_faces( + ctx, + pts, + faces, + width, + height, + attr=attr, + view=view_tgt, + perspective=perspective_tgt, + antialiasing=antialiasing, + diff_attrs=diff_attrs, + ) + if return_dr: + output_image, screen_depth, output_dr = rast['image'], rast['depth'], rast['image_dr'] + else: + output_image, screen_depth = rast['image'], rast['depth'] + output_mask = screen_depth < 1.0 + + if mask is not None: + output_image, rast_mask = output_image[..., :-1, :, :], output_image[..., -1, :, :] + output_mask &= (rast_mask > 0.9999).reshape(-1, height, width) + + if (return_dr or return_uv) and image is not None: + output_image, output_uv = output_image[..., :-2, :, :], output_image[..., -2:, :, :] + + output_depth = transforms.depth_buffer_to_linear(screen_depth, near=near, far=far) * output_mask + output_image = output_image * output_mask.unsqueeze(1) + + outs = [output_image, output_depth, output_mask] + if return_uv: + outs.append(output_uv) + if return_dr: + outs.append(output_dr) + return tuple(outs) + + +def warp_image_by_forward_flow( + ctx: RastContext, + image: torch.FloatTensor, + flow: torch.FloatTensor, + depth: torch.FloatTensor = None, + *, + antialiasing: bool = True, + backslash: bool = False, +) -> Tuple[torch.FloatTensor, torch.BoolTensor]: + """ + Warp image by forward flow. + NOTE: if batch size is 1, image mesh will be triangulated aware of the depth, yielding less distorted results. + Otherwise, image mesh will be triangulated simply for batch rendering. 
+ + Args: + ctx (Union[dr.RasterizeCudaContext, dr.RasterizeGLContext]): rasterization context + image (torch.Tensor): (B, C, H, W) image + flow (torch.Tensor): (B, 2, H, W) forward flow + depth (torch.Tensor, optional): (B, H, W) linear depth. If None, will use the same for all pixels. Defaults to None. + antialiasing (bool, optional): whether to perform antialiasing. Defaults to True. + backslash (bool, optional): whether to use backslash triangulation. Defaults to False. + + Returns: + image: (torch.FloatTensor): (B, C, H, W) rendered image + mask: (torch.BoolTensor): (B, H, W) mask of valid pixels + """ + assert image.ndim == 4, f'Wrong shape of image: {image.shape}' + batch_size, _, height, width = image.shape + + if depth is None: + depth = torch.ones_like(flow[:, 0]) + + extrinsics = torch.eye(4).to(image) + fov = torch.deg2rad(torch.tensor([45.0], device=image.device)) + intrinsics = transforms.intrinsics_from_fov(fov, width, height, normalize=True)[0] + + view = transforms.extrinsics_to_view(extrinsics) + perspective = transforms.intrinsics_to_perspective(intrinsics, near=0.1, far=100) + + uv, faces = utils.image_mesh(width=width, height=height) + uv, faces = uv.to(image.device), faces.to(image.device) + uv = uv + flow.permute(0, 2, 3, 1).flatten(1, 2) + pts = transforms.unproject_cv( + uv, + depth.flatten(-2, -1), + extrinsics, + intrinsics, + ) + + # triangulate + if batch_size == 1: + faces = mesh.triangulate(faces, vertices=pts[0]) + else: + faces = mesh.triangulate(faces, backslash=backslash) + + # rasterize attributes + attr = image.permute(0, 2, 3, 1).flatten(1, 2) + rast = rasterize_triangle_faces( + ctx, + pts, + faces, + width, + height, + attr=attr, + view=view, + perspective=perspective, + antialiasing=antialiasing, + ) + output_image, screen_depth = rast['image'], rast['depth'] + output_mask = screen_depth < 1.0 + output_image = output_image * output_mask.unsqueeze(1) + + outs = [output_image, output_mask] + return tuple(outs) diff --git 
a/utils3d/utils3d/torch/transforms.py b/utils3d/utils3d/torch/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..b61b7725adcf8bc772367519b787d035a1284336 --- /dev/null +++ b/utils3d/utils3d/torch/transforms.py @@ -0,0 +1,1208 @@ +from typing import * +from numbers import Number + +import torch +import torch.nn.functional as F + +from ._helpers import batched + + +__all__ = [ + 'perspective', + 'perspective_from_fov', + 'perspective_from_fov_xy', + 'intrinsics_from_focal_center', + 'intrinsics_from_fov', + 'intrinsics_from_fov_xy', + 'focal_to_fov', + 'fov_to_focal', + 'intrinsics_to_fov', + 'view_look_at', + 'extrinsics_look_at', + 'perspective_to_intrinsics', + 'intrinsics_to_perspective', + 'extrinsics_to_view', + 'view_to_extrinsics', + 'normalize_intrinsics', + 'crop_intrinsics', + 'pixel_to_uv', + 'pixel_to_ndc', + 'uv_to_pixel', + 'project_depth', + 'depth_buffer_to_linear', + 'project_gl', + 'project_cv', + 'unproject_gl', + 'unproject_cv', + 'skew_symmetric', + 'rotation_matrix_from_vectors', + 'euler_axis_angle_rotation', + 'euler_angles_to_matrix', + 'matrix_to_euler_angles', + 'matrix_to_quaternion', + 'quaternion_to_matrix', + 'matrix_to_axis_angle', + 'axis_angle_to_matrix', + 'axis_angle_to_quaternion', + 'quaternion_to_axis_angle', + 'slerp', + 'interpolate_extrinsics', + 'interpolate_view', + 'extrinsics_to_essential', + 'to4x4', + 'rotation_matrix_2d', + 'rotate_2d', + 'translate_2d', + 'scale_2d', + 'apply_2d', +] + + +@batched(0,0,0,0) +def perspective( + fov_y: Union[float, torch.Tensor], + aspect: Union[float, torch.Tensor], + near: Union[float, torch.Tensor], + far: Union[float, torch.Tensor] + ) -> torch.Tensor: + """ + Get OpenGL perspective matrix + + Args: + fov_y (float | torch.Tensor): field of view in y axis + aspect (float | torch.Tensor): aspect ratio + near (float | torch.Tensor): near plane to clip + far (float | torch.Tensor): far plane to clip + + Returns: + (torch.Tensor): [..., 4, 4] perspective 
matrix + """ + N = fov_y.shape[0] + ret = torch.zeros((N, 4, 4), dtype=fov_y.dtype, device=fov_y.device) + ret[:, 0, 0] = 1. / (torch.tan(fov_y / 2) * aspect) + ret[:, 1, 1] = 1. / (torch.tan(fov_y / 2)) + ret[:, 2, 2] = (near + far) / (near - far) + ret[:, 2, 3] = 2. * near * far / (near - far) + ret[:, 3, 2] = -1. + return ret + + +def perspective_from_fov( + fov: Union[float, torch.Tensor], + width: Union[int, torch.Tensor], + height: Union[int, torch.Tensor], + near: Union[float, torch.Tensor], + far: Union[float, torch.Tensor] + ) -> torch.Tensor: + """ + Get OpenGL perspective matrix from field of view in largest dimension + + Args: + fov (float | torch.Tensor): field of view in largest dimension + width (int | torch.Tensor): image width + height (int | torch.Tensor): image height + near (float | torch.Tensor): near plane to clip + far (float | torch.Tensor): far plane to clip + + Returns: + (torch.Tensor): [..., 4, 4] perspective matrix + """ + fov_y = 2 * torch.atan(torch.tan(fov / 2) * height / torch.maximum(width, height)) + aspect = width / height + return perspective(fov_y, aspect, near, far) + + +def perspective_from_fov_xy( + fov_x: Union[float, torch.Tensor], + fov_y: Union[float, torch.Tensor], + near: Union[float, torch.Tensor], + far: Union[float, torch.Tensor] + ) -> torch.Tensor: + """ + Get OpenGL perspective matrix from field of view in x and y axis + + Args: + fov_x (float | torch.Tensor): field of view in x axis + fov_y (float | torch.Tensor): field of view in y axis + near (float | torch.Tensor): near plane to clip + far (float | torch.Tensor): far plane to clip + + Returns: + (torch.Tensor): [..., 4, 4] perspective matrix + """ + aspect = torch.tan(fov_x / 2) / torch.tan(fov_y / 2) + return perspective(fov_y, aspect, near, far) + + +@batched(0,0,0,0) +def intrinsics_from_focal_center( + fx: Union[float, torch.Tensor], + fy: Union[float, torch.Tensor], + cx: Union[float, torch.Tensor], + cy: Union[float, torch.Tensor] +) -> torch.Tensor: + 
""" + Get OpenCV intrinsics matrix + + Args: + focal_x (float | torch.Tensor): focal length in x axis + focal_y (float | torch.Tensor): focal length in y axis + cx (float | torch.Tensor): principal point in x axis + cy (float | torch.Tensor): principal point in y axis + + Returns: + (torch.Tensor): [..., 3, 3] OpenCV intrinsics matrix + """ + N = fx.shape[0] + ret = torch.zeros((N, 3, 3), dtype=fx.dtype, device=fx.device) + zeros, ones = torch.zeros(N, dtype=fx.dtype, device=fx.device), torch.ones(N, dtype=fx.dtype, device=fx.device) + ret = torch.stack([fx, zeros, cx, zeros, fy, cy, zeros, zeros, ones], dim=-1).unflatten(-1, (3, 3)) + return ret + + +@batched(0, 0, 0, 0, 0, 0) +def intrinsics_from_fov( + fov_max: Union[float, torch.Tensor] = None, + fov_min: Union[float, torch.Tensor] = None, + fov_x: Union[float, torch.Tensor] = None, + fov_y: Union[float, torch.Tensor] = None, + width: Union[int, torch.Tensor] = None, + height: Union[int, torch.Tensor] = None, +) -> torch.Tensor: + """ + Get normalized OpenCV intrinsics matrix from given field of view. 
+ You can provide either fov_max, fov_min, fov_x or fov_y + + Args: + width (int | torch.Tensor): image width + height (int | torch.Tensor): image height + fov_max (float | torch.Tensor): field of view in largest dimension + fov_min (float | torch.Tensor): field of view in smallest dimension + fov_x (float | torch.Tensor): field of view in x axis + fov_y (float | torch.Tensor): field of view in y axis + + Returns: + (torch.Tensor): [..., 3, 3] OpenCV intrinsics matrix + """ + if fov_max is not None: + fx = torch.maximum(width, height) / width / (2 * torch.tan(fov_max / 2)) + fy = torch.maximum(width, height) / height / (2 * torch.tan(fov_max / 2)) + elif fov_min is not None: + fx = torch.minimum(width, height) / width / (2 * torch.tan(fov_min / 2)) + fy = torch.minimum(width, height) / height / (2 * torch.tan(fov_min / 2)) + elif fov_x is not None and fov_y is not None: + fx = 1 / (2 * torch.tan(fov_x / 2)) + fy = 1 / (2 * torch.tan(fov_y / 2)) + elif fov_x is not None: + fx = 1 / (2 * torch.tan(fov_x / 2)) + fy = fx * width / height + elif fov_y is not None: + fy = 1 / (2 * torch.tan(fov_y / 2)) + fx = fy * height / width + cx = 0.5 + cy = 0.5 + ret = intrinsics_from_focal_center(fx, fy, cx, cy) + return ret + + + +def intrinsics_from_fov_xy( + fov_x: Union[float, torch.Tensor], + fov_y: Union[float, torch.Tensor] +) -> torch.Tensor: + """ + Get OpenCV intrinsics matrix from field of view in x and y axis + + Args: + fov_x (float | torch.Tensor): field of view in x axis + fov_y (float | torch.Tensor): field of view in y axis + + Returns: + (torch.Tensor): [..., 3, 3] OpenCV intrinsics matrix + """ + focal_x = 0.5 / torch.tan(fov_x / 2) + focal_y = 0.5 / torch.tan(fov_y / 2) + cx = cy = 0.5 + return intrinsics_from_focal_center(focal_x, focal_y, cx, cy) + + +def focal_to_fov(focal: torch.Tensor): + return 2 * torch.atan(0.5 / focal) + + +def fov_to_focal(fov: torch.Tensor): + return 0.5 / torch.tan(fov / 2) + + +def intrinsics_to_fov(intrinsics: torch.Tensor) -> 
Tuple[torch.Tensor, torch.Tensor]: + "NOTE: approximate FOV by assuming centered principal point" + fov_x = focal_to_fov(intrinsics[..., 0, 0]) + fov_y = focal_to_fov(intrinsics[..., 1, 1]) + return fov_x, fov_y + + +@batched(1,1,1) +def view_look_at( + eye: torch.Tensor, + look_at: torch.Tensor, + up: torch.Tensor +) -> torch.Tensor: + """ + Get OpenGL view matrix looking at something + + Args: + eye (torch.Tensor): [..., 3] the eye position + look_at (torch.Tensor): [..., 3] the position to look at + up (torch.Tensor): [..., 3] head up direction (y axis in screen space). Not necessarily othogonal to view direction + + Returns: + (torch.Tensor): [..., 4, 4], view matrix + """ + N = eye.shape[0] + z = eye - look_at + x = torch.cross(up, z, dim=-1) + y = torch.cross(z, x, dim=-1) + # x = torch.cross(y, z, dim=-1) + x = x / x.norm(dim=-1, keepdim=True) + y = y / y.norm(dim=-1, keepdim=True) + z = z / z.norm(dim=-1, keepdim=True) + R = torch.stack([x, y, z], dim=-2) + t = -torch.matmul(R, eye[..., None]) + ret = torch.zeros((N, 4, 4), dtype=eye.dtype, device=eye.device) + ret[:, :3, :3] = R + ret[:, :3, 3] = t[:, :, 0] + ret[:, 3, 3] = 1. + return ret + + +@batched(1, 1, 1) +def extrinsics_look_at( + eye: torch.Tensor, + look_at: torch.Tensor, + up: torch.Tensor +) -> torch.Tensor: + """ + Get OpenCV extrinsics matrix looking at something + + Args: + eye (torch.Tensor): [..., 3] the eye position + look_at (torch.Tensor): [..., 3] the position to look at + up (torch.Tensor): [..., 3] head up direction (-y axis in screen space). 
@batched(2)
def perspective_to_intrinsics(
    perspective: torch.Tensor
) -> torch.Tensor:
    """
    OpenGL perspective matrix to OpenCV intrinsics

    Args:
        perspective (torch.Tensor): [..., 4, 4] OpenGL perspective matrix

    Returns:
        (torch.Tensor): shape [..., 3, 3] OpenCV intrinsics
    """
    # Rows 0, 1 and 3 of the last column must be zero for a projection matrix.
    # BUG FIX: torch.allclose requires a Tensor `other`; passing the int 0 raises a TypeError.
    zero = torch.zeros_like(perspective[:, [0, 1, 3], 3])
    assert torch.allclose(perspective[:, [0, 1, 3], 3], zero), "The perspective matrix is not a projection matrix"
    # Map NDC ([-1, 1], y up) to uv ([0, 1], y down), and flip the sign of z (OpenGL looks down -z).
    ret = torch.tensor([[0.5, 0., 0.5], [0., -0.5, 0.5], [0., 0., 1.]], dtype=perspective.dtype, device=perspective.device) \
        @ perspective[:, [0, 1, 3], :3] \
        @ torch.diag(torch.tensor([1, -1, -1], dtype=perspective.dtype, device=perspective.device))
    # Normalize so the bottom-right element is 1.
    return ret / ret[:, 2, 2, None, None]


@batched(2,0,0)
def intrinsics_to_perspective(
    intrinsics: torch.Tensor,
    near: Union[float, torch.Tensor],
    far: Union[float, torch.Tensor],
) -> torch.Tensor:
    """
    OpenCV intrinsics to OpenGL perspective matrix

    NOTE(review): assumes the intrinsics are normalized to uv space
    (fx, fy, cx, cy expressed in [0, 1] units) — confirm against callers.

    Args:
        intrinsics (torch.Tensor): [..., 3, 3] OpenCV intrinsics matrix
        near (float | torch.Tensor): [...] near plane to clip
        far (float | torch.Tensor): [...] far plane to clip

    Returns:
        (torch.Tensor): [..., 4, 4] OpenGL perspective matrix
    """
    N = intrinsics.shape[0]
    fx, fy = intrinsics[:, 0, 0], intrinsics[:, 1, 1]
    cx, cy = intrinsics[:, 0, 2], intrinsics[:, 1, 2]
    ret = torch.zeros((N, 4, 4), dtype=intrinsics.dtype, device=intrinsics.device)
    ret[:, 0, 0] = 2 * fx
    ret[:, 1, 1] = 2 * fy
    ret[:, 0, 2] = -2 * cx + 1
    ret[:, 1, 2] = 2 * cy - 1
    ret[:, 2, 2] = (near + far) / (near - far)
    ret[:, 2, 3] = 2. * near * far / (near - far)
    ret[:, 3, 2] = -1.
    return ret


@batched(2)
def extrinsics_to_view(
        extrinsics: torch.Tensor
    ) -> torch.Tensor:
    """
    OpenCV camera extrinsics to OpenGL view matrix

    Args:
        extrinsics (torch.Tensor): [..., 4, 4] OpenCV camera extrinsics matrix

    Returns:
        (torch.Tensor): [..., 4, 4] OpenGL view matrix
    """
    # Negating rows 1 and 2 flips the y and z axes between the two conventions.
    return extrinsics * torch.tensor([1, -1, -1, 1], dtype=extrinsics.dtype, device=extrinsics.device)[:, None]


@batched(2)
def view_to_extrinsics(
        view: torch.Tensor
    ) -> torch.Tensor:
    """
    OpenGL view matrix to OpenCV camera extrinsics

    Args:
        view (torch.Tensor): [..., 4, 4] OpenGL view matrix

    Returns:
        (torch.Tensor): [..., 4, 4] OpenCV camera extrinsics matrix
    """
    # The conversion is an involution: the same row sign flip maps back.
    return view * torch.tensor([1, -1, -1, 1], dtype=view.dtype, device=view.device)[:, None]


@batched(2,0,0)
def normalize_intrinsics(
        intrinsics: torch.Tensor,
        width: Union[int, torch.Tensor],
        height: Union[int, torch.Tensor]
    ) -> torch.Tensor:
    """
    Normalize camera intrinsics(s) to uv space

    Args:
        intrinsics (torch.Tensor): [..., 3, 3] camera intrinsics(s) to normalize
        width (int | torch.Tensor): [...] image width(s)
        height (int | torch.Tensor): [...] image height(s)

    Returns:
        (torch.Tensor): [..., 3, 3] normalized camera intrinsics(s)
    """
    # NOTE(review): assumes width/height arrive as floating tensors so the
    # stacked entries share one dtype — confirm what @batched converts to.
    zeros = torch.zeros_like(width)
    ones = torch.ones_like(width)
    # BUG FIX: stack along the LAST dim. Stacking along dim 0 produced a
    # [9, N] tensor whose reshape to [N, 3, 3] scrambled batches of size > 1.
    transform = torch.stack([
        1 / width, zeros, 0.5 / width,
        zeros, 1 / height, 0.5 / height,
        zeros, zeros, ones
    ], dim=-1).reshape(*zeros.shape, 3, 3).to(intrinsics)
    return transform @ intrinsics


@batched(2,0,0,0,0,0,0)
def crop_intrinsics(
    intrinsics: torch.Tensor,
    width: Union[int, torch.Tensor],
    height: Union[int, torch.Tensor],
    left: Union[int, torch.Tensor],
    top: Union[int, torch.Tensor],
    crop_width: Union[int, torch.Tensor],
    crop_height: Union[int, torch.Tensor]
) -> torch.Tensor:
    """
    Evaluate the new intrinsics(s) after crop the image: cropped_img = img[top:top+crop_height, left:left+crop_width]

    Args:
        intrinsics (torch.Tensor): [..., 3, 3] camera intrinsics(s) to crop
        width (int | torch.Tensor): [...] image width(s)
        height (int | torch.Tensor): [...] image height(s)
        left (int | torch.Tensor): [...] left crop boundary
        top (int | torch.Tensor): [...] top crop boundary
        crop_width (int | torch.Tensor): [...] crop width
        crop_height (int | torch.Tensor): [...] crop height

    Returns:
        (torch.Tensor): [..., 3, 3] cropped camera intrinsics(s)
    """
    zeros = torch.zeros_like(width)
    ones = torch.ones_like(width)
    # BUG FIX: stack along the LAST dim (see normalize_intrinsics) so batched
    # inputs keep a per-item 3x3 layout.
    transform = torch.stack([
        width / crop_width, zeros, -left / crop_width,
        zeros, height / crop_height, -top / crop_height,
        zeros, zeros, ones
    ], dim=-1).reshape(*zeros.shape, 3, 3).to(intrinsics)
    return transform @ intrinsics
@batched(1,0,0)
def uv_to_pixel(
    uv: torch.Tensor,
    width: Union[int, torch.Tensor],
    height: Union[int, torch.Tensor]
) -> torch.Tensor:
    """
    Convert uv-space coordinates to image-space pixel coordinates.

    Args:
        uv (torch.Tensor): [..., 2] coordinates defined in uv space, the range is (0, 1)
        width (int | torch.Tensor): [...] image width(s)
        height (int | torch.Tensor): [...] image height(s)

    Returns:
        (torch.Tensor): [..., 2] pixel coordinates defined in image space,
            x range is (0, W - 1), y range is (0, H - 1)
    """
    # Inverse of pixel_to_uv: uv * (W, H) - 0.5 recovers the pixel center index.
    pixel = uv * torch.stack([width, height], dim=-1).to(uv) - 0.5
    return pixel


@batched(1,0,0)
def pixel_to_ndc(
    pixel: torch.Tensor,
    width: Union[int, torch.Tensor],
    height: Union[int, torch.Tensor]
) -> torch.Tensor:
    """
    Convert image-space pixel coordinates to OpenGL NDC coordinates (y up).

    Args:
        pixel (torch.Tensor): [..., 2] pixel coordinates defined in image space, x range is (0, W - 1), y range is (0, H - 1)
        width (int | torch.Tensor): [...] image width(s)
        height (int | torch.Tensor): [...] image height(s)

    Returns:
        (torch.Tensor): [..., 2] coordinates defined in ndc space, the range is (-1, 1)
    """
    if not torch.is_floating_point(pixel):
        pixel = pixel.float()
    # ndc_x = 2 * (px + 0.5) / W - 1, ndc_y = 1 - 2 * (py + 0.5) / H.
    # BUG FIX: the [2, -2] factor must MULTIPLY the normalized coordinate;
    # the previous code divided by (W, H) * [2, -2], which mapped pixels into
    # (-1, -0.5) x (0.75, 1) instead of (-1, 1).
    ndc = (pixel + 0.5) / torch.stack([width, height], dim=-1).to(pixel) \
        * torch.tensor([2, -2], dtype=pixel.dtype, device=pixel.device) \
        + torch.tensor([-1, 1], dtype=pixel.dtype, device=pixel.device)
    return ndc


@batched(0,0,0)
def project_depth(
        depth: torch.Tensor,
        near: Union[float, torch.Tensor],
        far: Union[float, torch.Tensor]
    ) -> torch.Tensor:
    """
    Project linear depth to depth value in screen space

    Args:
        depth (torch.Tensor): [...] depth value
        near (float | torch.Tensor): [...] near plane to clip
        far (float | torch.Tensor): [...] far plane to clip

    Returns:
        (torch.Tensor): [...] depth value in screen space, value ranging in [0, 1]
    """
    return (far - near * far / depth) / (far - near)


@batched(0,0,0)
def depth_buffer_to_linear(
        depth: torch.Tensor,
        near: Union[float, torch.Tensor],
        far: Union[float, torch.Tensor]
    ) -> torch.Tensor:
    """
    Linearize depth value to linear depth

    Args:
        depth (torch.Tensor): [...] screen depth value, ranging in [0, 1]
        near (float | torch.Tensor): [...] near plane to clip
        far (float | torch.Tensor): [...] far plane to clip

    Returns:
        (torch.Tensor): [...] linear depth
    """
    # Exact inverse of project_depth.
    return near * far / (far - (far - near) * depth)


@batched(2, 2, 2, 2)
def project_gl(
    points: torch.Tensor,
    model: torch.Tensor = None,
    view: torch.Tensor = None,
    perspective: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Project 3D points to 2D following the OpenGL convention (except for row major matrice)

    Args:
        points (torch.Tensor): [..., N, 3 or 4] 3D points to project, if the last
            dimension is 4, the points are assumed to be in homogeneous coordinates
        model (torch.Tensor): [..., 4, 4] model matrix
        view (torch.Tensor): [..., 4, 4] view matrix
        perspective (torch.Tensor): [..., 4, 4] perspective matrix

    Returns:
        scr_coord (torch.Tensor): [..., N, 3] screen space coordinates, value ranging in [0, 1].
            The origin (0., 0., 0.) is corresponding to the left & bottom & nearest
        linear_depth (torch.Tensor): [..., N] linear depth
    """
    assert perspective is not None, "perspective matrix is required"

    if points.shape[-1] == 3:
        # Promote to homogeneous coordinates.
        points = torch.cat([points, torch.ones_like(points[..., :1])], dim=-1)
    mvp = perspective if perspective is not None else torch.eye(4).to(points)
    if view is not None:
        mvp = mvp @ view
    if model is not None:
        mvp = mvp @ model
    clip_coord = points @ mvp.transpose(-1, -2)
    ndc_coord = clip_coord[..., :3] / clip_coord[..., 3:]
    scr_coord = ndc_coord * 0.5 + 0.5
    # The clip-space w component equals -z_view, i.e. the linear depth.
    linear_depth = clip_coord[..., 3]
    return scr_coord, linear_depth


@batched(2, 2, 2)
def project_cv(
    points: torch.Tensor,
    extrinsics: torch.Tensor = None,
    intrinsics: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Project 3D points to 2D following the OpenCV convention

    Args:
        points (torch.Tensor): [..., N, 3] or [..., N, 4] 3D points to project, if the last
            dimension is 4, the points are assumed to be in homogeneous coordinates
        extrinsics (torch.Tensor): [..., 4, 4] extrinsics matrix
        intrinsics (torch.Tensor): [..., 3, 3] intrinsics matrix

    Returns:
        uv_coord (torch.Tensor): [..., N, 2] uv coordinates, value ranging in [0, 1].
            The origin (0., 0.) is corresponding to the left & top
        linear_depth (torch.Tensor): [..., N] linear depth
    """
    assert intrinsics is not None, "intrinsics matrix is required"
    if points.shape[-1] == 3:
        points = torch.cat([points, torch.ones_like(points[..., :1])], dim=-1)
    if extrinsics is not None:
        points = points @ extrinsics.transpose(-1, -2)
    points = points[..., :3] @ intrinsics.transpose(-2, -1)
    # Perspective divide by the camera-space z.
    uv_coord = points[..., :2] / points[..., 2:]
    linear_depth = points[..., 2]
    return uv_coord, linear_depth


@batched(2, 2, 2, 2)
def unproject_gl(
        screen_coord: torch.Tensor,
        model: torch.Tensor = None,
        view: torch.Tensor = None,
        perspective: torch.Tensor = None
    ) -> torch.Tensor:
    """
    Unproject screen space coordinates to 3D view space following the OpenGL convention (except for row major matrice)

    Args:
        screen_coord (torch.Tensor): [... N, 3] screen space coordinates, value ranging in [0, 1].
            The origin (0., 0., 0.) is corresponding to the left & bottom & nearest
        model (torch.Tensor): [..., 4, 4] model matrix
        view (torch.Tensor): [..., 4, 4] view matrix
        perspective (torch.Tensor): [..., 4, 4] perspective matrix

    Returns:
        points (torch.Tensor): [..., N, 3] 3d points
    """
    assert perspective is not None, "perspective matrix is required"
    # Screen [0, 1] -> NDC [-1, 1] for all three components.
    ndc_xy = screen_coord * 2 - 1
    clip_coord = torch.cat([ndc_xy, torch.ones_like(ndc_xy[..., :1])], dim=-1)
    transform = perspective
    if view is not None:
        transform = transform @ view
    if model is not None:
        transform = transform @ model
    transform = torch.inverse(transform)
    points = clip_coord @ transform.transpose(-1, -2)
    points = points[..., :3] / points[..., 3:]
    return points
def euler_axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor:
    """
    Return the rotation matrices for one of the rotations about an axis
    of which Euler angles describe, for each value of the angle given.

    Args:
        axis: Axis label "X" or "Y or "Z".
        angle: any shape tensor of Euler angles in radians

    Returns:
        Rotation matrices as tensor of shape (..., 3, 3).
    """

    cos = torch.cos(angle)
    sin = torch.sin(angle)
    one = torch.ones_like(angle)
    zero = torch.zeros_like(angle)

    if axis == "X":
        R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
    elif axis == "Y":
        R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
    elif axis == "Z":
        R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
    else:
        raise ValueError("letter must be either X, Y or Z.")

    return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))


def euler_angles_to_matrix(euler_angles: torch.Tensor, convention: str = 'XYZ') -> torch.Tensor:
    """
    Convert rotations given as Euler angles in radians to rotation matrices.

    Args:
        euler_angles: Euler angles in radians as tensor of shape (..., 3), XYZ
        convention: permutation of "X", "Y" or "Z", representing the order of Euler rotations to apply.

    Returns:
        Rotation matrices as tensor of shape (..., 3, 3).
    """
    if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
        raise ValueError("Invalid input euler angles.")
    if len(convention) != 3:
        raise ValueError("Convention must have 3 letters.")
    if convention[1] in (convention[0], convention[2]):
        raise ValueError(f"Invalid convention {convention}.")
    for letter in convention:
        if letter not in ("X", "Y", "Z"):
            raise ValueError(f"Invalid letter {letter} in convention string.")
    matrices = [
        euler_axis_angle_rotation(c, euler_angles[..., 'XYZ'.index(c)])
        for c in convention
    ]
    # Compose in reverse order: the first convention letter is applied first
    # (blender-style `Rz @ Ry @ Rx` for 'XYZ').
    return matrices[2] @ matrices[1] @ matrices[0]


def skew_symmetric(v: torch.Tensor):
    "Skew symmetric matrix from a 3D vector"
    assert v.shape[-1] == 3, "v must be 3D"
    x, y, z = v.unbind(dim=-1)
    zeros = torch.zeros_like(x)
    return torch.stack([
        zeros, -z, y,
        z, zeros, -x,
        -y, x, zeros,
    ], dim=-1).reshape(*v.shape[:-1], 3, 3)


def rotation_matrix_from_vectors(v1: torch.Tensor, v2: torch.Tensor):
    """
    Rotation matrix that rotates v1 to v2 (Rodrigues' formula).

    NOTE: degenerates (division by zero) when v1 and v2 are anti-parallel,
    since 1 + cos(theta) = 0 in that case.

    Args:
        v1 (torch.Tensor): (..., 3) source direction(s)
        v2 (torch.Tensor): (..., 3) target direction(s)

    Returns:
        (torch.Tensor): (..., 3, 3) rotation matrices such that R @ v1 ∝ v2
    """
    I = torch.eye(3).to(v1)
    v1 = F.normalize(v1, dim=-1)
    v2 = F.normalize(v2, dim=-1)
    v = torch.cross(v1, v2, dim=-1)
    c = torch.sum(v1 * v2, dim=-1)
    K = skew_symmetric(v)
    # BUG FIX: append the broadcast dims at the END ([..., None, None]).
    # `[None, None]` prepended two leading dims, which mis-broadcast (or
    # raised) for batched inputs.
    R = I + K + (1 / (1 + c))[..., None, None] * (K @ K)
    return R
def matrix_to_euler_angles(matrix: torch.Tensor, convention: str) -> torch.Tensor:
    """
    Convert rotations given as rotation matrices to Euler angles in radians.
    NOTE: The composition order eg. `XYZ` means `Rz * Ry * Rx` (like blender), instead of `Rx * Ry * Rz` (like pytorch3d)

    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).
        convention: Convention string of three uppercase letters.

    Returns:
        Euler angles in radians as tensor of shape (..., 3), in the order of XYZ (like blender), instead of convention (like pytorch3d)
    """
    if not all(c in 'XYZ' for c in convention) or not all(c in convention for c in 'XYZ'):
        raise ValueError(f"Invalid convention {convention}.")
    if not matrix.shape[-2:] == (3, 3):
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")

    i0 = 'XYZ'.index(convention[0])
    i2 = 'XYZ'.index(convention[2])
    tait_bryan = i0 != i2
    if tait_bryan:
        central_angle = torch.asin(matrix[..., i2, i0] * (-1.0 if i2 - i0 in [-1, 2] else 1.0))
    else:
        central_angle = torch.acos(matrix[..., i2, i2])

    # Angles in composition order
    o = [
        _angle_from_tan(
            convention[0], convention[1], matrix[..., i2, :], True, tait_bryan
        ),
        central_angle,
        _angle_from_tan(
            convention[2], convention[1], matrix[..., i0], False, tait_bryan
        ),
    ]
    # Reorder from composition order back to fixed XYZ order.
    return torch.stack([o[convention.index(c)] for c in 'XYZ'], -1)


def axis_angle_to_matrix(axis_angle: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
    """Convert axis-angle representation (rotation vector) to rotation matrix, whose direction is the axis of rotation and length is the angle of rotation

    Args:
        axis_angle (torch.Tensor): shape (..., 3), axis-angle vcetors

    Returns:
        torch.Tensor: shape (..., 3, 3) The rotation matrices for the given axis-angle parameters
    """
    batch_shape = axis_angle.shape[:-1]
    device, dtype = axis_angle.device, axis_angle.dtype

    # eps keeps the norm (and the division below) away from zero.
    angle = torch.norm(axis_angle + eps, dim=-1, keepdim=True)
    axis = axis_angle / angle

    cos = torch.cos(angle)[..., None, :]
    sin = torch.sin(angle)[..., None, :]

    # BUG FIX: split into chunks of size 1. `torch.split(axis, 3, dim=-1)`
    # returns a SINGLE (..., 3) chunk, so unpacking into rx, ry, rz raised
    # ValueError on every call.
    rx, ry, rz = torch.split(axis, 1, dim=-1)
    zeros = torch.zeros((*batch_shape, 1), dtype=dtype, device=device)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=-1).view((*batch_shape, 3, 3))

    # Rodrigues' formula: R = I + sin(a) K + (1 - cos(a)) K^2
    ident = torch.eye(3, dtype=dtype, device=device)
    rot_mat = ident + sin * K + (1 - cos) * torch.matmul(K, K)
    return rot_mat


def matrix_to_axis_angle(rot_mat: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
    """Convert a batch of 3x3 rotation matrices to axis-angle representation (rotation vector)

    Args:
        rot_mat (torch.Tensor): shape (..., 3, 3), the rotation matrices to convert

    Returns:
        torch.Tensor: shape (..., 3), the axis-angle vectors corresponding to the given rotation matrices
    """
    # Go through quaternions: numerically stabler than the log map.
    quat = matrix_to_quaternion(rot_mat)
    axis_angle = quaternion_to_axis_angle(quat, eps=eps)
    return axis_angle


def quaternion_to_axis_angle(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
    """Convert a batch of quaternions (w, x, y, z) to axis-angle representation (rotation vector)

    Args:
        quaternion (torch.Tensor): shape (..., 4), the quaternions to convert

    Returns:
        torch.Tensor: shape (..., 3), the axis-angle vectors corresponding to the given quaternions
    """
    assert quaternion.shape[-1] == 4
    norm = torch.norm(quaternion[..., 1:], dim=-1, keepdim=True)
    axis = quaternion[..., 1:] / norm.clamp(min=eps)
    angle = 2 * torch.atan2(norm, quaternion[..., 0:1])
    return angle * axis


def axis_angle_to_quaternion(axis_angle: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
    """Convert axis-angle representation (rotation vector) to quaternion (w, x, y, z)

    Args:
        axis_angle (torch.Tensor): shape (..., 3), axis-angle vcetors

    Returns:
        torch.Tensor: shape (..., 4) The quaternions for the given axis-angle parameters
    """
    axis = F.normalize(axis_angle, dim=-1, eps=eps)
    angle = torch.norm(axis_angle, dim=-1, keepdim=True)
    quat = torch.cat([torch.cos(angle / 2), torch.sin(angle / 2) * axis], dim=-1)
    return quat


def matrix_to_quaternion(rot_mat: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
    """Convert 3x3 rotation matrix to quaternion (w, x, y, z)

    Args:
        rot_mat (torch.Tensor): shape (..., 3, 3), the rotation matrices to convert

    Returns:
        torch.Tensor: shape (..., 4), the quaternions corresponding to the given rotation matrices
    """
    # Extract the diagonal and off-diagonal elements of the rotation matrix
    m00, m01, m02, m10, m11, m12, m20, m21, m22 = rot_mat.flatten(-2).unbind(dim=-1)

    diag = torch.diagonal(rot_mat, dim1=-2, dim2=-1)
    M = torch.tensor([
        [1, 1, 1],
        [1, -1, -1],
        [-1, 1, -1],
        [-1, -1, 1]
    ], dtype=rot_mat.dtype, device=rot_mat.device)
    # |w|, |x|, |y|, |z| from the trace combinations (clamped for stability).
    wxyz = (1 + diag @ M.transpose(-1, -2)).clamp_(0).sqrt().mul(0.5)
    # Resolve signs relative to the largest-magnitude component.
    _, max_idx = wxyz.max(dim=-1)
    xw = torch.sign(m21 - m12)
    yw = torch.sign(m02 - m20)
    zw = torch.sign(m10 - m01)
    yz = torch.sign(m21 + m12)
    xz = torch.sign(m02 + m20)
    xy = torch.sign(m01 + m10)
    ones = torch.ones_like(xw)
    sign = torch.where(
        max_idx[..., None] == 0,
        torch.stack([ones, xw, yw, zw], dim=-1),
        torch.where(
            max_idx[..., None] == 1,
            torch.stack([xw, ones, xy, xz], dim=-1),
            torch.where(
                max_idx[..., None] == 2,
                torch.stack([yw, xy, ones, yz], dim=-1),
                torch.stack([zw, xz, yz, ones], dim=-1)
            )
        )
    )
    quat = sign * wxyz
    quat = F.normalize(quat, dim=-1, eps=eps)
    return quat


def quaternion_to_matrix(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
    """Converts a batch of quaternions (w, x, y, z) to rotation matrices

    Args:
        quaternion (torch.Tensor): shape (..., 4), the quaternions to convert

    Returns:
        torch.Tensor: shape (..., 3, 3), the rotation matrices corresponding to the given quaternions
    """
    assert quaternion.shape[-1] == 4
    quaternion = F.normalize(quaternion, dim=-1, eps=eps)
    w, x, y, z = quaternion.unbind(dim=-1)
    zeros = torch.zeros_like(w)
    I = torch.eye(3, dtype=quaternion.dtype, device=quaternion.device)
    xyz = quaternion[..., 1:]
    # R = I + 2 w [xyz]_x + 2 [xyz]_x^2, with A = [xyz]_x^2 and B = [xyz]_x.
    A = xyz[..., :, None] * xyz[..., None, :] - I * (xyz ** 2).sum(dim=-1)[..., None, None]
    B = torch.stack([
        zeros, -z, y,
        z, zeros, -x,
        -y, x, zeros
    ], dim=-1).unflatten(-1, (3, 3))
    rot_mat = I + 2 * (A + w[..., None, None] * B)
    return rot_mat


def slerp(rot_mat_1: torch.Tensor, rot_mat_2: torch.Tensor, t: Union[Number, torch.Tensor]) -> torch.Tensor:
    """Spherical linear interpolation between two rotation matrices

    NOTE(review): this interpolates the two ABSOLUTE rotation vectors linearly,
    which only approximates a true geodesic slerp — behavior kept as-is.

    Args:
        rot_mat_1 (torch.Tensor): shape (..., 3, 3), the first rotation matrix
        rot_mat_2 (torch.Tensor): shape (..., 3, 3), the second rotation matrix
        t (torch.Tensor): scalar or shape (...,), the interpolation factor

    Returns:
        torch.Tensor: shape (..., 3, 3), the interpolated rotation matrix
    """
    assert rot_mat_1.shape[-2:] == (3, 3)
    rot_vec_1 = matrix_to_axis_angle(rot_mat_1)
    rot_vec_2 = matrix_to_axis_angle(rot_mat_2)
    if isinstance(t, Number):
        t = torch.tensor(t, dtype=rot_mat_1.dtype, device=rot_mat_1.device)
    rot_vec = (1 - t[..., None]) * rot_vec_1 + t[..., None] * rot_vec_2
    rot_mat = axis_angle_to_matrix(rot_vec)
    return rot_mat


def interpolate_extrinsics(ext1: torch.Tensor, ext2: torch.Tensor, t: Union[Number, torch.Tensor]) -> torch.Tensor:
    """Interpolate extrinsics between two camera poses. Linear interpolation for translation, spherical linear interpolation for rotation.

    Args:
        ext1 (torch.Tensor): shape (..., 4, 4), the first camera pose
        ext2 (torch.Tensor): shape (..., 4, 4), the second camera pose
        t (torch.Tensor): scalar or shape (...,), the interpolation factor

    Returns:
        torch.Tensor: shape (..., 4, 4), the interpolated camera pose
    """
    # Interpolate camera-to-world poses (the inverses), then invert back.
    return torch.inverse(interpolate_transform(torch.inverse(ext1), torch.inverse(ext2), t))
def interpolate_transform(transform1: torch.Tensor, transform2: torch.Tensor, t: Union[Number, torch.Tensor]):
    """
    Interpolate two rigid 4x4 transforms: linear interpolation of the
    translation column, slerp of the rotation block.

    Args:
        transform1 (torch.Tensor): shape (..., 4, 4), the first transform
        transform2 (torch.Tensor): shape (..., 4, 4), the second transform
        t (Number | torch.Tensor): scalar or shape (...,), the interpolation factor

    Returns:
        torch.Tensor: shape (..., 4, 4), the interpolated transform
    """
    assert transform1.shape[-2:] == (4, 4) and transform2.shape[-2:] == (4, 4)
    if isinstance(t, Number):
        t = torch.tensor(t, dtype=transform1.dtype, device=transform1.device)
    pos = (1 - t[..., None]) * transform1[..., :3, 3] + t[..., None] * transform2[..., :3, 3]
    rot = slerp(transform1[..., :3, :3], transform2[..., :3, :3], t)
    transform = torch.cat([rot, pos[..., None]], dim=-1)
    # BUG FIX: the bottom row was concatenated onto an undefined name `ext`
    # (NameError); it must extend the `transform` built above.
    transform = torch.cat([transform, torch.tensor([0, 0, 0, 1], dtype=transform.dtype, device=transform.device).expand_as(transform[..., :1, :])], dim=-2)
    return transform


def extrinsics_to_essential(extrinsics: torch.Tensor):
    """
    extrinsics matrix `[[R, t] [0, 0, 0, 1]]` such that `x' = R (x - t)` to essential matrix such that `x' E x = 0`

    Args:
        extrinsics (torch.Tensor): [..., 4, 4] extrinsics matrix

    Returns:
        (torch.Tensor): [..., 3, 3] essential matrix
    """
    assert extrinsics.shape[-2:] == (4, 4)
    R = extrinsics[..., :3, :3]
    t = extrinsics[..., :3, 3]
    # BUG FIX: `zeros` must match the (...,) shape of the scalar components
    # (zeros_like(t) was (..., 3) and made torch.stack raise), and stacking
    # must be along the LAST dim so batched inputs keep a per-item 3x3 layout.
    zeros = torch.zeros_like(t[..., 0])
    t_x = torch.stack([
        zeros, -t[..., 2], t[..., 1],
        t[..., 2], zeros, -t[..., 0],
        -t[..., 1], t[..., 0], zeros
    ], dim=-1).reshape(*t.shape[:-1], 3, 3)
    return R @ t_x


def to4x4(R: torch.Tensor, t: torch.Tensor):
    """
    Compose rotation matrix and translation vector to 4x4 transformation matrix

    Args:
        R (torch.Tensor): [..., 3, 3] rotation matrix
        t (torch.Tensor): [..., 3] translation vector

    Returns:
        (torch.Tensor): [..., 4, 4] transformation matrix
    """
    assert R.shape[-2:] == (3, 3)
    assert t.shape[-1] == 3
    assert R.shape[:-2] == t.shape[:-1]
    return torch.cat([
        torch.cat([R, t[..., None]], dim=-1),
        torch.tensor([0, 0, 0, 1], dtype=R.dtype, device=R.device).expand(*R.shape[:-2], 1, 4)
    ], dim=-2)


def rotation_matrix_2d(theta: Union[float, torch.Tensor]):
    """
    2x2 matrix for 2D rotation

    Args:
        theta (float | torch.Tensor): rotation angle in radians, arbitrary shape (...,)

    Returns:
        (torch.Tensor): (..., 2, 2) rotation matrix
    """
    if isinstance(theta, float):
        theta = torch.tensor(theta)
    return torch.stack([
        torch.cos(theta), -torch.sin(theta),
        torch.sin(theta), torch.cos(theta),
    ], dim=-1).unflatten(-1, (2, 2))
def translate_2d(translation: torch.Tensor):
    """
    Translation matrix for 2D translation
    ```
        [[1, 0, tx],
         [0, 1, ty],
         [0, 0, 1]]
    ```
    Args:
        translation (torch.Tensor): translation vector, arbitrary shape (..., 2)

    Returns:
        (torch.Tensor): (..., 3, 3) transformation matrix
    """
    return torch.cat([
        torch.cat([
            torch.eye(2, dtype=translation.dtype, device=translation.device).expand(*translation.shape[:-1], -1, -1),
            translation[..., None],
        ], dim=-1),
        torch.tensor([[0, 0, 1]], dtype=translation.dtype, device=translation.device).expand(*translation.shape[:-1], -1, -1),
    ], dim=-2)


def scale_2d(scale: Union[float, torch.Tensor], center: torch.Tensor = None):
    """
    Scale matrix for 2D scaling
    ```
        [[s, 0, tx],
         [0, s, ty],
         [0, 0, 1]]
    ```
    Args:
        scale (float | torch.Tensor): scale factor, arbitrary shape (...,)
        center (torch.Tensor): scale center, arbitrary shape (..., 2). Default to (0, 0)

    Returns:
        (torch.Tensor): (..., 3, 3) transformation matrix
    """
    if isinstance(scale, float):
        scale = torch.tensor(scale)
    if center is not None:
        scale = scale.to(center)
    if center is None:
        center = torch.zeros(2, dtype=scale.dtype, device=scale.device).expand(*scale.shape, -1)
    # BUG FIX: lift `scale` to (..., 1, 1) before multiplying the identity;
    # `scale * eye.expand(*scale.shape[:-1], ...)` mis-broadcast for batched
    # scale tensors (scale.shape[:-1] dropped the batch dim).
    return torch.cat([
        torch.cat([
            scale[..., None, None] * torch.eye(2, dtype=scale.dtype, device=scale.device),
            center[..., :, None] - center[..., :, None] * scale[..., None, None],
        ], dim=-1),
        torch.tensor([[0, 0, 1]], dtype=scale.dtype, device=scale.device).expand(*center.shape[:-1], -1, -1),
    ], dim=-2)


def apply_2d(transform: torch.Tensor, points: torch.Tensor):
    """
    Apply (3x3 or 2x3) 2D affine transformation to points
    ```
        p = R @ p + t
    ```
    Args:
        transform (torch.Tensor): (..., 2 or 3, 3) transformation matrix
        points (torch.Tensor): (..., N, 2) points to transform

    Returns:
        (torch.Tensor): (..., N, 2) transformed points
    """
    assert transform.shape[-2:] == (3, 3) or transform.shape[-2:] == (2, 3), "transform must be 3x3 or 2x3"
    assert points.shape[-1] == 2, "points must be 2D"
    # BUG FIX: the translation must broadcast as a (..., 1, 2) row against the
    # (..., N, 2) points; `transform[..., :2, None, 2]` produced (..., 2, 1),
    # which added the translation along the wrong axis.
    return points @ transform[..., :2, :2].mT + transform[..., None, :2, 2]
import mesh
from ._helpers import batched
from .._helpers import no_warnings


__all__ = [
    'sliding_window_1d',
    'sliding_window_2d',
    'sliding_window_nd',
    'image_uv',
    'image_pixel_center',
    'image_mesh',
    'chessboard',
    'depth_edge',
    'depth_aliasing',
    'image_mesh_from_depth',
    'points_to_normals',
    'depth_to_points',
    'depth_to_normals',
    'masked_min',
    'masked_max',
    'bounding_rect'
]


def sliding_window_1d(x: torch.Tensor, window_size: int, stride: int = 1, dim: int = -1) -> torch.Tensor:
    """
    Sliding window view of the input tensor. The window dimension is appended to the end of the output shape.
    NOTE: Since Pytorch has `unfold`, the 1D sliding window view is just a wrapper of it.

    Args:
        x (torch.Tensor): input tensor
        window_size (int): size of the sliding window
        stride (int): step between consecutive windows. Defaults to 1.
        dim (int): dimension along which to slide. Defaults to -1.

    Returns:
        torch.Tensor: view of `x` with an extra trailing window dimension of size `window_size`
    """
    return x.unfold(dim, window_size, stride)


def sliding_window_nd(x: torch.Tensor, window_size: Tuple[int, ...], stride: Tuple[int, ...], dim: Tuple[int, ...]) -> torch.Tensor:
    """
    N-dimensional sliding window view, obtained by applying `sliding_window_1d` once per given dimension.
    One window dimension is appended to the end of the shape for each entry of `dim`.
    """
    # Normalize negative dims first: each unfold appends a trailing dimension,
    # which would shift the meaning of negative indices on later iterations.
    dim = [dim[i] % x.ndim for i in range(len(dim))]
    assert len(window_size) == len(stride) == len(dim)
    for i in range(len(window_size)):
        x = sliding_window_1d(x, window_size[i], stride[i], dim[i])
    return x


def sliding_window_2d(x: torch.Tensor, window_size: Union[int, Tuple[int, int]], stride: Union[int, Tuple[int, int]], dim: Union[int, Tuple[int, int]] = (-2, -1)) -> torch.Tensor:
    """
    2D sliding window view over two dimensions (by default the last two).
    Scalar `window_size`/`stride` are broadcast to both dimensions.
    """
    if isinstance(window_size, int):
        window_size = (window_size, window_size)
    if isinstance(stride, int):
        stride = (stride, stride)
    return sliding_window_nd(x, window_size, stride, dim)


def image_uv(height: int, width: int, left: int = None, top: int = None, right: int = None, bottom: int = None, device: torch.device = None, dtype: torch.dtype = None) -> torch.Tensor:
    """
    Get image space UV grid, ranging in [0, 1].

    >>> image_uv(10, 10):
    [[[0.05, 0.05], [0.15, 0.05], ..., [0.95, 0.05]],
     [[0.05, 0.15], [0.15, 0.15], ..., [0.95, 0.15]],
      ...             ...                  ...
     [[0.05, 0.95], [0.15, 0.95], ..., [0.95, 0.95]]]

    Args:
        width (int): image width
        height (int): image height
        left, top, right, bottom (int, optional): sub-rectangle of pixels to cover.
            Default to the full image (0, 0, width, height).

    Returns:
        torch.Tensor: shape (height, width, 2) — or (bottom - top, right - left, 2) for a sub-rectangle
    """
    if left is None: left = 0
    if top is None: top = 0
    if right is None: right = width
    if bottom is None: bottom = height
    # Sample at pixel centers: pixel j has u = (j + 0.5) / width.
    u = torch.linspace((left + 0.5) / width, (right - 0.5) / width, right - left, device=device, dtype=dtype)
    v = torch.linspace((top + 0.5) / height, (bottom - 0.5) / height, bottom - top, device=device, dtype=dtype)
    u, v = torch.meshgrid(u, v, indexing='xy')
    uv = torch.stack([u, v], dim=-1)
    return uv


def image_pixel_center(
    height: int,
    width: int,
    left: int = None,
    top: int = None,
    right: int = None,
    bottom: int = None,
    dtype: torch.dtype = None,
    device: torch.device = None
) -> torch.Tensor:
    """
    Get image pixel center coordinates, ranging in [0, width] and [0, height].
    `image[i, j]` has pixel center coordinates `(j + 0.5, i + 0.5)`.

    >>> image_pixel_center(10, 10):
    [[[0.5, 0.5], [1.5, 0.5], ..., [9.5, 0.5]],
     [[0.5, 1.5], [1.5, 1.5], ..., [9.5, 1.5]],
      ...           ...               ...
     [[0.5, 9.5], [1.5, 9.5], ..., [9.5, 9.5]]]

    Args:
        width (int): image width
        height (int): image height
        left, top, right, bottom (int, optional): sub-rectangle of pixels to cover.
            Default to the full image (0, 0, width, height).

    Returns:
        torch.Tensor: shape (height, width, 2)
    """
    if left is None: left = 0
    if top is None: top = 0
    if right is None: right = width
    if bottom is None: bottom = height
    u = torch.linspace(left + 0.5, right - 0.5, right - left, dtype=dtype, device=device)
    v = torch.linspace(top + 0.5, bottom - 0.5, bottom - top, dtype=dtype, device=device)
    u, v = torch.meshgrid(u, v, indexing='xy')
    return torch.stack([u, v], dim=2)


def image_mesh(height: int, width: int, mask: torch.Tensor = None, device: torch.device = None, dtype: torch.dtype = None) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Get a quad mesh regarding image pixel uv coordinates as vertices and image grid as faces.

    Args:
        width (int): image width
        height (int): image height
        mask (torch.Tensor, optional): binary mask of shape (height, width), dtype=bool. Defaults to None.

    Returns:
        uv (torch.Tensor): uv corresponding to pixels as described in image_uv()
        faces (torch.Tensor): quad faces connecting neighboring pixels
        indices (torch.Tensor, optional): indices of vertices in the original mesh (only returned when `mask` is given)
    """
    if device is None and mask is not None:
        device = mask.device
    if mask is not None:
        assert mask.shape[0] == height and mask.shape[1] == width
        assert mask.dtype == torch.bool
    uv = image_uv(height, width, device=device, dtype=dtype).reshape((-1, 2))
    # One quad per pixel of the top-left (height-1, width-1) grid, with CCW
    # vertex order (top-left, bottom-left, bottom-right, top-right) in row-major
    # flattened indices.
    row_faces = torch.stack([
        torch.arange(0, width - 1, dtype=torch.int32, device=device),
        torch.arange(width, 2 * width - 1, dtype=torch.int32, device=device),
        torch.arange(1 + width, 2 * width, dtype=torch.int32, device=device),
        torch.arange(1, width, dtype=torch.int32, device=device)
    ], dim=1)
    faces = (torch.arange(0, (height - 1) * width, width, device=device, dtype=torch.int32)[:, None, None] + row_faces[None, :, :]).reshape((-1, 4))
    if mask is not None:
        # Keep only quads whose four corner pixels are all inside the mask.
        quad_mask = (mask[:-1, :-1] & mask[1:, :-1] & mask[1:, 1:] & mask[:-1, 1:]).ravel()
        faces = faces[quad_mask]
        faces, uv, indices = mesh.remove_unreferenced_vertices(faces, uv, return_indices=True)
        return uv, faces, indices
    return uv, faces


def depth_edge(depth: torch.Tensor, atol: float = None, rtol: float = None, kernel_size: int = 3, mask: torch.Tensor = None) -> torch.BoolTensor:
    """
    Compute the edge mask of a depth map. The edge is defined as the pixels whose neighbors have a large difference in depth.

    Args:
        depth (torch.Tensor): shape (..., height, width), linear depth map
        atol (float): absolute tolerance
        rtol (float): relative tolerance
        kernel_size (int): neighborhood window size. Defaults to 3.
        mask (torch.Tensor, optional): valid-pixel mask, same spatial shape as `depth`.

    Returns:
        edge (torch.Tensor): shape (..., height, width) of dtype torch.bool
    """
    shape = depth.shape
    depth = depth.reshape(-1, 1, *shape[-2:])
    if mask is not None:
        mask = mask.reshape(-1, 1, *shape[-2:])

    # max(depth) + max(-depth) over the window == local max - local min.
    # Invalid pixels are set to -inf so they never win either max-pool.
    if mask is None:
        diff = (F.max_pool2d(depth, kernel_size, stride=1, padding=kernel_size // 2) + F.max_pool2d(-depth, kernel_size, stride=1, padding=kernel_size // 2))
    else:
        diff = (F.max_pool2d(torch.where(mask, depth, -torch.inf), kernel_size, stride=1, padding=kernel_size // 2) + F.max_pool2d(torch.where(mask, -depth, -torch.inf), kernel_size, stride=1, padding=kernel_size // 2))

    edge = torch.zeros_like(depth, dtype=torch.bool)
    if atol is not None:
        edge |= diff > atol
    if rtol is not None:
        edge |= (diff / depth).nan_to_num_() > rtol
    edge = edge.reshape(*shape)
    return edge


def depth_aliasing(depth: torch.Tensor, atol: float = None, rtol: float = None, kernel_size: int = 3, mask: torch.Tensor = None) -> torch.BoolTensor:
    """
    Compute the map that indicates the aliasing of a depth map. The aliasing is defined as the pixels which neither close to the maximum nor the minimum of its neighbors.

    Args:
        depth (torch.Tensor): shape (..., height, width), linear depth map
        atol (float): absolute tolerance
        rtol (float): relative tolerance
        kernel_size (int): neighborhood window size. Defaults to 3.
        mask (torch.Tensor, optional): valid-pixel mask, same spatial shape as `depth`.

    Returns:
        edge (torch.Tensor): shape (..., height, width) of dtype torch.bool
    """
    shape = depth.shape
    depth = depth.reshape(-1, 1, *shape[-2:])
    if mask is not None:
        mask = mask.reshape(-1, 1, *shape[-2:])

    # Distance of each pixel's depth to the local max and local min; a pixel
    # is aliased when it is far from BOTH (i.e. min of the two gaps is large).
    if mask is None:
        diff_max = F.max_pool2d(depth, kernel_size, stride=1, padding=kernel_size // 2) - depth
        diff_min = F.max_pool2d(-depth, kernel_size, stride=1, padding=kernel_size // 2) + depth
    else:
        diff_max = F.max_pool2d(torch.where(mask, depth, -torch.inf), kernel_size, stride=1, padding=kernel_size // 2) - depth
        diff_min = F.max_pool2d(torch.where(mask, -depth, -torch.inf), kernel_size, stride=1, padding=kernel_size // 2) + depth
    diff = torch.minimum(diff_max, diff_min)

    edge = torch.zeros_like(depth, dtype=torch.bool)
    if atol is not None:
        edge |= diff > atol
    if rtol is not None:
        edge |= (diff / depth).nan_to_num_() > rtol
    edge = edge.reshape(*shape)
    return edge


def image_mesh_from_depth(
    depth: torch.Tensor,
    extrinsics: torch.Tensor = None,
    intrinsics: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Build a triangle mesh from a depth map by unprojecting every pixel.

    Args:
        depth (torch.Tensor): shape (height, width), linear depth map
        extrinsics (torch.Tensor, optional): extrinsics matrix
        intrinsics (torch.Tensor, optional): intrinsics matrix

    Returns:
        pts (torch.Tensor): (height * width, 3) unprojected points
        faces (torch.Tensor): triangulated faces
    """
    height, width = depth.shape
    uv, faces = image_mesh(height, width)
    faces = faces.reshape(-1, 4)
    depth = depth.reshape(-1)
    # FIX: the original passed the `image_uv` *function* instead of the `uv`
    # grid computed above, which would fail at runtime inside unproject_cv.
    pts = transforms.unproject_cv(uv, depth, extrinsics, intrinsics)
    faces = mesh.triangulate(faces, vertices=pts)
    return pts, faces


@batched(3, 2, 2)
def points_to_normals(point: torch.Tensor, mask: torch.Tensor = None) -> torch.Tensor:
    """
    Calculate normal map from point map. Value range is [-1, 1]. Normal direction in OpenGL identity camera's coordinate system.

    Args:
        point (torch.Tensor): shape (..., height, width, 3), point map
        mask (torch.Tensor, optional): shape (..., height, width), valid-pixel mask

    Returns:
        normal (torch.Tensor): shape (..., height, width, 3), normal map.
            When `mask` is given, also returns a (..., height, width) bool mask of pixels
            where at least one neighbor pair was valid.
    """
    has_mask = mask is not None

    if mask is None:
        mask = torch.ones_like(point[..., 0], dtype=torch.bool)
    mask = F.pad(mask, (1, 1, 1, 1), mode='constant', value=0)

    # Pad the point map by one pixel on each side; padded values are masked
    # out via `valid`, so the constant value only needs to be finite.
    pts = F.pad(point.permute(0, 3, 1, 2), (1, 1, 1, 1), mode='constant', value=1).permute(0, 2, 3, 1)
    up = pts[:, :-2, 1:-1, :] - pts[:, 1:-1, 1:-1, :]
    left = pts[:, 1:-1, :-2, :] - pts[:, 1:-1, 1:-1, :]
    down = pts[:, 2:, 1:-1, :] - pts[:, 1:-1, 1:-1, :]
    right = pts[:, 1:-1, 2:, :] - pts[:, 1:-1, 1:-1, :]
    # Four candidate normals from the four adjacent neighbor pairs.
    normal = torch.stack([
        torch.cross(up, left, dim=-1),
        torch.cross(left, down, dim=-1),
        torch.cross(down, right, dim=-1),
        torch.cross(right, up, dim=-1),
    ])
    normal = F.normalize(normal, dim=-1)
    # A candidate is valid when the center pixel and both of its neighbors are valid.
    valid = torch.stack([
        mask[:, :-2, 1:-1] & mask[:, 1:-1, :-2],
        mask[:, 1:-1, :-2] & mask[:, 2:, 1:-1],
        mask[:, 2:, 1:-1] & mask[:, 1:-1, 2:],
        mask[:, 1:-1, 2:] & mask[:, :-2, 1:-1],
    ]) & mask[None, :, 1:-1, 1:-1]
    # Average the valid candidates and renormalize.
    normal = (normal * valid[..., None]).sum(dim=0)
    normal = F.normalize(normal, dim=-1)

    if has_mask:
        return normal, valid.any(dim=0)
    else:
        return normal


def depth_to_normals(depth: torch.Tensor, intrinsics: torch.Tensor, mask: torch.Tensor = None) -> torch.Tensor:
    """
    Calculate normal map from depth map. Value range is [-1, 1]. Normal direction in OpenGL identity camera's coordinate system.

    Args:
        depth (torch.Tensor): shape (..., height, width), linear depth map
        intrinsics (torch.Tensor): shape (..., 3, 3), intrinsics matrix
        mask (torch.Tensor, optional): shape (..., height, width), valid-pixel mask

    Returns:
        normal (torch.Tensor): shape (..., height, width, 3), normal map, as produced by points_to_normals()
            (the original docstring claimed channel-first (..., 3, height, width), which does not match).
    """
    pts = depth_to_points(depth, intrinsics)
    return points_to_normals(pts, mask)


def depth_to_points(depth: torch.Tensor, intrinsics: torch.Tensor, extrinsics: torch.Tensor = None):
    """Unproject a depth map to a point map using the camera intrinsics (and optional extrinsics)."""
    height, width = depth.shape[-2:]
    uv = image_uv(width=width, height=height, dtype=depth.dtype, device=depth.device)
    # Insert a broadcast dim so the (..., 3, 3) matrices apply per-pixel.
    pts = transforms.unproject_cv(uv, depth, intrinsics=intrinsics[..., None, :, :], extrinsics=extrinsics[..., None, :, :] if extrinsics is not None else None)
    return pts


def masked_min(input: torch.Tensor, mask: torch.BoolTensor, dim: int = None, keepdim: bool = False) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """Similar to torch.min, but with mask: masked-out entries are treated as +inf."""
    if dim is None:
        return torch.where(mask, input, torch.tensor(torch.inf, dtype=input.dtype, device=input.device)).min()
    else:
        return torch.where(mask, input, torch.tensor(torch.inf, dtype=input.dtype, device=input.device)).min(dim=dim, keepdim=keepdim)


def masked_max(input: torch.Tensor, mask: torch.BoolTensor, dim: int = None, keepdim: bool = False) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """Similar to torch.max, but with mask: masked-out entries are treated as -inf."""
    if dim is None:
        return torch.where(mask, input, torch.tensor(-torch.inf, dtype=input.dtype, device=input.device)).max()
    else:
        return torch.where(mask, input, torch.tensor(-torch.inf, dtype=input.dtype, device=input.device)).max(dim=dim, keepdim=keepdim)


def bounding_rect(mask: torch.BoolTensor):
    """get bounding rectangle of a mask

    Args:
        mask (torch.Tensor): shape (..., height, width), mask

    Returns:
        rect (torch.Tensor): shape (..., 4), bounding rectangle (left, top, right, bottom)
            NOTE(review): coordinates come from image_uv(), so they appear to be
            normalized to [0, 1], not pixel units — confirm against callers.
    """
    height, width = mask.shape[-2:]
    mask = mask.flatten(-2).unsqueeze(-1)
    uv = image_uv(height, width).to(mask.device).reshape(-1, 2)
    left_top = masked_min(uv, mask, dim=-2)[0]
    right_bottom = masked_max(uv, mask, dim=-2)[0]
    return torch.cat([left_top, right_bottom], dim=-1)


def chessboard(width: int, height: int, grid_size: int, color_a: torch.Tensor, color_b: torch.Tensor) -> torch.Tensor:
    """get a chessboard image

    Args:
        width (int): image width
        height (int): image height
        grid_size (int): size of chessboard grid
        color_a (torch.Tensor): shape (channels,), color of the grid at the top-left corner
        color_b (torch.Tensor): shape (channels,), color in complementary grids

    Returns:
        image (torch.Tensor): shape (height, width, channels), chessboard image
    """
    x = torch.div(torch.arange(width), grid_size, rounding_mode='floor')
    y = torch.div(torch.arange(height), grid_size, rounding_mode='floor')
    # Parity of (cell_x + cell_y) selects color_a (0) or color_b (1).
    mask = ((x[None, :] + y[:, None]) % 2).to(color_a)
    image = (1 - mask[..., None]) * color_a + mask[..., None] * color_b
    return image