diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/build_conda.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/build_conda.py
new file mode 100644
index 0000000000000000000000000000000000000000..554e86c6d8cf18d418176b51f1f84a0eeee158c0
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/build_conda.py
@@ -0,0 +1,145 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os.path
+import runpy
+import subprocess
+from typing import List
+
+# required env vars:
+# CU_VERSION: E.g. cu112
+# JUST_TESTRUN: 1 to not set nvcc flags
+# PYTORCH_VERSION: e.g. 1.12.0
+# PYTHON_VERSION: e.g. 3.9
+
+# should be run from pytorch3d root
+
+CU_VERSION = os.environ["CU_VERSION"]
+PYTORCH_VERSION = os.environ["PYTORCH_VERSION"]
+pytorch_major_minor = tuple(int(i) for i in PYTORCH_VERSION.split(".")[:2])
+source_root_dir = os.environ["PWD"]
+
+
+def version_constraint(version):
+ """
+ Given version "11.3" returns " >=11.3,<11.4"
+ """
+ last_part = version.rindex(".") + 1
+ upper = version[:last_part] + str(1 + int(version[last_part:]))
+ return f" >={version},<{upper}"
+
+
+def get_cuda_major_minor():
+ if CU_VERSION == "cpu":
+ raise ValueError("fn only for cuda builds")
+ if len(CU_VERSION) != 5 or CU_VERSION[:2] != "cu":
+ raise ValueError(f"Bad CU_VERSION {CU_VERSION}")
+ major = CU_VERSION[2:4]
+ minor = CU_VERSION[4]
+ return major, minor
+
+
+def setup_cuda():
+ if CU_VERSION == "cpu":
+ return
+ major, minor = get_cuda_major_minor()
+ os.environ["CUDA_HOME"] = f"/usr/local/cuda-{major}.{minor}/"
+ os.environ["FORCE_CUDA"] = "1"
+
+ basic_nvcc_flags = (
+ "-gencode=arch=compute_50,code=sm_50 "
+ "-gencode=arch=compute_60,code=sm_60 "
+ "-gencode=arch=compute_70,code=sm_70 "
+ "-gencode=arch=compute_75,code=sm_75 "
+ "-gencode=arch=compute_50,code=compute_50"
+ )
+ if CU_VERSION == "cu102":
+ nvcc_flags = "-gencode=arch=compute_35,code=sm_35 " + basic_nvcc_flags
+ elif CU_VERSION < ("cu118"):
+ nvcc_flags = (
+ "-gencode=arch=compute_35,code=sm_35 "
+ + "-gencode=arch=compute_80,code=sm_80 "
+ + "-gencode=arch=compute_86,code=sm_86 "
+ + basic_nvcc_flags
+ )
+ else:
+ nvcc_flags = (
+ "-gencode=arch=compute_80,code=sm_80 "
+ + "-gencode=arch=compute_86,code=sm_86 "
+ + "-gencode=arch=compute_90,code=sm_90 "
+ + basic_nvcc_flags
+ )
+
+ if os.environ.get("JUST_TESTRUN", "0") != "1":
+ os.environ["NVCC_FLAGS"] = nvcc_flags
+
+
+def setup_conda_pytorch_constraint() -> List[str]:
+ pytorch_constraint = f"- pytorch=={PYTORCH_VERSION}"
+ os.environ["CONDA_PYTORCH_CONSTRAINT"] = pytorch_constraint
+ if pytorch_major_minor < (2, 2):
+ os.environ["CONDA_PYTORCH_MKL_CONSTRAINT"] = "- mkl!=2024.1.0"
+ os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools<70"
+ else:
+ os.environ["CONDA_PYTORCH_MKL_CONSTRAINT"] = ""
+ os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools"
+ os.environ["CONDA_PYTORCH_BUILD_CONSTRAINT"] = pytorch_constraint
+ os.environ["PYTORCH_VERSION_NODOT"] = PYTORCH_VERSION.replace(".", "")
+
+ if pytorch_major_minor < (1, 13):
+ return ["-c", "pytorch"]
+ else:
+ return ["-c", "pytorch", "-c", "nvidia"]
+
+
+def setup_conda_cudatoolkit_constraint():
+ if CU_VERSION == "cpu":
+ os.environ["CONDA_CPUONLY_FEATURE"] = "- cpuonly"
+ os.environ["CONDA_CUDATOOLKIT_CONSTRAINT"] = ""
+ return
+ os.environ["CONDA_CPUONLY_FEATURE"] = ""
+
+ if CU_VERSION in ("cu102", "cu110"):
+ os.environ["CONDA_CUB_CONSTRAINT"] = "- nvidiacub"
+ else:
+ os.environ["CONDA_CUB_CONSTRAINT"] = ""
+
+ major, minor = get_cuda_major_minor()
+ version_clause = version_constraint(f"{major}.{minor}")
+ if pytorch_major_minor < (1, 13):
+ toolkit = f"- cudatoolkit {version_clause}"
+ else:
+ toolkit = f"- pytorch-cuda {version_clause}"
+ os.environ["CONDA_CUDATOOLKIT_CONSTRAINT"] = toolkit
+
+
+def do_build(start_args: List[str]):
+ args = start_args.copy()
+
+ test_flag = os.environ.get("TEST_FLAG")
+ if test_flag is not None:
+ args.append(test_flag)
+
+ args.extend(["-c", "bottler", "-c", "iopath", "-c", "conda-forge"])
+ args.append("--no-anaconda-upload")
+ args.extend(["--python", os.environ["PYTHON_VERSION"]])
+ args.append("packaging/pytorch3d")
+ print(args)
+ subprocess.check_call(args)
+
+
+if __name__ == "__main__":
+ args = ["conda", "build"]
+ setup_cuda()
+
+ init_path = source_root_dir + "/pytorch3d/__init__.py"
+ build_version = runpy.run_path(init_path)["__version__"]
+ os.environ["BUILD_VERSION"] = build_version
+
+ os.environ["SOURCE_ROOT_DIR"] = source_root_dir
+ args += setup_conda_pytorch_constraint()
+ setup_conda_cudatoolkit_constraint()
+ do_build(args)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/build_wheel.sh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/build_wheel.sh
new file mode 100644
index 0000000000000000000000000000000000000000..afe5a0e81359f681302628f670fb16f44cf65851
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/build_wheel.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+set -ex
+
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+. "$script_dir/pkg_helpers.bash"
+
+VERSION=$(python -c "exec(open('${script_dir}/../pytorch3d/__init__.py').read()); print(__version__)")
+
+export BUILD_TYPE=wheel
+setup_env "$VERSION"
+setup_wheel_python
+pip_install numpy
+setup_pip_pytorch_version
+download_nvidiacub_if_needed
+python setup.py clean
+IS_WHEEL=1 python setup.py bdist_wheel
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/build_pytorch3d.sh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/build_pytorch3d.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6be532fb84b3d36561cf015268b8e8f43c9228c8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/build_pytorch3d.sh
@@ -0,0 +1,218 @@
+#!/usr/bin/env bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+if [[ -x "/remote/anaconda_token" ]]; then
+ . /remote/anaconda_token || true
+fi
+
+set -ex
+
+# Function to retry functions that sometimes timeout or have flaky failures
+retry () {
+ $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
+}
+
+# Parse arguments and determine version
+###########################################################
+
+if [ "$#" -ne 3 ]; then
+ echo "Illegal number of parameters. Pass cuda version, pytorch3d version, pytorch3d build number"
+ echo "CUDA version should be Mm with no dot, e.g. '80'"
+ echo "DESIRED_PYTHON should be M.m, e.g. '2.7'"
+ exit 1
+fi
+
+desired_cuda="$1"
+build_version="$2"
+build_number="$3"
+
+if [[ "$desired_cuda" != cpu ]]; then
+ desired_cuda="$(echo $desired_cuda | tr -d cuda. )"
+fi
+echo "Building cuda version $desired_cuda and pytorch3d version: $build_version build_number: $build_number"
+
+if [[ "$desired_cuda" == 'cpu' ]]; then
+ cpu_only=1
+ cuver="cpu"
+else
+ # Switch desired_cuda to be M.m to be consistent with other scripts in
+ # pytorch/builder
+ export FORCE_CUDA=1
+ cuda_nodot="$desired_cuda"
+
+ if [[ ${#cuda_nodot} -eq 2 ]]; then
+ desired_cuda="${desired_cuda:0:1}.${desired_cuda:1:1}"
+ elif [[ ${#cuda_nodot} -eq 3 ]]; then
+ desired_cuda="${desired_cuda:0:2}.${desired_cuda:2:1}"
+ else
+ echo "unknown cuda version $cuda_nodot"
+ exit 1
+ fi
+
+ cuver="cu$cuda_nodot"
+fi
+
+export PYTORCH3D_BUILD_VERSION=$build_version
+export PYTORCH3D_BUILD_NUMBER=$build_number
+
+if [[ -z "$DESIRED_PYTHON" ]]; then
+ DESIRED_PYTHON=('3.5' '3.6' '3.7')
+fi
+
+SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
+
+if [[ -z "$WIN_PACKAGE_WORK_DIR" ]]; then
+ WIN_PACKAGE_WORK_DIR="$(echo $(pwd -W) | tr '/' '\\')\\tmp_conda_$(date +%H%M%S)"
+fi
+
+mkdir -p "$WIN_PACKAGE_WORK_DIR" || true
+pytorch3d_rootdir="$(realpath ${WIN_PACKAGE_WORK_DIR})/pytorch3d-src"
+git config --system core.longpaths true
+
+if [[ ! -d "$pytorch3d_rootdir" ]]; then
+ rm -rf "$pytorch3d_rootdir"
+    git clone "$SOURCE_DIR/../.." "$pytorch3d_rootdir"
+
+fi
+
+cd "$SOURCE_DIR"
+
+export tmp_conda="${WIN_PACKAGE_WORK_DIR}\\conda"
+export miniconda_exe="${WIN_PACKAGE_WORK_DIR}\\miniconda.exe"
+rm -rf "$tmp_conda"
+rm -f "$miniconda_exe"
+curl -sSk https://repo.continuum.io/miniconda/Miniconda3-latest-Windows-x86_64.exe -o "$miniconda_exe"
+"$SOURCE_DIR/install_conda.bat" && rm "$miniconda_exe"
+pushd $tmp_conda
+export PATH="$(pwd):$(pwd)/Library/usr/bin:$(pwd)/Library/bin:$(pwd)/Scripts:$(pwd)/bin:$PATH"
+popd
+retry conda install -yq conda-build
+
+ANACONDA_USER=pytorch-nightly
+conda config --set anaconda_upload no
+
+
+if [[ "$desired_cuda" == 'cpu' ]]; then
+ export CONDA_CUDATOOLKIT_CONSTRAINT=""
+ export CONDA_CPUONLY_FEATURE="- cpuonly # [not osx]"
+ export CUDA_VERSION="None"
+else
+ export CONDA_CPUONLY_FEATURE=""
+ . ./switch_cuda_version.sh $desired_cuda
+ if [[ "$desired_cuda" == "10.1" ]]; then
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.1,<10.2 # [not osx]"
+ elif [[ "$desired_cuda" == "10.0" ]]; then
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.0,<10.1 # [not osx]"
+ elif [[ "$desired_cuda" == "9.2" ]]; then
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=9.2,<9.3 # [not osx]"
+ elif [[ "$desired_cuda" == "9.0" ]]; then
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=9.0,<9.1 # [not osx]"
+ elif [[ "$desired_cuda" == "8.0" ]]; then
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=8.0,<8.1 # [not osx]"
+ else
+ echo "unhandled desired_cuda: $desired_cuda"
+ exit 1
+ fi
+fi
+
+if [[ -z "$PYTORCH_VERSION" ]]; then
+ export CONDA_CHANNEL_FLAGS="-c pytorch-nightly"
+ export PYTORCH_VERSION="$(conda search --json 'pytorch[channel=pytorch-nightly]' | \
+ python -c "import os, sys, json, re; cuver = '$cuver'; \
+ cuver = cuver.replace('cu', 'cuda') if cuver != 'cpu' else cuver; \
+ print(re.sub(r'\\+.*$', '', \
+ [x['version'] for x in json.load(sys.stdin)['pytorch'] \
+ if (x['platform'] == 'darwin' or cuver in x['fn']) \
+ and 'py' + os.environ['DESIRED_PYTHON'] in x['fn']][-1]))")"
+ if [[ -z "$PYTORCH_VERSION" ]]; then
+ echo "PyTorch version auto detection failed"
+ echo "No package found for desired_cuda=$desired_cuda and DESIRED_PYTHON=$DESIRED_PYTHON"
+ exit 1
+ fi
+else
+ export CONDA_CHANNEL_FLAGS="-c pytorch -c pytorch-nightly"
+fi
+if [[ "$desired_cuda" == 'cpu' ]]; then
+ export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==$PYTORCH_VERSION"
+ export CONDA_PYTORCH_CONSTRAINT="- pytorch==$PYTORCH_VERSION"
+else
+ export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==${PYTORCH_VERSION}"
+ export CONDA_PYTORCH_CONSTRAINT="- pytorch==${PYTORCH_VERSION}"
+fi
+
+export PYTORCH_VERSION_NODOT=${PYTORCH_VERSION//./}
+
+# Loop through all Python versions to build a package for each
+for py_ver in "${DESIRED_PYTHON[@]}"; do
+ build_string="py${py_ver}_${build_string_suffix}"
+ folder_tag="${build_string}_$(date +'%Y%m%d')"
+
+ # Create the conda package into this temporary folder. This is so we can find
+ # the package afterwards, as there's no easy way to extract the final filename
+ # from conda-build
+ output_folder="out_$folder_tag"
+ rm -rf "$output_folder"
+ mkdir "$output_folder"
+
+ export VSTOOLCHAIN_PACKAGE=vs2017
+
+ # We need to build the compiler activation scripts first on Windows
+ time VSDEVCMD_ARGS=${VSDEVCMD_ARGS[@]} \
+ conda build -c "$ANACONDA_USER" \
+ --no-anaconda-upload \
+ --output-folder "$output_folder" \
+ ../$VSTOOLCHAIN_PACKAGE
+
+ cp ../$VSTOOLCHAIN_PACKAGE/conda_build_config.yaml ../pytorch3d/conda_build_config.yaml
+
+ conda config --set anaconda_upload no
+ echo "Calling conda-build at $(date)"
+ if [[ "$desired_cuda" == "9.2" ]]; then
+ time CMAKE_ARGS=${CMAKE_ARGS[@]} \
+ BUILD_VERSION="$PYTORCH3D_BUILD_VERSION" \
+ CU_VERSION="$cuver" \
+ SOURCE_ROOT_DIR="$pytorch3d_rootdir" \
+ conda build -c "$ANACONDA_USER" \
+ -c defaults \
+ -c conda-forge \
+ -c "numba/label/dev" \
+ --no-anaconda-upload \
+ --python "$py_ver" \
+ --output-folder "$output_folder" \
+ --no-verify \
+ --no-test \
+ ../pytorch3d
+ else
+ time CMAKE_ARGS=${CMAKE_ARGS[@]} \
+ BUILD_VERSION="$PYTORCH3D_BUILD_VERSION" \
+ CU_VERSION="$cuver" \
+ SOURCE_ROOT_DIR="$pytorch3d_rootdir" \
+ conda build -c "$ANACONDA_USER" \
+ -c defaults \
+ -c conda-forge \
+ --no-anaconda-upload \
+ --python "$py_ver" \
+ --output-folder "$output_folder" \
+ --no-verify \
+ --no-test \
+ ../pytorch3d
+ fi
+ echo "Finished conda-build at $(date)"
+
+ # Extract the package for testing
+ ls -lah "$output_folder"
+ built_package="$(find $output_folder/ -name '*pytorch3d*.tar.bz2')"
+
+ # Copy the built package to the host machine for persistence before testing
+ if [[ -n "$PYTORCH_FINAL_PACKAGE_DIR" ]]; then
+ mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true
+ cp "$built_package" "$PYTORCH_FINAL_PACKAGE_DIR/"
+ fi
+done
+
+
+set +e
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/install_conda.bat b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/install_conda.bat
new file mode 100644
index 0000000000000000000000000000000000000000..c9aebe988f4dac55189ac7ae81e390ba9388684f
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/install_conda.bat
@@ -0,0 +1,7 @@
+@REM Copyright (c) Meta Platforms, Inc. and affiliates.
+@REM All rights reserved.
+@REM
+@REM This source code is licensed under the BSD-style license found in the
+@REM LICENSE file in the root directory of this source tree.
+
+start /wait "" "%miniconda_exe%" /S /InstallationType=JustMe /RegisterPython=0 /AddToPath=0 /D=%tmp_conda%
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/switch_cuda_version.sh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/switch_cuda_version.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e30f2c7b7b765f8f7966d7c42f887c37c210a208
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/switch_cuda_version.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+if [[ "$OSTYPE" == "msys" ]]; then
+ CUDA_DIR="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v$1"
+else
+ CUDA_DIR="/usr/local/cuda-$1"
+fi
+
+if ! ls "$CUDA_DIR"
+then
+ echo "folder $CUDA_DIR not found to switch"
+fi
+
+echo "Switching symlink to $CUDA_DIR"
+mkdir -p /usr/local
+rm -fr /usr/local/cuda
+ln -s "$CUDA_DIR" /usr/local/cuda
+
+if [[ "$OSTYPE" == "msys" ]]; then
+ export CUDA_VERSION=`ls /usr/local/cuda/bin/cudart64*.dll | head -1 | tr '._' ' ' | cut -d ' ' -f2`
+ export CUDNN_VERSION=`ls /usr/local/cuda/bin/cudnn64*.dll | head -1 | tr '._' ' ' | cut -d ' ' -f2`
+else
+ export CUDA_VERSION=$(ls /usr/local/cuda/lib64/libcudart.so.*|sort|tac | head -1 | rev | cut -d"." -f -3 | rev)
+ export CUDNN_VERSION=$(ls /usr/local/cuda/lib64/libcudnn.so.*|sort|tac | head -1 | rev | cut -d"." -f -3 | rev)
+fi
+
+ls -alh /usr/local/cuda
+
+echo "CUDA_VERSION=$CUDA_VERSION"
+echo "CUDNN_VERSION=$CUDNN_VERSION"
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/cub_conda/README.md b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/cub_conda/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..fbf71eb4fb126dde214ebf60f99aa80738155983
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/cub_conda/README.md
@@ -0,0 +1,26 @@
+## For building conda package for NVIDIA CUB
+
+CUB is required for building PyTorch3D so it makes sense
+to provide a conda package to make its header files available.
+This directory is used to do that, it is independent of the rest
+of this repo.
+
+Make sure you are in a conda environment with
+anaconda-client and conda-build installed.
+
+From this directory, build the package with the following.
+```
+mkdir -p ./out
+conda build --no-anaconda-upload --output-folder ./out cub
+```
+
+You can then upload the package with the following.
+```
+retry () {
+ # run a command, and try again if it fails
+ $* || (echo && sleep 8 && echo retrying && $*)
+}
+
+file=out/linux-64/nvidiacub-1.10.0-0.tar.bz2
+retry anaconda --verbose -t ${TOKEN} upload -u pytorch3d --force ${file} --no-progress
+```
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/cub_conda/cub/meta.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/cub_conda/cub/meta.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7ebb038a58deeecfcabb662ad32d77129336604a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/cub_conda/cub/meta.yaml
@@ -0,0 +1,12 @@
+package:
+ name: nvidiacub
+ version: 1.10.0
+source:
+ url: https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
+ folder: source
+build:
+ script: mkdir $PREFIX/include && cp -r source/cub $PREFIX/include/cub
+
+about:
+ home: https://github.com/NVIDIA/cub
+ summary: CUB provides state-of-the-art, reusable software components for every layer of the CUDA programming model.
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/README.md b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..172aabf033cf2c27ae7e1e8d3e17f0ccfc113c9c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/README.md
@@ -0,0 +1,31 @@
+## Building Linux pip Packages
+
+1. Make sure this directory is on a filesystem which docker can
+use - e.g. not NFS. If you are using a local hard drive there is
+nothing to do here.
+
+2. You may want to `docker pull pytorch/conda-cuda:latest`.
+
+3. Run `bash go.sh` in this directory. This takes ages
+and writes packages to `inside/output`.
+
+4. You can upload the packages to s3, along with basic html files
+which enable them to be used, with `bash after.sh`.
+
+
+In particular, if you are in a jupyter/colab notebook you can
+then install using these wheels with the following series of
+commands.
+
+```
+import sys
+import torch
+pyt_version_str=torch.__version__.split("+")[0].replace(".", "")
+version_str="".join([
+ f"py3{sys.version_info.minor}_cu",
+ torch.version.cuda.replace(".",""),
+ f"_pyt{pyt_version_str}"
+])
+!pip install iopath
+!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
+```
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/after.sh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/after.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ccfce6473e1f819244b2f3cc77bb45f66e6735f8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/after.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+set -ex
+sudo chown -R "$USER" output
+python publish.py
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/go.sh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/go.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1929a4671ad4f2abe28bf8f2fbc725d5b0d98cc9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/go.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# Some directory to persist downloaded conda packages
+conda_cache=/raid/$USER/building_conda_cache
+
+mkdir -p "$conda_cache"
+
+sudo docker run --rm -v "$conda_cache:/conda_cache" -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu113 pytorch/conda-builder:cuda113 bash inside/packaging/linux_wheels/inside.sh
+sudo docker run --rm -v "$conda_cache:/conda_cache" -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu115 pytorch/conda-builder:cuda115 bash inside/packaging/linux_wheels/inside.sh
+sudo docker run --rm -v "$conda_cache:/conda_cache" -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu116 pytorch/conda-builder:cuda116 bash inside/packaging/linux_wheels/inside.sh
+sudo docker run --rm -v "$conda_cache:/conda_cache" -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu117 pytorch/conda-builder:cuda117 bash inside/packaging/linux_wheels/inside.sh
+sudo docker run --rm -v "$conda_cache:/conda_cache" -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu118 pytorch/conda-builder:cuda118 bash inside/packaging/linux_wheels/inside.sh
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/inside.sh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/inside.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8c7a718d609ae15d5b81dfe33686009df73f4466
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/inside.sh
@@ -0,0 +1,163 @@
+#!/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+set -ex
+
+conda init bash
+# shellcheck source=/dev/null
+source ~/.bashrc
+
+cd /inside
+VERSION=$(python -c "exec(open('pytorch3d/__init__.py').read()); print(__version__)")
+
+export BUILD_VERSION=$VERSION
+export FORCE_CUDA=1
+export MAX_JOBS=8
+export CONDA_PKGS_DIRS=/conda_cache
+
+if false
+then
+ # We used to have to do this for old versions of CUDA
+ wget --no-verbose https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
+ tar xzf 1.10.0.tar.gz
+ CUB_HOME=$(realpath ./cub-1.10.0)
+ export CUB_HOME
+ echo "CUB_HOME is now $CUB_HOME"
+fi
+
+# As a rule, we want to build for any combination of dependencies which is supported by
+# PyTorch3D and not older than the current Google Colab set up.
+
+PYTHON_VERSIONS="3.8 3.9 3.10"
+# the keys are pytorch versions
+declare -A CONDA_CUDA_VERSIONS=(
+# ["1.11.0"]="cu113"
+# ["1.12.0"]="cu113"
+# ["1.12.1"]="cu113"
+# ["1.13.0"]="cu116"
+# ["1.13.1"]="cu116 cu117"
+# ["2.0.0"]="cu117 cu118"
+ ["2.0.1"]="cu117 cu118"
+)
+
+
+
+for python_version in $PYTHON_VERSIONS
+do
+ for pytorch_version in "${!CONDA_CUDA_VERSIONS[@]}"
+ do
+ if [[ "3.7 3.8 3.9" != *$python_version* ]] && [[ "1.7.0 1.7.1 1.8.0 1.8.1 1.9.0 1.9.1 1.10.0 1.10.1 1.10.2" == *$pytorch_version* ]]
+ then
+ #python 3.10 and later not supported by pytorch 1.10.2 and before
+ continue
+ fi
+
+ extra_channel="-c nvidia"
+ cudatools="pytorch-cuda"
+ if [[ "1.11.0" == "$pytorch_version" ]]
+ then
+ extra_channel=""
+ cudatools="cudatoolkit"
+ fi
+ if [[ "1.12.0" == "$pytorch_version" ]] || [[ "1.12.1" == "$pytorch_version" ]]
+ then
+ extra_channel="-c conda-forge"
+ cudatools="cudatoolkit"
+ fi
+
+ for cu_version in ${CONDA_CUDA_VERSIONS[$pytorch_version]}
+ do
+ if [[ $SELECTED_CUDA != "$cu_version" ]]
+ then
+ continue
+ fi
+
+ case "$cu_version" in
+ cu118)
+ export CUDA_HOME=/usr/local/cuda-11.8/
+ export CUDA_TAG=11.8
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu117)
+ export CUDA_HOME=/usr/local/cuda-11.7/
+ export CUDA_TAG=11.7
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu116)
+ export CUDA_HOME=/usr/local/cuda-11.6/
+ export CUDA_TAG=11.6
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu115)
+ export CUDA_HOME=/usr/local/cuda-11.5/
+ export CUDA_TAG=11.5
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu113)
+ export CUDA_HOME=/usr/local/cuda-11.3/
+ export CUDA_TAG=11.3
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu112)
+ export CUDA_HOME=/usr/local/cuda-11.2/
+ export CUDA_TAG=11.2
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu111)
+ export CUDA_HOME=/usr/local/cuda-11.1/
+ export CUDA_TAG=11.1
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu110)
+ export CUDA_HOME=/usr/local/cuda-11.0/
+ export CUDA_TAG=11.0
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu102)
+ export CUDA_HOME=/usr/local/cuda-10.2/
+ export CUDA_TAG=10.2
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu101)
+ export CUDA_HOME=/usr/local/cuda-10.1/
+ export CUDA_TAG=10.1
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ *)
+ echo "Unrecognized cu_version=$cu_version"
+ exit 1
+ ;;
+ esac
+ tag=py"${python_version//./}"_"${cu_version}"_pyt"${pytorch_version//./}"
+
+ outdir="/inside/packaging/linux_wheels/output/$tag"
+ if [[ -d "$outdir" ]]
+ then
+ continue
+ fi
+
+ conda create -y -n "$tag" "python=$python_version"
+ conda activate "$tag"
+ # shellcheck disable=SC2086
+ conda install -y -c pytorch $extra_channel "pytorch=$pytorch_version" "$cudatools=$CUDA_TAG"
+ pip install iopath
+ echo "python version" "$python_version" "pytorch version" "$pytorch_version" "cuda version" "$cu_version" "tag" "$tag"
+
+ rm -rf dist
+
+ python setup.py clean
+ python setup.py bdist_wheel
+
+ rm -rf "$outdir"
+ mkdir -p "$outdir"
+ cp dist/*whl "$outdir"
+
+ conda deactivate
+ done
+ done
+done
+echo "DONE"
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/publish.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/publish.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d0da93c740bf7c2ae63b8c659c7d217346b5a76
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/publish.py
@@ -0,0 +1,87 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import subprocess
+from pathlib import Path
+from typing import List
+
+
+dest = "s3://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/"
+
+output = Path("output")
+
+
+def aws_s3_cmd(args) -> List[str]:
+ """
+ This function returns the full args for subprocess to do a command
+ with aws.
+ """
+ cmd_args = ["aws", "s3", "--profile", "saml"] + args
+ return cmd_args
+
+
+def fs3_exists(path) -> bool:
+ """
+ Returns True if the path exists inside dest on S3.
+ In fact, will also return True if there is a file which has the given
+ path as a prefix, but we are careful about this.
+ """
+ out = subprocess.check_output(aws_s3_cmd(["ls", path]))
+ return len(out) != 0
+
+
+def get_html_wrappers() -> None:
+ for directory in sorted(output.iterdir()):
+ output_wrapper = directory / "download.html"
+ assert not output_wrapper.exists()
+ dest_wrapper = dest + directory.name + "/download.html"
+ if fs3_exists(dest_wrapper):
+ subprocess.check_call(aws_s3_cmd(["cp", dest_wrapper, str(output_wrapper)]))
+
+
+def write_html_wrappers() -> None:
+ html = """
+ $
+ """
+
+ for directory in sorted(output.iterdir()):
+ files = list(directory.glob("*.whl"))
+ assert len(files) == 1, files
+ [wheel] = files
+
+ this_html = html.replace("$", wheel.name)
+ output_wrapper = directory / "download.html"
+ if output_wrapper.exists():
+ contents = output_wrapper.read_text()
+ if this_html not in contents:
+ with open(output_wrapper, "a") as f:
+ f.write(this_html)
+ else:
+ output_wrapper.write_text(this_html)
+
+
+def to_aws() -> None:
+ for directory in output.iterdir():
+ for file in directory.iterdir():
+ print(file)
+ subprocess.check_call(
+ aws_s3_cmd(["cp", str(file), dest + str(file.relative_to(output))])
+ )
+
+
+if __name__ == "__main__":
+ # Uncomment this for subsequent releases.
+ # get_html_wrappers()
+ write_html_wrappers()
+ to_aws()
+
+
+# see all files with
+# aws s3 --profile saml ls --recursive s3://dl.fbaipublicfiles.com/pytorch3d/
+
+# empty current with
+# aws s3 --profile saml rm --recursive
+# s3://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/pkg_helpers.bash b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/pkg_helpers.bash
new file mode 100644
index 0000000000000000000000000000000000000000..e22643ecc59c721edb72a19186d242d77781bdbc
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/pkg_helpers.bash
@@ -0,0 +1,390 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# shellcheck shell=bash
+# A set of useful bash functions for common functionality we need to do in
+# many build scripts
+
+# Setup CUDA environment variables, based on CU_VERSION
+#
+# Inputs:
+# CU_VERSION (cu92, cu100, cu101, cu102)
+# NO_CUDA_PACKAGE (bool)
+# BUILD_TYPE (conda, wheel)
+#
+# Outputs:
+# VERSION_SUFFIX (e.g., "")
+# PYTORCH_VERSION_SUFFIX (e.g., +cpu)
+# WHEEL_DIR (e.g., cu100/)
+# CUDA_HOME (e.g., /usr/local/cuda-9.2, respected by torch.utils.cpp_extension)
+# FORCE_CUDA (respected by pytorch3d setup.py)
+# NVCC_FLAGS (respected by pytorch3d setup.py)
+#
+# Precondition: CUDA versions are installed in their conventional locations in
+# /usr/local/cuda-*
+#
+# NOTE: Why VERSION_SUFFIX versus PYTORCH_VERSION_SUFFIX? If you're building
+# a package with CUDA on a platform we support CUDA on, VERSION_SUFFIX ==
+# PYTORCH_VERSION_SUFFIX and everyone is happy. However, if you are building a
+# package with only CPU bits (e.g., torchaudio), then VERSION_SUFFIX is always
+# empty, but PYTORCH_VERSION_SUFFIX is +cpu (because that's how you get a CPU
+# version of a Python package. But that doesn't apply if you're on OS X,
+# since the default CU_VERSION on OS X is cpu.
+setup_cuda() {
+
+ # First, compute version suffixes. By default, assume no version suffixes
+ export VERSION_SUFFIX=""
+ export PYTORCH_VERSION_SUFFIX=""
+ export WHEEL_DIR=""
+ # Wheel builds need suffixes (but not if they're on OS X, which never has suffix)
+ if [[ "$BUILD_TYPE" == "wheel" ]] && [[ "$(uname)" != Darwin ]]; then
+ # The default CUDA has no suffix
+ if [[ "$CU_VERSION" != "cu102" ]]; then
+ export PYTORCH_VERSION_SUFFIX="+$CU_VERSION"
+ fi
+ # Match the suffix scheme of pytorch, unless this package does not have
+ # CUDA builds (in which case, use default)
+ if [[ -z "$NO_CUDA_PACKAGE" ]]; then
+ export VERSION_SUFFIX="$PYTORCH_VERSION_SUFFIX"
+ export WHEEL_DIR="$CU_VERSION/"
+ fi
+ fi
+
+ # Now work out the CUDA settings
+ case "$CU_VERSION" in
+ cu116)
+ if [[ "$OSTYPE" == "msys" ]]; then
+ export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.6"
+ else
+ export CUDA_HOME=/usr/local/cuda-11.6/
+ fi
+ export FORCE_CUDA=1
+ # Hard-coding gencode flags is temporary situation until
+ # https://github.com/pytorch/pytorch/pull/23408 lands
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu115)
+ if [[ "$OSTYPE" == "msys" ]]; then
+ export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.5"
+ else
+ export CUDA_HOME=/usr/local/cuda-11.5/
+ fi
+ export FORCE_CUDA=1
+ # Hard-coding gencode flags is temporary situation until
+ # https://github.com/pytorch/pytorch/pull/23408 lands
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu113)
+ if [[ "$OSTYPE" == "msys" ]]; then
+ export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.3"
+ else
+ export CUDA_HOME=/usr/local/cuda-11.3/
+ fi
+ export FORCE_CUDA=1
+ # Hard-coding gencode flags is temporary situation until
+ # https://github.com/pytorch/pytorch/pull/23408 lands
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu112)
+ if [[ "$OSTYPE" == "msys" ]]; then
+ export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.2"
+ else
+ export CUDA_HOME=/usr/local/cuda-11.2/
+ fi
+ export FORCE_CUDA=1
+ # Hard-coding gencode flags is temporary situation until
+ # https://github.com/pytorch/pytorch/pull/23408 lands
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu111)
+ if [[ "$OSTYPE" == "msys" ]]; then
+ export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.1"
+ else
+ export CUDA_HOME=/usr/local/cuda-11.1/
+ fi
+ export FORCE_CUDA=1
+ # Hard-coding gencode flags is temporary situation until
+ # https://github.com/pytorch/pytorch/pull/23408 lands
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu110)
+ if [[ "$OSTYPE" == "msys" ]]; then
+ export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0"
+ else
+ export CUDA_HOME=/usr/local/cuda-11.0/
+ fi
+ export FORCE_CUDA=1
+ # Hard-coding gencode flags is temporary situation until
+ # https://github.com/pytorch/pytorch/pull/23408 lands
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu102)
+ if [[ "$OSTYPE" == "msys" ]]; then
+ export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2"
+ else
+ export CUDA_HOME=/usr/local/cuda-10.2/
+ fi
+ export FORCE_CUDA=1
+ # Hard-coding gencode flags is temporary situation until
+ # https://github.com/pytorch/pytorch/pull/23408 lands
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu101)
+ if [[ "$OSTYPE" == "msys" ]]; then
+ export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.1"
+ else
+ export CUDA_HOME=/usr/local/cuda-10.1/
+ fi
+ export FORCE_CUDA=1
+ # Hard-coding gencode flags is temporary situation until
+ # https://github.com/pytorch/pytorch/pull/23408 lands
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu100)
+ if [[ "$OSTYPE" == "msys" ]]; then
+ export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.0"
+ else
+ export CUDA_HOME=/usr/local/cuda-10.0/
+ fi
+ export FORCE_CUDA=1
+ # Hard-coding gencode flags is temporary situation until
+ # https://github.com/pytorch/pytorch/pull/23408 lands
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cu92)
+ if [[ "$OSTYPE" == "msys" ]]; then
+ export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v9.2"
+ else
+ export CUDA_HOME=/usr/local/cuda-9.2/
+ fi
+ export FORCE_CUDA=1
+ export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_50,code=compute_50"
+ ;;
+ cpu)
+ ;;
+ *)
+ echo "Unrecognized CU_VERSION=$CU_VERSION"
+ exit 1
+ ;;
+ esac
+}
+
+# Populate build version if necessary, and add version suffix
+#
+# Inputs:
+# BUILD_VERSION (e.g., 0.2.0 or empty)
+# VERSION_SUFFIX (e.g., +cpu)
+#
+# Outputs:
+# BUILD_VERSION (e.g., 0.2.0.dev20190807+cpu)
+#
+# Fill BUILD_VERSION if it doesn't exist already with a nightly string
+# Usage: setup_build_version 0.2.0
+setup_build_version() {
+ if [[ -z "$BUILD_VERSION" ]]; then
+ export BUILD_VERSION="$1.dev$(date "+%Y%m%d")$VERSION_SUFFIX"
+ else
+ export BUILD_VERSION="$BUILD_VERSION$VERSION_SUFFIX"
+ fi
+}
+
+# Set some useful variables for OS X, if applicable
+setup_macos() {
+ if [[ "$(uname)" == Darwin ]]; then
+ export MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++
+ fi
+}
+
+# Top-level entry point for things every package will need to do
+#
+# Usage: setup_env 0.2.0
+setup_env() {
+ setup_cuda
+ setup_build_version "$1"
+ setup_macos
+}
+
+# Function to retry functions that sometimes timeout or have flaky failures
+retry () {
+ "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") || (sleep 4 && "$@") || (sleep 8 && "$@")
+}
+
+# Inputs:
+# PYTHON_VERSION (2.7, 3.5, 3.6, 3.7)
+# UNICODE_ABI (bool)
+#
+# Outputs:
+# PATH modified to put correct Python version in PATH
+#
+# Precondition: If Linux, you are in a soumith/manylinux-cuda* Docker image
+setup_wheel_python() {
+ if [[ "$(uname)" == Darwin ]]; then
+ eval "$(conda shell.bash hook)"
+ conda env remove -n "env$PYTHON_VERSION" || true
+ conda create -yn "env$PYTHON_VERSION" python="$PYTHON_VERSION"
+ conda activate "env$PYTHON_VERSION"
+ else
+ case "$PYTHON_VERSION" in
+ 2.7)
+ if [[ -n "$UNICODE_ABI" ]]; then
+ python_abi=cp27-cp27mu
+ else
+ python_abi=cp27-cp27m
+ fi
+ ;;
+ 3.5) python_abi=cp35-cp35m ;;
+ 3.6) python_abi=cp36-cp36m ;;
+ 3.7) python_abi=cp37-cp37m ;;
+ 3.8) python_abi=cp38-cp38 ;;
+ *)
+ echo "Unrecognized PYTHON_VERSION=$PYTHON_VERSION"
+ exit 1
+ ;;
+ esac
+ export PATH="/opt/python/$python_abi/bin:$PATH"
+ fi
+}
+
+# Install with pip a bit more robustly than the default
+pip_install() {
+ retry pip install --progress-bar off "$@"
+}
+
+# Install torch with pip, respecting PYTORCH_VERSION, and record the installed
+# version into PYTORCH_VERSION, if applicable
+setup_pip_pytorch_version() {
+ if [[ -z "$PYTORCH_VERSION" ]]; then
+ # Install latest prerelease version of torch, per our nightlies, consistent
+ # with the requested cuda version
+ pip_install --pre torch -f "https://download.pytorch.org/whl/nightly/${WHEEL_DIR}torch_nightly.html"
+ if [[ "$CU_VERSION" == "cpu" ]]; then
+ # CUDA and CPU are ABI compatible on the CPU-only parts, so strip
+ # in this case
+ export PYTORCH_VERSION="$(pip show torch | grep ^Version: | sed 's/Version: *//' | sed 's/+.\+//')"
+ else
+ export PYTORCH_VERSION="$(pip show torch | grep ^Version: | sed 's/Version: *//')"
+ fi
+ else
+ pip_install "torch==$PYTORCH_VERSION$CUDA_SUFFIX" \
+ -f https://download.pytorch.org/whl/torch_stable.html \
+ -f https://download.pytorch.org/whl/nightly/torch_nightly.html
+ fi
+}
+
+# Fill PYTORCH_VERSION with the latest conda nightly version, and
+# CONDA_CHANNEL_FLAGS with appropriate flags to retrieve these versions
+#
+# You MUST have populated CUDA_SUFFIX before hand.
+setup_conda_pytorch_constraint() {
+ if [[ -z "$PYTORCH_VERSION" ]]; then
+ export CONDA_CHANNEL_FLAGS="-c pytorch-nightly"
+ export PYTORCH_VERSION="$(conda search --json 'pytorch[channel=pytorch-nightly]' | \
+ python -c "import os, sys, json, re; cuver = os.environ.get('CU_VERSION'); \
+ cuver_1 = cuver.replace('cu', 'cuda') if cuver != 'cpu' else cuver; \
+ cuver_2 = (cuver[:-1] + '.' + cuver[-1]).replace('cu', 'cuda') if cuver != 'cpu' else cuver; \
+ print(re.sub(r'\\+.*$', '', \
+ [x['version'] for x in json.load(sys.stdin)['pytorch'] \
+ if (x['platform'] == 'darwin' or cuver_1 in x['fn'] or cuver_2 in x['fn']) \
+ and 'py' + os.environ['PYTHON_VERSION'] in x['fn']][-1]))")"
+ if [[ -z "$PYTORCH_VERSION" ]]; then
+ echo "PyTorch version auto detection failed"
+ echo "No package found for CU_VERSION=$CU_VERSION and PYTHON_VERSION=$PYTHON_VERSION"
+ exit 1
+ fi
+ else
+ export CONDA_CHANNEL_FLAGS="-c pytorch"
+ fi
+ if [[ "$CU_VERSION" == cpu ]]; then
+ export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==$PYTORCH_VERSION${PYTORCH_VERSION_SUFFIX}"
+ export CONDA_PYTORCH_CONSTRAINT="- pytorch==$PYTORCH_VERSION"
+ else
+ export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==${PYTORCH_VERSION}${PYTORCH_VERSION_SUFFIX}"
+ export CONDA_PYTORCH_CONSTRAINT="- pytorch==${PYTORCH_VERSION}${PYTORCH_VERSION_SUFFIX}"
+ fi
+ export PYTORCH_VERSION_NODOT=${PYTORCH_VERSION//./}
+}
+
+# Translate CUDA_VERSION into CUDA_CUDATOOLKIT_CONSTRAINT
+setup_conda_cudatoolkit_constraint() {
+ export CONDA_CPUONLY_FEATURE=""
+ export CONDA_CUB_CONSTRAINT=""
+ if [[ "$(uname)" == Darwin ]]; then
+ export CONDA_CUDATOOLKIT_CONSTRAINT=""
+ else
+ case "$CU_VERSION" in
+ cu116)
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.6,<11.7 # [not osx]"
+ ;;
+ cu115)
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.5,<11.6 # [not osx]"
+ ;;
+ cu113)
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.3,<11.4 # [not osx]"
+ ;;
+ cu112)
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.2,<11.3 # [not osx]"
+ ;;
+ cu111)
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.1,<11.2 # [not osx]"
+ ;;
+ cu110)
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.0,<11.1 # [not osx]"
+ # Even though cudatoolkit 11.0 provides CUB we need our own, to control the
+ # version, because the built-in 1.9.9 in the cudatoolkit causes problems.
+ export CONDA_CUB_CONSTRAINT="- nvidiacub"
+ ;;
+ cu102)
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.2,<10.3 # [not osx]"
+ export CONDA_CUB_CONSTRAINT="- nvidiacub"
+ ;;
+ cu101)
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.1,<10.2 # [not osx]"
+ export CONDA_CUB_CONSTRAINT="- nvidiacub"
+ ;;
+ cu100)
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.0,<10.1 # [not osx]"
+ export CONDA_CUB_CONSTRAINT="- nvidiacub"
+ ;;
+ cu92)
+ export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=9.2,<9.3 # [not osx]"
+ export CONDA_CUB_CONSTRAINT="- nvidiacub"
+ ;;
+ cpu)
+ export CONDA_CUDATOOLKIT_CONSTRAINT=""
+ export CONDA_CPUONLY_FEATURE="- cpuonly"
+ ;;
+ *)
+ echo "Unrecognized CU_VERSION=$CU_VERSION"
+ exit 1
+ ;;
+ esac
+ fi
+}
+
+# Build the proper compiler package before building the final package
+setup_visual_studio_constraint() {
+ if [[ "$OSTYPE" == "msys" ]]; then
+ export VSTOOLCHAIN_PACKAGE=vs2019
+ export VSDEVCMD_ARGS=''
+ # shellcheck disable=SC2086
+ conda build $CONDA_CHANNEL_FLAGS --no-anaconda-upload packaging/$VSTOOLCHAIN_PACKAGE
+ cp packaging/$VSTOOLCHAIN_PACKAGE/conda_build_config.yaml packaging/pytorch3d/conda_build_config.yaml
+ fi
+}
+
+download_nvidiacub_if_needed() {
+ case "$CU_VERSION" in
+ cu110|cu102|cu101|cu100|cu92)
+ echo "Downloading cub"
+ wget --no-verbose https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
+ tar xzf 1.10.0.tar.gz
+ CUB_HOME=$(realpath ./cub-1.10.0)
+ export CUB_HOME
+ echo "CUB_HOME is now $CUB_HOME"
+ ;;
+ esac
+ # We don't need CUB for a cpu build or if cuda is 11.1 or higher
+}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/pytorch3d/meta.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/pytorch3d/meta.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9f115ff99d5d0b7c855c6ce02972810659d2f0f5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/pytorch3d/meta.yaml
@@ -0,0 +1,59 @@
+package:
+ name: pytorch3d
+ version: "{{ environ.get('BUILD_VERSION') }}"
+
+source:
+ path: "{{ environ.get('SOURCE_ROOT_DIR') }}"
+
+requirements:
+ build:
+ - {{ compiler('c') }} # [win]
+ {{ environ.get('CONDA_CUB_CONSTRAINT') }}
+
+ host:
+ - python
+ {{ environ.get('SETUPTOOLS_CONSTRAINT') }}
+ {{ environ.get('CONDA_PYTORCH_BUILD_CONSTRAINT') }}
+ {{ environ.get('CONDA_PYTORCH_MKL_CONSTRAINT') }}
+ {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}
+ {{ environ.get('CONDA_CPUONLY_FEATURE') }}
+
+ run:
+ - python
+ - numpy >=1.11
+ - torchvision >=0.5
+ - iopath
+ {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }}
+ {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}
+
+build:
+ string: py{{py}}_{{ environ['CU_VERSION'] }}_pyt{{ environ['PYTORCH_VERSION_NODOT']}}
+ script: python setup.py install --single-version-externally-managed --record=record.txt # [not win]
+ script_env:
+ - CUDA_HOME
+ - FORCE_CUDA
+ - NVCC_FLAGS
+ - MAX_JOBS
+ features:
+ {{ environ.get('CONDA_CPUONLY_FEATURE') }}
+
+test:
+ imports:
+ - pytorch3d
+ source_files:
+ - tests
+ - docs
+ requires:
+ - imageio
+ - hydra-core
+ - accelerate
+ commands:
+ #pytest .
+ - python -m unittest discover -v -s tests -t .
+
+
+about:
+ home: https://github.com/facebookresearch/pytorch3d
+ license: BSD
+ license_file: LICENSE
+ summary: '3d Geometry for pytorch'
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/activate.bat b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/activate.bat
new file mode 100644
index 0000000000000000000000000000000000000000..55928c1e141f12753275a8c7f1768fdc0432780a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/activate.bat
@@ -0,0 +1,50 @@
+@REM Copyright (c) Meta Platforms, Inc. and affiliates.
+@REM All rights reserved.
+@REM
+@REM This source code is licensed under the BSD-style license found in the
+@REM LICENSE file in the root directory of this source tree.
+
+:: Set env vars that tell distutils to use the compiler that we put on path
+SET DISTUTILS_USE_SDK=1
+SET MSSdk=1
+
+SET "VS_VERSION=15.0"
+SET "VS_MAJOR=15"
+SET "VS_YEAR=2017"
+
+set "MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out"
+set "MSYS2_ENV_CONV_EXCL=CL"
+
+:: For Python 3.5+, ensure that we link with the dynamic runtime. See
+:: http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info
+set "PY_VCRUNTIME_REDIST=%PREFIX%\\bin\\vcruntime140.dll"
+
+for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [15^,16^) -property installationPath`) do (
+ if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (
+ set "VSINSTALLDIR=%%i\"
+ goto :vswhere
+ )
+)
+
+:vswhere
+
+:: Shorten PATH to avoid the `input line too long` error.
+SET MyPath=%PATH%
+
+setlocal EnableDelayedExpansion
+
+SET TempPath="%MyPath:;=";"%"
+SET var=
+FOR %%a IN (%TempPath%) DO (
+ IF EXIST %%~sa (
+ SET "var=!var!;%%~sa"
+ )
+)
+
+set "TempPath=!var:~1!"
+endlocal & set "PATH=%TempPath%"
+
+:: Shorten current directory too
+FOR %%A IN (.) DO CD "%%~sA"
+
+:: other things added by install_activate.bat at package build time
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/conda_build_config.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/conda_build_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5188bb0ebecf72aefb1c2e779458998216e4d479
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/conda_build_config.yaml
@@ -0,0 +1,24 @@
+blas_impl:
+ - mkl # [x86_64]
+c_compiler:
+ - vs2017 # [win]
+cxx_compiler:
+ - vs2017 # [win]
+python:
+ - 3.5
+ - 3.6
+# This differs from target_platform in that it determines what subdir the compiler
+# will target, not what subdir the compiler package will be itself.
+# For example, we need a win-64 vs2008_win-32 package, so that we compile win-32
+# code on win-64 miniconda.
+cross_compiler_target_platform:
+ - win-64 # [win]
+target_platform:
+ - win-64 # [win]
+vc:
+ - 14
+zip_keys:
+ - # [win]
+ - vc # [win]
+ - c_compiler # [win]
+ - cxx_compiler # [win]
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/install_activate.bat b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/install_activate.bat
new file mode 100644
index 0000000000000000000000000000000000000000..7d4e4cc31bac3cf0b168706b37c1faed703f6e57
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/install_activate.bat
@@ -0,0 +1,35 @@
+@REM Copyright (c) Meta Platforms, Inc. and affiliates.
+@REM All rights reserved.
+@REM
+@REM This source code is licensed under the BSD-style license found in the
+@REM LICENSE file in the root directory of this source tree.
+
+set YEAR=2017
+set VER=15
+
+mkdir "%PREFIX%\etc\conda\activate.d"
+COPY "%RECIPE_DIR%\activate.bat" "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+
+IF "%cross_compiler_target_platform%" == "win-64" (
+ set "target_platform=amd64"
+ echo SET "CMAKE_GENERATOR=Visual Studio %VER% %YEAR% Win64" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ IF "%VSDEVCMD_ARGS%" == "" (
+ echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x86_amd64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ ) ELSE (
+ echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x86_amd64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ )
+ echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ ) else (
+ set "target_platform=x86"
+ echo SET "CMAKE_GENERATOR=Visual Studio %VER% %YEAR%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo CALL "VC\Auxiliary\Build\vcvars32.bat" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/install_runtime.bat b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/install_runtime.bat
new file mode 100644
index 0000000000000000000000000000000000000000..9e7806657fb926fabdc501754cf26e1862678f70
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/install_runtime.bat
@@ -0,0 +1,55 @@
+@REM Copyright (c) Meta Platforms, Inc. and affiliates.
+@REM All rights reserved.
+@REM
+@REM This source code is licensed under the BSD-style license found in the
+@REM LICENSE file in the root directory of this source tree.
+
+set VC_PATH=x86
+if "%ARCH%"=="64" (
+ set VC_PATH=x64
+)
+
+set MSC_VER=2017
+
+rem :: This should always be present for VC installed with VS. Not sure about VC installed with Visual C++ Build Tools 2015
+rem FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKEY_LOCAL_MACHINE\Software\Microsoft\DevDiv\VC\Servicing\14.0\IDE.x64" /v UpdateVersion`) DO (
+rem set SP=%%A
+rem )
+
+rem if not "%SP%" == "%PKG_VERSION%" (
+rem echo "Version detected from registry: %SP%"
+rem echo "does not match version of package being built (%PKG_VERSION%)"
+rem echo "Do you have current updates for VS 2015 installed?"
+rem exit 1
+rem )
+
+
+REM ========== REQUIRES Win 10 SDK be installed, or files otherwise copied to location below!
+robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%LIBRARY_BIN%" *.dll /E
+robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%PREFIX%" *.dll /E
+if %ERRORLEVEL% GEQ 8 exit 1
+
+REM ========== This one comes from visual studio 2017
+set "VC_VER=141"
+
+for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [15^,16^) -property installationPath`) do (
+ if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (
+ set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat"
+ goto :eof
+ )
+)
+
+@setlocal
+call "%VS15VCVARSALL%" x64
+
+set "REDIST_ROOT=%VCToolsRedistDir%%VC_PATH%"
+
+robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%LIBRARY_BIN%" *.dll /E
+if %ERRORLEVEL% LSS 8 exit 0
+robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%PREFIX%" *.dll /E
+if %ERRORLEVEL% LSS 8 exit 0
+robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%LIBRARY_BIN%" *.dll /E
+if %ERRORLEVEL% LSS 8 exit 0
+robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%PREFIX%" *.dll /E
+if %ERRORLEVEL% LSS 8 exit 0
+@endlocal
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/meta.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/meta.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34f4860ba850120f59f3f499e21e2a4b429e03cc
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/meta.yaml
@@ -0,0 +1,45 @@
+{% set vcver="14.1" %}
+{% set vcfeature="14" %}
+{% set vsyear="2017" %}
+{% set fullver="15.4.27004.2010" %}
+
+package:
+ name: vs{{ vsyear }}
+ version: {{ fullver }}
+
+build:
+ skip: True # [not win]
+ script_env:
+ - VSDEVCMD_ARGS # [win]
+
+outputs:
+ - name: vs{{ vsyear }}_{{ cross_compiler_target_platform }}
+ script: install_activate.bat
+ track_features:
+ # VS 2017 is binary-compatible with VS 2015/vc14. Tools are "v141".
+ strong:
+ - vc{{ vcfeature }}
+ run_exports:
+ - vc {{ vcver }}
+ about:
+ summary: Activation and version verification of MSVC {{ vcver }} (VS {{ vsyear }}) compiler
+ license: BSD 3-clause
+ - name: vs{{ vsyear }}_runtime
+ script: install_runtime.bat
+ - name: vc
+ version: {{ vcver }}
+ track_features:
+ - vc{{ vcfeature }}
+ requirements:
+ run:
+ - {{ pin_subpackage('vs' ~ vsyear ~ '_runtime') }}
+ about:
+ home: https://github.com/conda/conda/wiki/VC-features
+ license: Modified BSD License (3-clause)
+ license_family: BSD
+ summary: A meta-package to track VC features.
+ description: |
+ This metapackage is used to activate vc features without
+ depending on Python.
+ doc_url: https://github.com/conda/conda/wiki/VC-features
+ dev_url: https://github.com/conda/conda/wiki/VC-features
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/activate.bat b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/activate.bat
new file mode 100644
index 0000000000000000000000000000000000000000..fd4f5706e339bb50442c6c3d63d484bf7628db72
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/activate.bat
@@ -0,0 +1,50 @@
+@REM Copyright (c) Meta Platforms, Inc. and affiliates.
+@REM All rights reserved.
+@REM
+@REM This source code is licensed under the BSD-style license found in the
+@REM LICENSE file in the root directory of this source tree.
+
+:: Set env vars that tell distutils to use the compiler that we put on path
+SET DISTUTILS_USE_SDK=1
+SET MSSdk=1
+
+SET "VS_VERSION=16.0"
+SET "VS_MAJOR=16"
+SET "VS_YEAR=2019"
+
+set "MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out"
+set "MSYS2_ENV_CONV_EXCL=CL"
+
+:: For Python 3.5+, ensure that we link with the dynamic runtime. See
+:: http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info
+set "PY_VCRUNTIME_REDIST=%PREFIX%\\bin\\vcruntime140.dll"
+
+for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [16^,17^) -property installationPath`) do (
+ if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (
+ set "VSINSTALLDIR=%%i\"
+ goto :vswhere
+ )
+)
+
+:vswhere
+
+:: Shorten PATH to avoid the `input line too long` error.
+SET MyPath=%PATH%
+
+setlocal EnableDelayedExpansion
+
+SET TempPath="%MyPath:;=";"%"
+SET var=
+FOR %%a IN (%TempPath%) DO (
+ IF EXIST %%~sa (
+ SET "var=!var!;%%~sa"
+ )
+)
+
+set "TempPath=!var:~1!"
+endlocal & set "PATH=%TempPath%"
+
+:: Shorten current directory too
+FOR %%A IN (.) DO CD "%%~sA"
+
+:: other things added by install_activate.bat at package build time
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/conda_build_config.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/conda_build_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..358052ec012940bb56778d167bcd69302d255846
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/conda_build_config.yaml
@@ -0,0 +1,24 @@
+blas_impl:
+ - mkl # [x86_64]
+c_compiler:
+ - vs2019 # [win]
+cxx_compiler:
+ - vs2019 # [win]
+python:
+ - 3.5
+ - 3.6
+# This differs from target_platform in that it determines what subdir the compiler
+# will target, not what subdir the compiler package will be itself.
+# For example, we need a win-64 vs2008_win-32 package, so that we compile win-32
+# code on win-64 miniconda.
+cross_compiler_target_platform:
+ - win-64 # [win]
+target_platform:
+ - win-64 # [win]
+vc:
+ - 14
+zip_keys:
+ - # [win]
+ - vc # [win]
+ - c_compiler # [win]
+ - cxx_compiler # [win]
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/install_activate.bat b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/install_activate.bat
new file mode 100644
index 0000000000000000000000000000000000000000..ee7ccdc679777691de2ac9c883e4a01118a29ec3
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/install_activate.bat
@@ -0,0 +1,35 @@
+@REM Copyright (c) Meta Platforms, Inc. and affiliates.
+@REM All rights reserved.
+@REM
+@REM This source code is licensed under the BSD-style license found in the
+@REM LICENSE file in the root directory of this source tree.
+
+set YEAR=2019
+set VER=16
+
+mkdir "%PREFIX%\etc\conda\activate.d"
+COPY "%RECIPE_DIR%\activate.bat" "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+
+IF "%cross_compiler_target_platform%" == "win-64" (
+ set "target_platform=amd64"
+ echo SET "CMAKE_GENERATOR=Visual Studio %VER% %YEAR% Win64" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ IF "%VSDEVCMD_ARGS%" == "" (
+ echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x86_amd64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ ) ELSE (
+ echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x86_amd64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ )
+ echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ ) else (
+ set "target_platform=x86"
+ echo SET "CMAKE_GENERATOR=Visual Studio %VER% %YEAR%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo CALL "VC\Auxiliary\Build\vcvars32.bat" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
+ )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/install_runtime.bat b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/install_runtime.bat
new file mode 100644
index 0000000000000000000000000000000000000000..1c842cfe350db46f09c9ac79dcd3bc22965d3315
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/install_runtime.bat
@@ -0,0 +1,55 @@
+@REM Copyright (c) Meta Platforms, Inc. and affiliates.
+@REM All rights reserved.
+@REM
+@REM This source code is licensed under the BSD-style license found in the
+@REM LICENSE file in the root directory of this source tree.
+
+set VC_PATH=x86
+if "%ARCH%"=="64" (
+ set VC_PATH=x64
+)
+
+set MSC_VER=2019
+
+rem :: This should always be present for VC installed with VS. Not sure about VC installed with Visual C++ Build Tools 2015
+rem FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKEY_LOCAL_MACHINE\Software\Microsoft\DevDiv\VC\Servicing\14.0\IDE.x64" /v UpdateVersion`) DO (
+rem set SP=%%A
+rem )
+
+rem if not "%SP%" == "%PKG_VERSION%" (
+rem echo "Version detected from registry: %SP%"
+rem echo "does not match version of package being built (%PKG_VERSION%)"
+rem echo "Do you have current updates for VS 2015 installed?"
+rem exit 1
+rem )
+
+
+REM ========== REQUIRES Win 10 SDK be installed, or files otherwise copied to location below!
+robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%LIBRARY_BIN%" *.dll /E
+robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%PREFIX%" *.dll /E
+if %ERRORLEVEL% GEQ 8 exit 1
+
+REM ========== This one comes from visual studio 2019
+set "VC_VER=142"
+
+for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [16^,17^) -property installationPath`) do (
+ if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (
+ set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat"
+ goto :found
+ )
+)
+:found
+@setlocal
+call "%VS15VCVARSALL%" x64
+
+set "REDIST_ROOT=%VCToolsRedistDir%%VC_PATH%"
+
+robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%LIBRARY_BIN%" *.dll /E
+if %ERRORLEVEL% LSS 8 exit 0
+robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%PREFIX%" *.dll /E
+if %ERRORLEVEL% LSS 8 exit 0
+robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%LIBRARY_BIN%" *.dll /E
+if %ERRORLEVEL% LSS 8 exit 0
+robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%PREFIX%" *.dll /E
+if %ERRORLEVEL% LSS 8 exit 0
+@endlocal
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/meta.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/meta.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e3f8b4714818e1fe5754a30ceb2070ff000991fd
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/meta.yaml
@@ -0,0 +1,45 @@
+{% set vcver="14.2" %}
+{% set vcfeature="14" %}
+{% set vsyear="2019" %}
+{% set fullver="15.4.27004.2010" %}
+
+package:
+ name: vs{{ vsyear }}
+ version: {{ fullver }}
+
+build:
+ skip: True  # [not win]
+ script_env:
+ - VSDEVCMD_ARGS # [win]
+
+outputs:
+ - name: vs{{ vsyear }}_{{ cross_compiler_target_platform }}
+ script: install_activate.bat
+ track_features:
+ # VS 2019 is binary-compatible with VS 2017/vc 14.1 and 2015/vc14. Tools are "v142".
+ strong:
+ - vc{{ vcfeature }}
+ run_exports:
+ - vc {{ vcver }}
+ about:
+ summary: Activation and version verification of MSVC {{ vcver }} (VS {{ vsyear }}) compiler
+ license: BSD 3-clause
+ - name: vs{{ vsyear }}_runtime
+ script: install_runtime.bat
+ - name: vc
+ version: {{ vcver }}
+ track_features:
+ - vc{{ vcfeature }}
+ requirements:
+ run:
+ - {{ pin_subpackage('vs' ~ vsyear ~ '_runtime') }}
+ about:
+ home: https://github.com/conda/conda/wiki/VC-features
+ license: Modified BSD License (3-clause)
+ license_family: BSD
+ summary: A meta-package to track VC features.
+ description: |
+ This metapackage is used to activate vc features without
+ depending on Python.
+ doc_url: https://github.com/conda/conda/wiki/VC-features
+ dev_url: https://github.com/conda/conda/wiki/VC-features
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e41cd717f6a439a9c08d76a9d0e4a54e190fc5a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_normed.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_normed.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b2154c8bfa130d90073f70b7d54ac540a9e557ef
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_normed.yaml
@@ -0,0 +1,18 @@
+model_factory_ImplicitronModelFactory_args:
+ model_GenericModel_args:
+ image_feature_extractor_class_type: ResNetFeatureExtractor
+ image_feature_extractor_ResNetFeatureExtractor_args:
+ add_images: true
+ add_masks: true
+ first_max_pool: true
+ image_rescale: 0.375
+ l2_norm: true
+ name: resnet34
+ normalize_image: true
+ pretrained: true
+ stages:
+ - 1
+ - 2
+ - 3
+ - 4
+ proj_dim: 32
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_transformer.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_transformer.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8d24495bbb15ad8d8770dadf5147ec49d2706b08
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_transformer.yaml
@@ -0,0 +1,18 @@
+model_factory_ImplicitronModelFactory_args:
+ model_GenericModel_args:
+ image_feature_extractor_class_type: ResNetFeatureExtractor
+ image_feature_extractor_ResNetFeatureExtractor_args:
+ add_images: true
+ add_masks: true
+ first_max_pool: false
+ image_rescale: 0.375
+ l2_norm: true
+ name: resnet34
+ normalize_image: true
+ pretrained: true
+ stages:
+ - 1
+ - 2
+ - 3
+ - 4
+ proj_dim: 16
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_unnormed.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_unnormed.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2d4eb3f861089e96bf63b9b0bced5bed7943f134
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_unnormed.yaml
@@ -0,0 +1,19 @@
+model_factory_ImplicitronModelFactory_args:
+ model_GenericModel_args:
+ image_feature_extractor_class_type: ResNetFeatureExtractor
+ image_feature_extractor_ResNetFeatureExtractor_args:
+ stages:
+ - 1
+ - 2
+ - 3
+ first_max_pool: false
+ proj_dim: -1
+ l2_norm: false
+ image_rescale: 0.375
+ name: resnet34
+ normalize_image: true
+ pretrained: true
+ view_pooler_args:
+ feature_aggregator_AngleWeightedReductionFeatureAggregator_args:
+ reduction_functions:
+ - AVG
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_co3dv2_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_co3dv2_base.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9eb9bd9030a5fbc0b48006416137762d89ac2757
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_co3dv2_base.yaml
@@ -0,0 +1,8 @@
+data_source_ImplicitronDataSource_args:
+ dataset_map_provider_class_type: JsonIndexDatasetMapProviderV2
+ dataset_map_provider_JsonIndexDatasetMapProviderV2_args:
+ category: teddybear
+ subset_name: fewview_dev
+training_loop_ImplicitronTrainingLoop_args:
+ evaluator_ImplicitronEvaluator_args:
+ is_multisequence: true
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_idr_ad.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_idr_ad.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f6bb1fe40ca47fb9456b74932e380b43a97e8d43
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_idr_ad.yaml
@@ -0,0 +1,65 @@
+defaults:
+- repro_multiseq_base.yaml
+- _self_
+model_factory_ImplicitronModelFactory_args:
+ model_GenericModel_args:
+ loss_weights:
+ loss_mask_bce: 100.0
+ loss_kl: 0.0
+ loss_rgb_mse: 1.0
+ loss_eikonal: 0.1
+ chunk_size_grid: 65536
+ num_passes: 1
+ output_rasterized_mc: true
+ sampling_mode_training: mask_sample
+ global_encoder_class_type: SequenceAutodecoder
+ global_encoder_SequenceAutodecoder_args:
+ autodecoder_args:
+ n_instances: 20000
+ init_scale: 1.0
+ encoding_dim: 256
+ implicit_function_IdrFeatureField_args:
+ n_harmonic_functions_xyz: 6
+ bias: 0.6
+ d_in: 3
+ d_out: 1
+ dims:
+ - 512
+ - 512
+ - 512
+ - 512
+ - 512
+ - 512
+ - 512
+ - 512
+ geometric_init: true
+ pooled_feature_dim: 0
+ skip_in:
+ - 6
+ weight_norm: true
+ renderer_SignedDistanceFunctionRenderer_args:
+ ray_tracer_args:
+ line_search_step: 0.5
+ line_step_iters: 3
+ n_secant_steps: 8
+ n_steps: 100
+ sdf_threshold: 5.0e-05
+ ray_normal_coloring_network_args:
+ d_in: 9
+ d_out: 3
+ dims:
+ - 512
+ - 512
+ - 512
+ - 512
+ mode: idr
+ n_harmonic_functions_dir: 4
+ pooled_feature_dim: 0
+ weight_norm: true
+ raysampler_AdaptiveRaySampler_args:
+ n_rays_per_image_sampled_from_mask: 1024
+ n_pts_per_ray_training: 0
+ n_pts_per_ray_evaluation: 0
+ scene_extent: 8.0
+ renderer_class_type: SignedDistanceFunctionRenderer
+ implicit_function_class_type: IdrFeatureField
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_wce.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_wce.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fa366d46ac4a2f09a437cf2632e5735aee34d5fa
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_wce.yaml
@@ -0,0 +1,12 @@
+defaults:
+- repro_multiseq_base.yaml
+- repro_feat_extractor_unnormed.yaml
+- _self_
+model_factory_ImplicitronModelFactory_args:
+ model_GenericModel_args:
+ chunk_size_grid: 16000
+ view_pooler_enabled: true
+ raysampler_AdaptiveRaySampler_args:
+ n_rays_per_image_sampled_from_mask: 850
+training_loop_ImplicitronTrainingLoop_args:
+ clip_grad: 1.0
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1b4a2ef2d17d5a7a2d868b1603c996e2fb3ad7b2
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet.yaml
@@ -0,0 +1,35 @@
+defaults:
+- repro_multiseq_base.yaml
+- _self_
+model_factory_ImplicitronModelFactory_args:
+ model_GenericModel_args:
+ chunk_size_grid: 16000
+ view_pooler_enabled: false
+ n_train_target_views: -1
+ num_passes: 1
+ loss_weights:
+ loss_rgb_mse: 200.0
+ loss_prev_stage_rgb_mse: 0.0
+ loss_mask_bce: 1.0
+ loss_prev_stage_mask_bce: 0.0
+ loss_autodecoder_norm: 0.001
+ depth_neg_penalty: 10000.0
+ global_encoder_class_type: SequenceAutodecoder
+ global_encoder_SequenceAutodecoder_args:
+ autodecoder_args:
+ encoding_dim: 256
+ n_instances: 20000
+ raysampler_class_type: NearFarRaySampler
+ raysampler_NearFarRaySampler_args:
+ n_rays_per_image_sampled_from_mask: 2048
+ min_depth: 0.05
+ max_depth: 0.05
+ n_pts_per_ray_training: 1
+ n_pts_per_ray_evaluation: 1
+ stratified_point_sampling_training: false
+ stratified_point_sampling_evaluation: false
+ renderer_class_type: LSTMRenderer
+ implicit_function_class_type: SRNHyperNetImplicitFunction
+optimizer_factory_ImplicitronOptimizerFactory_args:
+ breed: Adam
+ lr: 5.0e-05
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce_noharm.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce_noharm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d2ea11e367e6b169895546286c80c939724a4754
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce_noharm.yaml
@@ -0,0 +1,11 @@
+defaults:
+- repro_multiseq_srn_wce.yaml
+- _self_
+model_factory_ImplicitronModelFactory_args:
+ model_GenericModel_args:
+ num_passes: 1
+ implicit_function_SRNImplicitFunction_args:
+ pixel_generator_args:
+ n_harmonic_functions: 0
+ raymarch_function_args:
+ n_harmonic_functions: 0
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_base.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..572fc7d5e71323f61c9b099c56b7f7aeb900b614
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_base.yaml
@@ -0,0 +1,41 @@
+defaults:
+- repro_base
+- _self_
+data_source_ImplicitronDataSource_args:
+ data_loader_map_provider_SequenceDataLoaderMapProvider_args:
+ batch_size: 1
+ dataset_length_train: 1000
+ dataset_length_val: 1
+ num_workers: 8
+ dataset_map_provider_JsonIndexDatasetMapProvider_args:
+ assert_single_seq: true
+ n_frames_per_sequence: -1
+ test_restrict_sequence_id: 0
+ test_on_train: false
+model_factory_ImplicitronModelFactory_args:
+ model_GenericModel_args:
+ render_image_height: 800
+ render_image_width: 800
+ log_vars:
+ - loss_rgb_psnr_fg
+ - loss_rgb_psnr
+ - loss_eikonal
+ - loss_prev_stage_rgb_psnr
+ - loss_mask_bce
+ - loss_prev_stage_mask_bce
+ - loss_rgb_mse
+ - loss_prev_stage_rgb_mse
+ - loss_depth_abs
+ - loss_depth_abs_fg
+ - loss_kl
+ - loss_mask_neg_iou
+ - objective
+ - epoch
+ - sec/it
+optimizer_factory_ImplicitronOptimizerFactory_args:
+ lr: 0.0005
+ multistep_lr_milestones:
+ - 200
+ - 300
+training_loop_ImplicitronTrainingLoop_args:
+ max_epochs: 400
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2a92a92c1f20ea48a2b655211655dafa4e894c23
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml
@@ -0,0 +1,55 @@
+defaults:
+- repro_singleseq_base
+- _self_
+exp_dir: "./data/nerf_blender_repro/${oc.env:BLENDER_SINGLESEQ_CLASS}"
+data_source_ImplicitronDataSource_args:
+ data_loader_map_provider_SequenceDataLoaderMapProvider_args:
+ dataset_length_train: 100
+ dataset_map_provider_class_type: BlenderDatasetMapProvider
+ dataset_map_provider_BlenderDatasetMapProvider_args:
+ base_dir: ${oc.env:BLENDER_DATASET_ROOT}/${oc.env:BLENDER_SINGLESEQ_CLASS}
+ n_known_frames_for_test: null
+ object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS}
+ path_manager_factory_class_type: PathManagerFactory
+ path_manager_factory_PathManagerFactory_args:
+ silence_logs: true
+
+model_factory_ImplicitronModelFactory_args:
+ model_GenericModel_args:
+ mask_images: false
+ raysampler_class_type: AdaptiveRaySampler
+ raysampler_AdaptiveRaySampler_args:
+ n_pts_per_ray_training: 64
+ n_pts_per_ray_evaluation: 64
+ n_rays_per_image_sampled_from_mask: 4096
+ stratified_point_sampling_training: true
+ stratified_point_sampling_evaluation: false
+ scene_extent: 2.0
+ scene_center:
+ - 0.0
+ - 0.0
+ - 0.0
+ renderer_MultiPassEmissionAbsorptionRenderer_args:
+ density_noise_std_train: 0.0
+ n_pts_per_ray_fine_training: 128
+ n_pts_per_ray_fine_evaluation: 128
+ raymarcher_EmissionAbsorptionRaymarcher_args:
+ blend_output: false
+ loss_weights:
+ loss_rgb_mse: 1.0
+ loss_prev_stage_rgb_mse: 1.0
+ loss_mask_bce: 0.0
+ loss_prev_stage_mask_bce: 0.0
+ loss_autodecoder_norm: 0.00
+
+optimizer_factory_ImplicitronOptimizerFactory_args:
+ exponential_lr_step_size: 3001
+ lr_policy: LinearExponential
+ linear_exponential_lr_milestone: 200
+
+training_loop_ImplicitronTrainingLoop_args:
+ max_epochs: 6000
+ metric_print_interval: 10
+ store_checkpoints_purge: 3
+ test_when_finished: true
+ validation_interval: 100
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_wce.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_wce.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..38212e35707e2c26b93d3aa593e76579c483ca91
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_wce.yaml
@@ -0,0 +1,10 @@
+defaults:
+- repro_singleseq_wce_base.yaml
+- repro_feat_extractor_unnormed.yaml
+- _self_
+model_factory_ImplicitronModelFactory_args:
+ model_GenericModel_args:
+ chunk_size_grid: 16000
+ view_pooler_enabled: true
+ raysampler_AdaptiveRaySampler_args:
+ n_rays_per_image_sampled_from_mask: 850
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerformer.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerformer.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8983c26f34309fe35d41d43a87f53ddd564db3a5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerformer.yaml
@@ -0,0 +1,18 @@
+defaults:
+- repro_singleseq_wce_base.yaml
+- repro_feat_extractor_transformer.yaml
+- _self_
+model_factory_ImplicitronModelFactory_args:
+ model_GenericModel_args:
+ chunk_size_grid: 16000
+ view_pooler_enabled: true
+ implicit_function_class_type: NeRFormerImplicitFunction
+ raysampler_AdaptiveRaySampler_args:
+ n_rays_per_image_sampled_from_mask: 800
+ n_pts_per_ray_training: 32
+ n_pts_per_ray_evaluation: 32
+ renderer_MultiPassEmissionAbsorptionRenderer_args:
+ n_pts_per_ray_fine_training: 16
+ n_pts_per_ray_fine_evaluation: 16
+ view_pooler_args:
+ feature_aggregator_class_type: IdentityFeatureAggregator
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_noharm.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_noharm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..28b7570c8c9f49f3ecc5a45056c1467b3b3b2130
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_noharm.yaml
@@ -0,0 +1,11 @@
+defaults:
+- repro_singleseq_srn.yaml
+- _self_
+model_factory_ImplicitronModelFactory_args:
+ model_GenericModel_args:
+ num_passes: 1
+ implicit_function_SRNImplicitFunction_args:
+ pixel_generator_args:
+ n_harmonic_functions: 0
+ raymarch_function_args:
+ n_harmonic_functions: 0
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_idr.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_idr.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4b73e40797d30f70420e213588fa46f110895cde
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_idr.yaml
@@ -0,0 +1,4 @@
+defaults:
+- repro_singleseq_idr.yaml
+- repro_singleseq_co3dv2_base.yaml
+- _self_
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerformer.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerformer.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..510589a0c048f1f915da6b0e4c57dfbc3f8f29b5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerformer.yaml
@@ -0,0 +1,4 @@
+defaults:
+- repro_singleseq_nerformer.yaml
+- repro_singleseq_co3dv2_base.yaml
+- _self_
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_srn_noharm.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_srn_noharm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8964a4a21e41286e9587cc2209a786b54482ab44
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_srn_noharm.yaml
@@ -0,0 +1,4 @@
+defaults:
+- repro_singleseq_srn_noharm.yaml
+- repro_singleseq_co3dv2_base.yaml
+- _self_
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_wce_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_wce_base.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f5b174c04a9b48646151509bdd22db24bc495702
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_wce_base.yaml
@@ -0,0 +1,22 @@
+defaults:
+- repro_singleseq_base
+- _self_
+data_source_ImplicitronDataSource_args:
+ data_loader_map_provider_SequenceDataLoaderMapProvider_args:
+ batch_size: 10
+ dataset_length_train: 1000
+ dataset_length_val: 1
+ num_workers: 8
+ train_conditioning_type: SAME
+ val_conditioning_type: SAME
+ test_conditioning_type: SAME
+ images_per_seq_options:
+ - 2
+ - 3
+ - 4
+ - 5
+ - 6
+ - 7
+ - 8
+ - 9
+ - 10
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c2f532ea35ebc74f272bfafef57e6b009eba299
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from .shapenet_core import ShapeNetCore
+
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_core.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_core.py
new file mode 100644
index 0000000000000000000000000000000000000000..70524b15dc6f460762334ef0c9b7d1aba7af4e01
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_core.py
@@ -0,0 +1,160 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import json
+import os
+import warnings
+from os import path
+from pathlib import Path
+from typing import Dict
+
+from pytorch3d.datasets.shapenet_base import ShapeNetBase
+
+
+SYNSET_DICT_DIR = Path(__file__).resolve().parent
+
+
+class ShapeNetCore(ShapeNetBase): # pragma: no cover
+ """
+ This class loads ShapeNetCore from a given directory into a Dataset object.
+ ShapeNetCore is a subset of the ShapeNet dataset and can be downloaded from
+ https://www.shapenet.org/.
+ """
+
+ def __init__(
+ self,
+ data_dir,
+ synsets=None,
+ version: int = 1,
+ load_textures: bool = True,
+ texture_resolution: int = 4,
+ ) -> None:
+ """
+ Store each object's synset id and models id from data_dir.
+
+ Args:
+ data_dir: Path to ShapeNetCore data.
+ synsets: List of synset categories to load from ShapeNetCore in the form of
+ synset offsets or labels. A combination of both is also accepted.
+ When no category is specified, all categories in data_dir are loaded.
+ version: (int) version of ShapeNetCore data in data_dir, 1 or 2.
+ Default is set to be 1. Version 1 has 57 categories and version 2 has 55
+ categories.
+ Note: version 1 has two categories 02858304(boat) and 02992529(cellphone)
+ that are hyponyms of categories 04530566(watercraft) and 04401088(telephone)
+ respectively. You can combine the categories manually if needed.
+ Version 2 doesn't have 02858304(boat) or 02834778(bicycle) compared to
+ version 1.
+ load_textures: Boolean indicating whether textures should loaded for the model.
+ Textures will be of type TexturesAtlas i.e. a texture map per face.
+ texture_resolution: Int specifying the resolution of the texture map per face
+ created using the textures in the obj file. A
+ (texture_resolution, texture_resolution, 3) map is created per face.
+ """
+ super().__init__()
+ self.shapenet_dir = data_dir
+ self.load_textures = load_textures
+ self.texture_resolution = texture_resolution
+
+ if version not in [1, 2]:
+ raise ValueError("Version number must be either 1 or 2.")
+ self.model_dir = "model.obj" if version == 1 else "models/model_normalized.obj"
+
+ # Synset dictionary mapping synset offsets to corresponding labels.
+ dict_file = "shapenet_synset_dict_v%d.json" % version
+ with open(path.join(SYNSET_DICT_DIR, dict_file), "r") as read_dict:
+ self.synset_dict = json.load(read_dict)
+ # Inverse dictionary mapping synset labels to corresponding offsets.
+ self.synset_inv = {label: offset for offset, label in self.synset_dict.items()}
+
+ # If categories are specified, check if each category is in the form of either
+ # synset offset or synset label, and if the category exists in the given directory.
+ if synsets is not None:
+ # Set of categories to load in the form of synset offsets.
+ synset_set = set()
+ for synset in synsets:
+ if (synset in self.synset_dict.keys()) and (
+ path.isdir(path.join(data_dir, synset))
+ ):
+ synset_set.add(synset)
+ elif (synset in self.synset_inv.keys()) and (
+ (path.isdir(path.join(data_dir, self.synset_inv[synset])))
+ ):
+ synset_set.add(self.synset_inv[synset])
+ else:
+ msg = (
+ "Synset category %s either not part of ShapeNetCore dataset "
+ "or cannot be found in %s."
+ ) % (synset, data_dir)
+ warnings.warn(msg)
+ # If no category is given, load every category in the given directory.
+ # Ignore synset folders not included in the official mapping.
+ else:
+ synset_set = {
+ synset
+ for synset in os.listdir(data_dir)
+ if path.isdir(path.join(data_dir, synset))
+ and synset in self.synset_dict
+ }
+
+ # Check if there are any categories in the official mapping that are not loaded.
+ # Update self.synset_inv so that it only includes the loaded categories.
+ synset_not_present = set(self.synset_dict.keys()).difference(synset_set)
+ [self.synset_inv.pop(self.synset_dict[synset]) for synset in synset_not_present]
+
+ if len(synset_not_present) > 0:
+ msg = (
+ "The following categories are included in ShapeNetCore ver.%d's "
+ "official mapping but not found in the dataset location %s: %s"
+ ""
+ ) % (version, data_dir, ", ".join(synset_not_present))
+ warnings.warn(msg)
+
+ # Extract model_id of each object from directory names.
+ # Each grandchildren directory of data_dir contains an object, and the name
+ # of the directory is the object's model_id.
+ for synset in synset_set:
+ self.synset_start_idxs[synset] = len(self.synset_ids)
+ for model in os.listdir(path.join(data_dir, synset)):
+ if not path.exists(path.join(data_dir, synset, model, self.model_dir)):
+ msg = (
+ "Object file not found in the model directory %s "
+ "under synset directory %s."
+ ) % (model, synset)
+ warnings.warn(msg)
+ continue
+ self.synset_ids.append(synset)
+ self.model_ids.append(model)
+ model_count = len(self.synset_ids) - self.synset_start_idxs[synset]
+ self.synset_num_models[synset] = model_count
+
+ def __getitem__(self, idx: int) -> Dict:
+ """
+ Read a model by the given index.
+
+ Args:
+ idx: The idx of the model to be retrieved in the dataset.
+
+ Returns:
+ dictionary with following keys:
+ - verts: FloatTensor of shape (V, 3).
+ - faces: LongTensor of shape (F, 3) which indexes into the verts tensor.
+ - synset_id (str): synset id
+ - model_id (str): model id
+ - label (str): synset label.
+ """
+ model = self._get_item_ids(idx)
+ model_path = path.join(
+ self.shapenet_dir, model["synset_id"], model["model_id"], self.model_dir
+ )
+ verts, faces, textures = self._load_mesh(model_path)
+ model["verts"] = verts
+ model["faces"] = faces
+ model["textures"] = textures
+ model["label"] = self.synset_dict[model["synset_id"]]
+ return model
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v1.json b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v1.json
new file mode 100644
index 0000000000000000000000000000000000000000..b2fc62ae62107a81e078ec02432fb554ae8f1b41
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v1.json
@@ -0,0 +1,59 @@
+{
+ "04379243": "table",
+ "02958343": "car",
+ "03001627": "chair",
+ "02691156": "airplane",
+ "04256520": "sofa",
+ "04090263": "rifle",
+ "03636649": "lamp",
+ "04530566": "watercraft",
+ "02828884": "bench",
+ "03691459": "loudspeaker",
+ "02933112": "cabinet",
+ "03211117": "display",
+ "04401088": "telephone",
+ "02924116": "bus",
+ "02808440": "bathtub",
+ "03467517": "guitar",
+ "03325088": "faucet",
+ "03046257": "clock",
+ "03991062": "flowerpot",
+ "03593526": "jar",
+ "02876657": "bottle",
+ "02871439": "bookshelf",
+ "03642806": "laptop",
+ "03624134": "knife",
+ "04468005": "train",
+ "02747177": "trash bin",
+ "03790512": "motorbike",
+ "03948459": "pistol",
+ "03337140": "file cabinet",
+ "02818832": "bed",
+ "03928116": "piano",
+ "04330267": "stove",
+ "03797390": "mug",
+ "02880940": "bowl",
+ "04554684": "washer",
+ "04004475": "printer",
+ "03513137": "helmet",
+ "03761084": "microwaves",
+ "04225987": "skateboard",
+ "04460130": "tower",
+ "02942699": "camera",
+ "02801938": "basket",
+ "02946921": "can",
+ "03938244": "pillow",
+ "03710193": "mailbox",
+ "03207941": "dishwasher",
+ "04099429": "rocket",
+ "02773838": "bag",
+ "02843684": "birdhouse",
+ "03261776": "earphone",
+ "03759954": "microphone",
+ "04074963": "remote",
+ "03085013": "keyboard",
+ "02834778": "bicycle",
+ "02954340": "cap",
+ "02858304": "boat",
+ "02992529": "mobile phone"
+}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v2.json b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v2.json
new file mode 100644
index 0000000000000000000000000000000000000000..f0107c93c3535e2454070be1dcb622ac66899c90
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v2.json
@@ -0,0 +1,57 @@
+{
+ "02691156": "airplane",
+ "02747177": "trash bin",
+ "02773838": "bag",
+ "02801938": "basket",
+ "02808440": "bathtub",
+ "02818832": "bed",
+ "02828884": "bench",
+ "02843684": "birdhouse",
+ "02871439": "bookshelf",
+ "02876657": "bottle",
+ "02880940": "bowl",
+ "02924116": "bus",
+ "02933112": "cabinet",
+ "02942699": "camera",
+ "02946921": "can",
+ "02954340": "cap",
+ "02958343": "car",
+ "02992529": "cellphone",
+ "03001627": "chair",
+ "03046257": "clock",
+ "03085013": "keyboard",
+ "03207941": "dishwasher",
+ "03211117": "display",
+ "03261776": "earphone",
+ "03325088": "faucet",
+ "03337140": "file cabinet",
+ "03467517": "guitar",
+ "03513137": "helmet",
+ "03593526": "jar",
+ "03624134": "knife",
+ "03636649": "lamp",
+ "03642806": "laptop",
+ "03691459": "loudspeaker",
+ "03710193": "mailbox",
+ "03759954": "microphone",
+ "03761084": "microwaves",
+ "03790512": "motorbike",
+ "03797390": "mug",
+ "03928116": "piano",
+ "03938244": "pillow",
+ "03948459": "pistol",
+ "03991062": "flowerpot",
+ "04004475": "printer",
+ "04074963": "remote",
+ "04090263": "rifle",
+ "04099429": "rocket",
+ "04225987": "skateboard",
+ "04256520": "sofa",
+ "04330267": "stove",
+ "04379243": "table",
+ "04401088": "telephone",
+ "04460130": "tower",
+ "04468005": "train",
+ "04530566": "watercraft",
+ "04554684": "washer"
+}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/blender_dataset_map_provider.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/blender_dataset_map_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..3db89a817295a1ab8691e2d5641d3f3dbf67ecfd
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/blender_dataset_map_provider.py
@@ -0,0 +1,55 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+import torch
+from pytorch3d.implicitron.tools.config import registry
+
+from .load_blender import load_blender_data
+from .single_sequence_dataset import (
+ _interpret_blender_cameras,
+ SingleSceneDatasetMapProviderBase,
+)
+
+
@registry.register
class BlenderDatasetMapProvider(SingleSceneDatasetMapProviderBase):
    """
    Dataset map provider for a single scene of the Blender synthetic dataset,
    loaded via the code in load_blender.py.

    Members:
        base_dir: directory holding the data for the scene.
        object_name: The name of the scene (e.g. "lego"). This is just used as a label.
            It will typically be equal to the name of the directory self.base_dir.
        path_manager_factory: Creates path manager which may be used for
            interpreting paths.
        n_known_frames_for_test: If set, training frames are included in the val
            and test datasets, and this many random training frames are added to
            each test batch. If not set, test batches each contain just a single
            testing frame.
    """

    def _load_data(self) -> None:
        path_manager = self.path_manager_factory.get()
        images, poses, _, hwf, i_split = load_blender_data(
            self.base_dir,
            testskip=1,
            path_manager=path_manager,
        )
        _height, _width, focal = hwf
        # NHWC numpy arrays -> NCHW tensors; the fourth channel is split off
        # below as the foreground probability mask.
        rgba = torch.from_numpy(images).permute(0, 3, 1, 2)

        # pyre-ignore[16]
        self.poses = _interpret_blender_cameras(poses, focal)
        # pyre-ignore[16]
        self.images = rgba[:, :3]
        # pyre-ignore[16]
        self.fg_probabilities = rgba[:, 3:4]
        # pyre-ignore[16]
        self.i_split = i_split
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/dataset_base.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/dataset_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b0827882c82b1a0e838e5ed77e9e3d8559ac2bf
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/dataset_base.py
@@ -0,0 +1,147 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from collections import defaultdict
+from dataclasses import dataclass
+from typing import (
+ ClassVar,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+)
+
+import torch
+
+from pytorch3d.implicitron.dataset.frame_data import FrameData
+from pytorch3d.implicitron.dataset.utils import GenericWorkaround
+
+
@dataclass(eq=False)
class DatasetBase(GenericWorkaround, torch.utils.data.Dataset[FrameData]):
    """
    Abstract parent class for datasets used with Implicitron.

    A dataset is a collection of frames grouped into named sequences
    (a sequence might be a video, or an unordered set of views of one scene).
    Indexing the dataset with __getitem__ yields a FrameData instance
    describing a single frame of a single sequence.
    """

    # Implementations may define _seq_to_idx, mapping each sequence name to
    # the global dataset indices of its frames. The default implementations
    # of several methods below rely on it; overriding implementations are
    # free to ignore it.
    # _seq_to_idx: Dict[str, List[int]] = field(init=False)

    def __len__(self) -> int:
        raise NotImplementedError()

    def get_frame_numbers_and_timestamps(
        self, idxs: Sequence[int], subset_filter: Optional[Sequence[str]] = None
    ) -> List[Tuple[int, float]]:
        """
        Datasets whose sequences are videos (rather than unordered views)
        should override this to return, for each dataset index in `idxs`,
        the frame's index and timestamp within its video. In that case the
        values in _seq_to_idx should be in ascending order, and absent
        timestamps should be replaced with a constant.

        SceneBatchSampler uses this to identify consecutive frames.

        Args:
            idxs: frame indices in self
            subset_filter: If given, an index in idxs is ignored if the
                corresponding frame is not in any of the named subsets.

        Returns:
            tuple of
                - frame index in video
                - timestamp of frame in video
        """
        raise ValueError("This dataset does not contain videos.")

    def join(self, other_datasets: Iterable["DatasetBase"]) -> None:
        """
        Joins the current dataset with a list of other datasets of the same type.
        """
        raise NotImplementedError()

    def get_eval_batches(self) -> Optional[List[List[int]]]:
        # By default a dataset has no predefined evaluation batches.
        return None

    def sequence_names(self) -> Iterable[str]:
        """Returns an iterator over sequence names in the dataset."""
        # pyre-ignore[16]
        return self._seq_to_idx.keys()

    def category_to_sequence_names(self) -> Dict[str, List[str]]:
        """
        Returns a dict mapping from each dataset category to a list of its
        sequence names.

        Returns:
            category_to_sequence_names: Dict {category_i: [..., sequence_name_j, ...]}
        """
        by_category: Dict[str, List[str]] = {}
        for seq_name in self.sequence_names():
            first_idx = next(self.sequence_indices_in_order(seq_name))
            # The category is read off the first frame; this requires the
            # subclass to implement __getitem__.
            category = self[first_idx].sequence_category
            by_category.setdefault(category, []).append(seq_name)
        return by_category

    def sequence_frames_in_order(
        self, seq_name: str, subset_filter: Optional[Sequence[str]] = None
    ) -> Iterator[Tuple[float, int, int]]:
        """Returns an iterator over the frame indices in a given sequence.
        We attempt to first sort by timestamp (if they are available),
        then by frame number.

        Args:
            seq_name: the name of the sequence.

        Returns:
            an iterator over triplets `(timestamp, frame_no, dataset_idx)`,
            where `frame_no` is the index within the sequence, and
            `dataset_idx` is the index within the dataset.
            `None` timestamps are replaced with 0s.
        """
        # pyre-ignore[16]
        frame_idxs = self._seq_to_idx[seq_name]
        numbers_and_timestamps = self.get_frame_numbers_and_timestamps(
            frame_idxs, subset_filter
        )
        # Tuple ordering (timestamp, frame_no, idx) makes plain sort() give
        # timestamp-major, frame-number-minor ordering.
        triplets = [
            (timestamp, frame_no, dataset_idx)
            for dataset_idx, (frame_no, timestamp) in zip(
                frame_idxs, numbers_and_timestamps
            )
        ]
        triplets.sort()
        yield from triplets

    def sequence_indices_in_order(
        self, seq_name: str, subset_filter: Optional[Sequence[str]] = None
    ) -> Iterator[int]:
        """Same as `sequence_frames_in_order` but returns the iterator over
        only dataset indices.
        """
        for _, _, dataset_idx in self.sequence_frames_in_order(seq_name, subset_filter):
            yield dataset_idx

    # frame_data_type is the actual type of frames returned by the dataset.
    # Collation uses its classmethod `collate`.
    frame_data_type: ClassVar[Type[FrameData]] = FrameData
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/frame_data.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/frame_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..631bea8a945d0f27bf482ae32ed93e99a1fa41bb
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/frame_data.py
@@ -0,0 +1,780 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import os
+from abc import ABC, abstractmethod
+from collections import defaultdict
+from dataclasses import dataclass, field, fields
+from typing import (
+ Any,
+ ClassVar,
+ Generic,
+ List,
+ Mapping,
+ Optional,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+)
+
+import numpy as np
+import torch
+
+from pytorch3d.implicitron.dataset import types
+from pytorch3d.implicitron.dataset.utils import (
+ adjust_camera_to_bbox_crop_,
+ adjust_camera_to_image_scale_,
+ bbox_xyxy_to_xywh,
+ clamp_box_to_image_bounds_and_round,
+ crop_around_box,
+ GenericWorkaround,
+ get_bbox_from_mask,
+ get_clamp_bbox,
+ load_depth,
+ load_depth_mask,
+ load_image,
+ load_mask,
+ load_pointcloud,
+ rescale_bbox,
+ resize_image,
+ safe_as_tensor,
+)
+from pytorch3d.implicitron.tools.config import registry, ReplaceableBase
+from pytorch3d.renderer.camera_utils import join_cameras_as_batch
+from pytorch3d.renderer.cameras import CamerasBase, PerspectiveCameras
+from pytorch3d.structures.pointclouds import join_pointclouds_as_batch, Pointclouds
+
+
@dataclass
class FrameData(Mapping[str, Any]):
    """
    A type of the elements returned by indexing the dataset object.
    It can represent both individual frames and batches thereof;
    in this documentation, the sizes of tensors refer to single frames;
    add the first batch dimension for the collation result.

    Args:
        frame_number: The number of the frame within its sequence.
            0-based continuous integers.
        sequence_name: The unique name of the frame's sequence.
        sequence_category: The object category of the sequence.
        frame_timestamp: The time elapsed since the start of a sequence in sec.
        image_size_hw: The size of the original image in pixels; (height, width)
            tensor of shape (2,). Note that it is optional, e.g. it can be `None`
            if the frame annotation has no size and image_rgb has not [yet] been
            loaded. Image-less FrameData is valid but mutators like crop/resize
            may fail if the original image size cannot be deduced.
        effective_image_size_hw: The size of the image after mutations such as
            crop/resize in pixels; (height, width). if the image has not been mutated,
            it is equal to `image_size_hw`. Note that it is also optional, for the
            same reason as `image_size_hw`.
        image_path: The qualified path to the loaded image (with dataset_root).
        image_rgb: A Tensor of shape `(3, H, W)` holding the RGB image
            of the frame; elements are floats in [0, 1].
        mask_crop: A binary mask of shape `(1, H, W)` denoting the valid image
            regions. Regions can be invalid (mask_crop[i,j]=0) in case they
            are a result of zero-padding of the image after cropping around
            the object bounding box; elements are floats in {0.0, 1.0}.
        depth_path: The qualified path to the frame's depth map.
        depth_map: A float Tensor of shape `(1, H, W)` holding the depth map
            of the frame; values correspond to distances from the camera;
            use `depth_mask` and `mask_crop` to filter for valid pixels.
        depth_mask: A binary mask of shape `(1, H, W)` denoting pixels of the
            depth map that are valid for evaluation, they have been checked for
            consistency across views; elements are floats in {0.0, 1.0}.
        mask_path: A qualified path to the foreground probability mask.
        fg_probability: A Tensor of `(1, H, W)` denoting the probability of the
            pixels belonging to the captured object; elements are floats
            in [0, 1].
        bbox_xywh: The bounding box tightly enclosing the foreground object in the
            format (x0, y0, width, height). The convention assumes that
            `x0+width` and `y0+height` includes the boundary of the box.
            I.e., to slice out the corresponding crop from an image tensor `I`
            we execute `crop = I[..., y0:y0+height, x0:x0+width]`
        crop_bbox_xywh: The bounding box denoting the boundaries of `image_rgb`
            in the original image coordinates in the format (x0, y0, width, height).
            The convention is the same as for `bbox_xywh`. `crop_bbox_xywh` differs
            from `bbox_xywh` due to padding (which can happen e.g. due to
            setting `JsonIndexDataset.box_crop_context > 0`)
        camera: A PyTorch3D camera object corresponding to the frame's viewpoint,
            corrected for cropping if it happened.
        camera_quality_score: The score proportional to the confidence of the
            frame's camera estimation (the higher the more accurate).
        point_cloud_quality_score: The score proportional to the accuracy of the
            frame's sequence point cloud (the higher the more accurate).
        sequence_point_cloud_path: The path to the sequence's point cloud.
        sequence_point_cloud: A PyTorch3D Pointclouds object holding the
            point cloud corresponding to the frame's sequence. When the object
            represents a batch of frames, point clouds may be deduplicated;
            see `sequence_point_cloud_idx`.
        sequence_point_cloud_idx: Integer indices mapping frame indices to the
            corresponding point clouds in `sequence_point_cloud`; to get the
            corresponding point cloud to `image_rgb[i]`, use
            `sequence_point_cloud[sequence_point_cloud_idx[i]]`.
        frame_type: The type of the loaded frame specified in
            `subset_lists_file`, if provided.
        meta: A dict for storing additional frame information.
    """

    frame_number: Optional[torch.LongTensor]
    sequence_name: Union[str, List[str]]
    sequence_category: Union[str, List[str]]
    frame_timestamp: Optional[torch.Tensor] = None
    image_size_hw: Optional[torch.LongTensor] = None
    effective_image_size_hw: Optional[torch.LongTensor] = None
    image_path: Union[str, List[str], None] = None
    image_rgb: Optional[torch.Tensor] = None
    # masks out padding added due to cropping the square bit
    mask_crop: Optional[torch.Tensor] = None
    depth_path: Union[str, List[str], None] = None
    depth_map: Optional[torch.Tensor] = None
    depth_mask: Optional[torch.Tensor] = None
    mask_path: Union[str, List[str], None] = None
    fg_probability: Optional[torch.Tensor] = None
    bbox_xywh: Optional[torch.Tensor] = None
    crop_bbox_xywh: Optional[torch.Tensor] = None
    camera: Optional[PerspectiveCameras] = None
    camera_quality_score: Optional[torch.Tensor] = None
    point_cloud_quality_score: Optional[torch.Tensor] = None
    sequence_point_cloud_path: Union[str, List[str], None] = None
    sequence_point_cloud: Optional[Pointclouds] = None
    sequence_point_cloud_idx: Optional[torch.Tensor] = None
    frame_type: Union[str, List[str], None] = None  # known | unseen
    meta: dict = field(default_factory=lambda: {})

    # NOTE that batching resets this attribute
    _uncropped: bool = field(init=False, default=True)

    def to(self, *args, **kwargs):
        """
        Return a copy of self with all tensor-like members (Tensor, Pointclouds,
        CamerasBase) moved via `.to(*args, **kwargs)`; other members are shared.
        """
        new_params = {}
        for field_name in iter(self):
            value = getattr(self, field_name)
            if isinstance(value, (torch.Tensor, Pointclouds, CamerasBase)):
                new_params[field_name] = value.to(*args, **kwargs)
            else:
                new_params[field_name] = value
        frame_data = type(self)(**new_params)
        # _uncropped is init=False, so it must be carried over explicitly.
        frame_data._uncropped = self._uncropped
        return frame_data

    def cpu(self):
        """Return a copy of self with tensor members on the CPU."""
        return self.to(device=torch.device("cpu"))

    def cuda(self):
        """Return a copy of self with tensor members on the default CUDA device."""
        return self.to(device=torch.device("cuda"))

    # the following functions make sure **frame_data can be passed to functions
    def __iter__(self):
        # Iterate over public dataclass field names (skips e.g. _uncropped).
        for f in fields(self):
            if f.name.startswith("_"):
                continue

            yield f.name

    def __getitem__(self, key):
        return getattr(self, key)

    def __len__(self):
        # Number of public fields, consistent with __iter__.
        return sum(1 for f in iter(self))

    def crop_by_metadata_bbox_(
        self,
        box_crop_context: float,
    ) -> None:
        """Crops the frame data in-place by (possibly expanded) bounding box.
        The bounding box is taken from the object state (usually taken from
        the frame annotation or estimated from the foreground mask).
        If the expanded bounding box does not fit the image, it is clamped,
        i.e. the image is *not* padded.

        Args:
            box_crop_context: rate of expansion for bbox; 0 means no expansion,

        Raises:
            ValueError: If the object does not contain a bounding box (usually when no
                mask annotation is provided)
            ValueError: If the frame data have been cropped or resized, thus the intrinsic
                bounding box is not valid for the current image size.
            ValueError: If the frame does not have an image size (usually a corner case
                when no image has been loaded)
        """
        if self.bbox_xywh is None:
            raise ValueError(
                "Attempted cropping by metadata with empty bounding box. Consider either"
                " to remove_empty_masks or turn off box_crop in the dataset config."
            )

        # bbox_xywh is defined in original-image coordinates, so it is only
        # valid while the frame is still uncropped/unresized.
        if not self._uncropped:
            raise ValueError(
                "Trying to apply the metadata bounding box to already cropped "
                "or resized image; coordinates have changed."
            )

        self._crop_by_bbox_(
            box_crop_context,
            self.bbox_xywh,
        )

    def crop_by_given_bbox_(
        self,
        box_crop_context: float,
        bbox_xywh: torch.Tensor,
    ) -> None:
        """Crops the frame data in-place by (possibly expanded) bounding box.
        If the expanded bounding box does not fit the image, it is clamped,
        i.e. the image is *not* padded.

        Args:
            box_crop_context: rate of expansion for bbox; 0 means no expansion,
            bbox_xywh: bounding box in [x0, y0, width, height] format. If float
                tensor, values are floored (after converting to [x0, y0, x1, y1]).

        Raises:
            ValueError: If the frame does not have an image size (usually a corner case
                when no image has been loaded)
        """
        self._crop_by_bbox_(
            box_crop_context,
            bbox_xywh,
        )

    def _crop_by_bbox_(
        self,
        box_crop_context: float,
        bbox_xywh: torch.Tensor,
    ) -> None:
        """Crops the frame data in-place by (possibly expanded) bounding box.
        If the expanded bounding box does not fit the image, it is clamped,
        i.e. the image is *not* padded.

        Args:
            box_crop_context: rate of expansion for bbox; 0 means no expansion,
            bbox_xywh: bounding box in [x0, y0, width, height] format. If float
                tensor, values are floored (after converting to [x0, y0, x1, y1]).

        Raises:
            ValueError: If the frame does not have an image size (usually a corner case
                when no image has been loaded)
        """
        effective_image_size_hw = self.effective_image_size_hw
        if effective_image_size_hw is None:
            raise ValueError("Calling crop on image-less FrameData")

        # Expand the box by box_crop_context and clamp it to the image bounds.
        bbox_xyxy = get_clamp_bbox(
            bbox_xywh,
            image_path=self.image_path,  # pyre-ignore
            box_crop_context=box_crop_context,
        )
        clamp_bbox_xyxy = clamp_box_to_image_bounds_and_round(
            bbox_xyxy,
            image_size_hw=tuple(self.effective_image_size_hw),  # pyre-ignore
        )
        crop_bbox_xywh = bbox_xyxy_to_xywh(clamp_bbox_xyxy)
        self.crop_bbox_xywh = crop_bbox_xywh

        if self.fg_probability is not None:
            self.fg_probability = crop_around_box(
                self.fg_probability,
                clamp_bbox_xyxy,
                self.mask_path,  # pyre-ignore
            )
        if self.image_rgb is not None:
            self.image_rgb = crop_around_box(
                self.image_rgb,
                clamp_bbox_xyxy,
                self.image_path,  # pyre-ignore
            )

        # Depth maps/masks may be stored at a different resolution from the
        # RGB image, so the box is rescaled to their size before cropping.
        depth_map = self.depth_map
        if depth_map is not None:
            clamp_bbox_xyxy_depth = rescale_bbox(
                clamp_bbox_xyxy, tuple(depth_map.shape[-2:]), effective_image_size_hw
            ).long()
            self.depth_map = crop_around_box(
                depth_map,
                clamp_bbox_xyxy_depth,
                self.depth_path,  # pyre-ignore
            )

        depth_mask = self.depth_mask
        if depth_mask is not None:
            clamp_bbox_xyxy_depth = rescale_bbox(
                clamp_bbox_xyxy, tuple(depth_mask.shape[-2:]), effective_image_size_hw
            ).long()
            self.depth_mask = crop_around_box(
                depth_mask,
                clamp_bbox_xyxy_depth,
                self.mask_path,  # pyre-ignore
            )

        # changing principal_point according to bbox_crop
        if self.camera is not None:
            adjust_camera_to_bbox_crop_(
                camera=self.camera,
                image_size_wh=effective_image_size_hw.flip(dims=[-1]),
                clamp_bbox_xywh=crop_bbox_xywh,
            )

        # The new effective size is the (height, width) of the crop box.
        # pyre-ignore
        self.effective_image_size_hw = crop_bbox_xywh[..., 2:].flip(dims=[-1])
        self._uncropped = False

    def resize_frame_(self, new_size_hw: torch.LongTensor) -> None:
        """Resizes frame data in-place according to given dimensions.

        Args:
            new_size_hw: target image size [height, width], a LongTensor of shape (2,)

        Raises:
            ValueError: If the frame does not have an image size (usually a corner case
                when no image has been loaded)
        """

        effective_image_size_hw = self.effective_image_size_hw
        if effective_image_size_hw is None:
            raise ValueError("Calling resize on image-less FrameData")

        image_height, image_width = new_size_hw.tolist()

        # Masks and depth data use nearest-neighbour resizing to avoid
        # interpolating between valid and invalid values.
        if self.fg_probability is not None:
            self.fg_probability, _, _ = resize_image(
                self.fg_probability,
                image_height=image_height,
                image_width=image_width,
                mode="nearest",
            )

        if self.image_rgb is not None:
            self.image_rgb, _, self.mask_crop = resize_image(
                self.image_rgb, image_height=image_height, image_width=image_width
            )

        if self.depth_map is not None:
            self.depth_map, _, _ = resize_image(
                self.depth_map,
                image_height=image_height,
                image_width=image_width,
                mode="nearest",
            )

        if self.depth_mask is not None:
            self.depth_mask, _, _ = resize_image(
                self.depth_mask,
                image_height=image_height,
                image_width=image_width,
                mode="nearest",
            )

        if self.camera is not None:
            if self.image_size_hw is None:
                raise ValueError(
                    "image_size_hw has to be defined for resizing FrameData with cameras."
                )
            adjust_camera_to_image_scale_(
                camera=self.camera,
                original_size_wh=effective_image_size_hw.flip(dims=[-1]),
                new_size_wh=new_size_hw.flip(dims=[-1]),  # pyre-ignore
            )

        self.effective_image_size_hw = new_size_hw
        self._uncropped = False

    @classmethod
    def collate(cls, batch):
        """
        Given a list objects `batch` of class `cls`, collates them into a batched
        representation suitable for processing with deep networks.
        """

        elem = batch[0]

        if isinstance(elem, cls):
            # Deduplicate point clouds shared (by object identity) between
            # frames of the same sequence, and record for each frame the index
            # of its point cloud in the deduplicated list.
            pointcloud_ids = [id(el.sequence_point_cloud) for el in batch]
            id_to_idx = defaultdict(list)
            for i, pc_id in enumerate(pointcloud_ids):
                id_to_idx[pc_id].append(i)

            sequence_point_cloud = []
            sequence_point_cloud_idx = -np.ones((len(batch),))
            for i, ind in enumerate(id_to_idx.values()):
                sequence_point_cloud_idx[ind] = i
                sequence_point_cloud.append(batch[ind[0]].sequence_point_cloud)
            assert (sequence_point_cloud_idx >= 0).all()

            override_fields = {
                "sequence_point_cloud": sequence_point_cloud,
                "sequence_point_cloud_idx": sequence_point_cloud_idx.tolist(),
            }
            # note that the pre-collate value of sequence_point_cloud_idx is unused

            collated = {}
            for f in fields(elem):
                # Skip init=False fields (e.g. _uncropped) — they cannot be
                # passed to the constructor.
                if not f.init:
                    continue

                list_values = override_fields.get(
                    f.name, [getattr(d, f.name) for d in batch]
                )
                # A field collates to None if it is None in any batch element.
                collated[f.name] = (
                    cls.collate(list_values)
                    if all(list_value is not None for list_value in list_values)
                    else None
                )
            return cls(**collated)

        elif isinstance(elem, Pointclouds):
            return join_pointclouds_as_batch(batch)

        elif isinstance(elem, CamerasBase):
            # TODO: don't store K; enforce working in NDC space
            return join_cameras_as_batch(batch)
        else:
            # Plain tensors / strings / numbers fall back to the default
            # PyTorch collation.
            return torch.utils.data.dataloader.default_collate(batch)
+
+
# Type variable bound to FrameData, used to parametrize frame-data builders
# over the concrete FrameData subtype they produce.
FrameDataSubtype = TypeVar("FrameDataSubtype", bound=FrameData)
+
+
class FrameDataBuilderBase(ReplaceableBase, Generic[FrameDataSubtype], ABC):
    """
    Replaceable base for builders that assemble a FrameData object and load and
    process its binary data (crop and resize). Implementations parametrize the
    generic with a FrameData subtype, point the `frame_data_type` class variable
    at that subtype, and implement the `build` method.
    """

    # Concrete subclasses initialise this to their FrameDataSubtype.
    frame_data_type: ClassVar[Type[FrameDataSubtype]]

    @abstractmethod
    def build(
        self,
        frame_annotation: types.FrameAnnotation,
        sequence_annotation: types.SequenceAnnotation,
        *,
        load_blobs: bool = True,
        **kwargs,
    ) -> FrameDataSubtype:
        """
        Construct the frame data from raw frame/sequence annotations, loading
        the binary data and adjusting it according to the metadata.
        """
        raise NotImplementedError()
+
+
+class GenericFrameDataBuilder(FrameDataBuilderBase[FrameDataSubtype], ABC):
+ """
+ A class to build a FrameData object, load and process the binary data (crop and
+ resize). This is an abstract class for extending to build FrameData subtypes. Most
+ users need to use concrete `FrameDataBuilder` class instead.
+ Beware that modifications of frame data are done in-place.
+
+ Args:
+ dataset_root: The root folder of the dataset; all paths in frame / sequence
+ annotations are defined w.r.t. this root. Has to be set if any of the
+ load_* flabs below is true.
+ load_images: Enable loading the frame RGB data.
+ load_depths: Enable loading the frame depth maps.
+ load_depth_masks: Enable loading the frame depth map masks denoting the
+ depth values used for evaluation (the points consistent across views).
+ load_masks: Enable loading frame foreground masks.
+ load_point_clouds: Enable loading sequence-level point clouds.
+ max_points: Cap on the number of loaded points in the point cloud;
+ if reached, they are randomly sampled without replacement.
+ mask_images: Whether to mask the images with the loaded foreground masks;
+ 0 value is used for background.
+ mask_depths: Whether to mask the depth maps with the loaded foreground
+ masks; 0 value is used for background.
+ image_height: The height of the returned images, masks, and depth maps;
+ aspect ratio is preserved during cropping/resizing.
+ image_width: The width of the returned images, masks, and depth maps;
+ aspect ratio is preserved during cropping/resizing.
+ box_crop: Enable cropping of the image around the bounding box inferred
+ from the foreground region of the loaded segmentation mask; masks
+ and depth maps are cropped accordingly; cameras are corrected.
+ box_crop_mask_thr: The threshold used to separate pixels into foreground
+ and background based on the foreground_probability mask; if no value
+ is greater than this threshold, the loader lowers it and repeats.
+ box_crop_context: The amount of additional padding added to each
+ dimension of the cropping bounding box, relative to box size.
+ path_manager: Optionally a PathManager for interpreting paths in a special way.
+ """
+
+ dataset_root: Optional[str] = None
+ load_images: bool = True
+ load_depths: bool = True
+ load_depth_masks: bool = True
+ load_masks: bool = True
+ load_point_clouds: bool = False
+ max_points: int = 0
+ mask_images: bool = False
+ mask_depths: bool = False
+ image_height: Optional[int] = 800
+ image_width: Optional[int] = 800
+ box_crop: bool = True
+ box_crop_mask_thr: float = 0.4
+ box_crop_context: float = 0.3
+ path_manager: Any = None
+
+ def __post_init__(self) -> None:
+ load_any_blob = (
+ self.load_images
+ or self.load_depths
+ or self.load_depth_masks
+ or self.load_masks
+ or self.load_point_clouds
+ )
+ if load_any_blob and self.dataset_root is None:
+ raise ValueError(
+ "dataset_root must be set to load any blob data. "
+ "Make sure it is set in either FrameDataBuilder or Dataset params."
+ )
+
+ if load_any_blob and not self._exists_in_dataset_root(""):
+ raise ValueError(
+ f"dataset_root is passed but {self.dataset_root} does not exist."
+ )
+
+ def build(
+ self,
+ frame_annotation: types.FrameAnnotation,
+ sequence_annotation: types.SequenceAnnotation,
+ *,
+ load_blobs: bool = True,
+ **kwargs,
+ ) -> FrameDataSubtype:
+ """Builds the frame data based on raw frame/sequence annotations, loads the
+ binary data and adjust them according to the metadata. The processing includes:
+ * if box_crop is set, the image/mask/depth are cropped with the bounding
+ box provided or estimated from MaskAnnotation,
+ * if image_height/image_width are set, the image/mask/depth are resized to
+ fit that resolution. Note that the aspect ratio is preserved, and the
+ (possibly cropped) image is pasted into the top-left corner. In the
+ resulting frame_data, mask_crop field corresponds to the mask of the
+ pasted image.
+
+ Args:
+ frame_annotation: frame annotation
+ sequence_annotation: sequence annotation
+ load_blobs: if the function should attempt loading the image, depth map
+ and mask, and foreground mask
+
+ Returns:
+ The constructed FrameData object.
+ """
+
+ point_cloud = sequence_annotation.point_cloud
+
+ frame_data = self.frame_data_type(
+ frame_number=safe_as_tensor(frame_annotation.frame_number, torch.long),
+ frame_timestamp=safe_as_tensor(
+ frame_annotation.frame_timestamp, torch.float
+ ),
+ sequence_name=frame_annotation.sequence_name,
+ sequence_category=sequence_annotation.category,
+ camera_quality_score=safe_as_tensor(
+ sequence_annotation.viewpoint_quality_score, torch.float
+ ),
+ point_cloud_quality_score=(
+ safe_as_tensor(point_cloud.quality_score, torch.float)
+ if point_cloud is not None
+ else None
+ ),
+ )
+
+ fg_mask_np: Optional[np.ndarray] = None
+ mask_annotation = frame_annotation.mask
+ if mask_annotation is not None:
+ if load_blobs and self.load_masks:
+ fg_mask_np, mask_path = self._load_fg_probability(frame_annotation)
+ frame_data.mask_path = mask_path
+ frame_data.fg_probability = safe_as_tensor(fg_mask_np, torch.float)
+
+ bbox_xywh = mask_annotation.bounding_box_xywh
+ if bbox_xywh is None and fg_mask_np is not None:
+ bbox_xywh = get_bbox_from_mask(fg_mask_np, self.box_crop_mask_thr)
+
+ frame_data.bbox_xywh = safe_as_tensor(bbox_xywh, torch.float)
+
+ if frame_annotation.image is not None:
+ image_size_hw = safe_as_tensor(frame_annotation.image.size, torch.long)
+ frame_data.image_size_hw = image_size_hw # original image size
+ # image size after crop/resize
+ frame_data.effective_image_size_hw = image_size_hw
+ image_path = None
+ dataset_root = self.dataset_root
+ if frame_annotation.image.path is not None and dataset_root is not None:
+ image_path = os.path.join(dataset_root, frame_annotation.image.path)
+ frame_data.image_path = image_path
+
+ if load_blobs and self.load_images:
+ if image_path is None:
+ raise ValueError("Image path is required to load images.")
+
+ image_np = load_image(self._local_path(image_path))
+ frame_data.image_rgb = self._postprocess_image(
+ image_np, frame_annotation.image.size, frame_data.fg_probability
+ )
+
+ if (
+ load_blobs
+ and self.load_depths
+ and frame_annotation.depth is not None
+ and frame_annotation.depth.path is not None
+ ):
+ (
+ frame_data.depth_map,
+ frame_data.depth_path,
+ frame_data.depth_mask,
+ ) = self._load_mask_depth(frame_annotation, fg_mask_np)
+
+ if load_blobs and self.load_point_clouds and point_cloud is not None:
+ pcl_path = self._fix_point_cloud_path(point_cloud.path)
+ frame_data.sequence_point_cloud = load_pointcloud(
+ self._local_path(pcl_path), max_points=self.max_points
+ )
+ frame_data.sequence_point_cloud_path = pcl_path
+
+ if frame_annotation.viewpoint is not None:
+ frame_data.camera = self._get_pytorch3d_camera(frame_annotation)
+
+ if self.box_crop:
+ frame_data.crop_by_metadata_bbox_(self.box_crop_context)
+
+ if self.image_height is not None and self.image_width is not None:
+ new_size = (self.image_height, self.image_width)
+ frame_data.resize_frame_(
+ new_size_hw=torch.tensor(new_size, dtype=torch.long), # pyre-ignore
+ )
+
+ return frame_data
+
+ def _load_fg_probability(
+ self, entry: types.FrameAnnotation
+ ) -> Tuple[np.ndarray, str]:
+ assert self.dataset_root is not None and entry.mask is not None
+ full_path = os.path.join(self.dataset_root, entry.mask.path)
+ fg_probability = load_mask(self._local_path(full_path))
+ if fg_probability.shape[-2:] != entry.image.size:
+ raise ValueError(
+ f"bad mask size: {fg_probability.shape[-2:]} vs {entry.image.size}!"
+ )
+
+ return fg_probability, full_path
+
+ def _postprocess_image(
+ self,
+ image_np: np.ndarray,
+ image_size: Tuple[int, int],
+ fg_probability: Optional[torch.Tensor],
+ ) -> torch.Tensor:
+ image_rgb = safe_as_tensor(image_np, torch.float)
+
+ if image_rgb.shape[-2:] != image_size:
+ raise ValueError(f"bad image size: {image_rgb.shape[-2:]} vs {image_size}!")
+
+ if self.mask_images:
+ assert fg_probability is not None
+ image_rgb *= fg_probability
+
+ return image_rgb
+
+ def _load_mask_depth(
+ self,
+ entry: types.FrameAnnotation,
+ fg_mask: Optional[np.ndarray],
+ ) -> Tuple[torch.Tensor, str, torch.Tensor]:
+ entry_depth = entry.depth
+ dataset_root = self.dataset_root
+ assert dataset_root is not None
+ assert entry_depth is not None and entry_depth.path is not None
+ path = os.path.join(dataset_root, entry_depth.path)
+ depth_map = load_depth(self._local_path(path), entry_depth.scale_adjustment)
+
+ if self.mask_depths:
+ assert fg_mask is not None
+ depth_map *= fg_mask
+
+ mask_path = entry_depth.mask_path
+ if self.load_depth_masks and mask_path is not None:
+ mask_path = os.path.join(dataset_root, mask_path)
+ depth_mask = load_depth_mask(self._local_path(mask_path))
+ else:
+ depth_mask = (depth_map > 0.0).astype(np.float32)
+
+ return torch.tensor(depth_map), path, torch.tensor(depth_mask)
+
+ def _get_pytorch3d_camera(
+ self,
+ entry: types.FrameAnnotation,
+ ) -> PerspectiveCameras:
+ entry_viewpoint = entry.viewpoint
+ assert entry_viewpoint is not None
+ # principal point and focal length
+ principal_point = torch.tensor(
+ entry_viewpoint.principal_point, dtype=torch.float
+ )
+ focal_length = torch.tensor(entry_viewpoint.focal_length, dtype=torch.float)
+
+ format = entry_viewpoint.intrinsics_format
+ if entry_viewpoint.intrinsics_format == "ndc_norm_image_bounds":
+ # legacy PyTorch3D NDC format
+ # convert to pixels unequally and convert to ndc equally
+ image_size_as_list = list(reversed(entry.image.size))
+ image_size_wh = torch.tensor(image_size_as_list, dtype=torch.float)
+ per_axis_scale = image_size_wh / image_size_wh.min()
+ focal_length = focal_length * per_axis_scale
+ principal_point = principal_point * per_axis_scale
+ elif entry_viewpoint.intrinsics_format != "ndc_isotropic":
+ raise ValueError(f"Unknown intrinsics format: {format}")
+
+ return PerspectiveCameras(
+ focal_length=focal_length[None],
+ principal_point=principal_point[None],
+ R=torch.tensor(entry_viewpoint.R, dtype=torch.float)[None],
+ T=torch.tensor(entry_viewpoint.T, dtype=torch.float)[None],
+ )
+
+ def _fix_point_cloud_path(self, path: str) -> str:
+ """
+ Fix up a point cloud path from the dataset.
+ Some files in Co3Dv2 have an accidental absolute path stored.
+ """
+ unwanted_prefix = (
+ "/large_experiments/p3/replay/datasets/co3d/co3d45k_220512/export_v23/"
+ )
+ if path.startswith(unwanted_prefix):
+ path = path[len(unwanted_prefix) :]
+ assert self.dataset_root is not None
+ return os.path.join(self.dataset_root, path)
+
+ def _local_path(self, path: str) -> str:
+ if self.path_manager is None:
+ return path
+ return self.path_manager.get_local_path(path)
+
+ def _exists_in_dataset_root(self, relpath) -> bool:
+ if not self.dataset_root:
+ return False
+
+ full_path = os.path.join(self.dataset_root, relpath)
+ if self.path_manager is None:
+ return os.path.exists(full_path)
+ else:
+ return self.path_manager.exists(full_path)
+
+
+@registry.register
+class FrameDataBuilder(GenericWorkaround, GenericFrameDataBuilder[FrameData]):
+ """
+ A concrete class to build a FrameData object, load and process the binary data (crop
+ and resize). Beware that modifications of frame data are done in-place. Please see
+ the documentation for `GenericFrameDataBuilder` for the description of parameters
+ and methods.
+ """
+
+ frame_data_type: ClassVar[Type[FrameData]] = FrameData
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/json_index_dataset.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/json_index_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..69a7859aa77b887997555fd888a4411ff0937d56
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/json_index_dataset.py
@@ -0,0 +1,671 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import copy
+import functools
+import gzip
+import hashlib
+import json
+import logging
+import os
+import random
+import warnings
+from collections import defaultdict
+from itertools import islice
+from typing import (
+ Any,
+ ClassVar,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ TYPE_CHECKING,
+ Union,
+)
+
+from pytorch3d.implicitron.dataset import types
+from pytorch3d.implicitron.dataset.dataset_base import DatasetBase
+from pytorch3d.implicitron.dataset.frame_data import FrameData, FrameDataBuilder
+from pytorch3d.implicitron.dataset.utils import is_known_frame_scalar
+from pytorch3d.implicitron.tools.config import registry, ReplaceableBase
+from pytorch3d.renderer.camera_utils import join_cameras_as_batch
+from pytorch3d.renderer.cameras import CamerasBase
+
+from tqdm import tqdm
+
+
+logger = logging.getLogger(__name__)
+
+
+if TYPE_CHECKING:
+ from typing import TypedDict
+
+ class FrameAnnotsEntry(TypedDict):
+ subset: Optional[str]
+ frame_annotation: types.FrameAnnotation
+
+else:
+ FrameAnnotsEntry = dict
+
+
+@registry.register
+class JsonIndexDataset(DatasetBase, ReplaceableBase):
+ """
+ A dataset with annotations in json files like the Common Objects in 3D
+ (CO3D) dataset.
+
+ Metadata-related args::
+ frame_annotations_file: A zipped json file containing metadata of the
+ frames in the dataset, serialized List[types.FrameAnnotation].
+ sequence_annotations_file: A zipped json file containing metadata of the
+ sequences in the dataset, serialized List[types.SequenceAnnotation].
+ subset_lists_file: A json file containing the lists of frames
+ corresponding to different subsets (e.g. train/val/test) of the dataset;
+ format: {subset: (sequence_name, frame_id, file_path)}.
+ subsets: Restrict frames/sequences only to the given list of subsets
+ as defined in subset_lists_file (see above).
+ limit_to: Limit the dataset to the first #limit_to frames (after other
+ filters have been applied).
+ limit_sequences_to: Limit the dataset to the first
+ #limit_sequences_to sequences (after other sequence filters have been
+ applied but before frame-based filters).
+ pick_sequence: A list of sequence names to restrict the dataset to.
+ exclude_sequence: A list of the names of the sequences to exclude.
+ limit_category_to: Restrict the dataset to the given list of categories.
+ remove_empty_masks: Removes the frames with no active foreground pixels
+ in the segmentation mask after thresholding (see box_crop_mask_thr).
+ n_frames_per_sequence: If > 0, randomly samples #n_frames_per_sequence
+ frames in each sequences uniformly without replacement if it has
+ more frames than that; applied before other frame-level filters.
+ seed: The seed of the random generator sampling #n_frames_per_sequence
+ random frames per sequence.
+ sort_frames: Enable frame annotations sorting to group frames from the
+ same sequences together and order them by timestamps.
+ eval_batches: A list of batches that form the evaluation set;
+ list of batch-sized lists of indices corresponding to __getitem__
+ of this class, thus it can be used directly as a batch sampler.
+ eval_batch_index:
+ ( Optional[List[List[Union[Tuple[str, int, str], Tuple[str, int]]]] )
+ A list of batches of frames described as (sequence_name, frame_idx)
+ that can form the evaluation set, `eval_batches` will be set from this.
+
+ Blob-loading parameters:
+ dataset_root: The root folder of the dataset; all the paths in jsons are
+ specified relative to this root (but not json paths themselves).
+ load_images: Enable loading the frame RGB data.
+ load_depths: Enable loading the frame depth maps.
+ load_depth_masks: Enable loading the frame depth map masks denoting the
+ depth values used for evaluation (the points consistent across views).
+ load_masks: Enable loading frame foreground masks.
+ load_point_clouds: Enable loading sequence-level point clouds.
+ max_points: Cap on the number of loaded points in the point cloud;
+ if reached, they are randomly sampled without replacement.
+ mask_images: Whether to mask the images with the loaded foreground masks;
+ 0 value is used for background.
+ mask_depths: Whether to mask the depth maps with the loaded foreground
+ masks; 0 value is used for background.
+ image_height: The height of the returned images, masks, and depth maps;
+ aspect ratio is preserved during cropping/resizing.
+ image_width: The width of the returned images, masks, and depth maps;
+ aspect ratio is preserved during cropping/resizing.
+ box_crop: Enable cropping of the image around the bounding box inferred
+ from the foreground region of the loaded segmentation mask; masks
+ and depth maps are cropped accordingly; cameras are corrected.
+ box_crop_mask_thr: The threshold used to separate pixels into foreground
+ and background based on the foreground_probability mask; if no value
+ is greater than this threshold, the loader lowers it and repeats.
+ box_crop_context: The amount of additional padding added to each
+ dimension of the cropping bounding box, relative to box size.
+ """
+
+ frame_annotations_type: ClassVar[Type[types.FrameAnnotation]] = (
+ types.FrameAnnotation
+ )
+
+ path_manager: Any = None
+ frame_annotations_file: str = ""
+ sequence_annotations_file: str = ""
+ subset_lists_file: str = ""
+ subsets: Optional[List[str]] = None
+ limit_to: int = 0
+ limit_sequences_to: int = 0
+ pick_sequence: Tuple[str, ...] = ()
+ exclude_sequence: Tuple[str, ...] = ()
+ limit_category_to: Tuple[int, ...] = ()
+ dataset_root: str = ""
+ load_images: bool = True
+ load_depths: bool = True
+ load_depth_masks: bool = True
+ load_masks: bool = True
+ load_point_clouds: bool = False
+ max_points: int = 0
+ mask_images: bool = False
+ mask_depths: bool = False
+ image_height: Optional[int] = 800
+ image_width: Optional[int] = 800
+ box_crop: bool = True
+ box_crop_mask_thr: float = 0.4
+ box_crop_context: float = 0.3
+ remove_empty_masks: bool = True
+ n_frames_per_sequence: int = -1
+ seed: int = 0
+ sort_frames: bool = False
+ eval_batches: Any = None
+ eval_batch_index: Any = None
+ # initialised in __post_init__
+ # commented because of OmegaConf (for tests to pass)
+ # _frame_data_builder: FrameDataBuilder = field(init=False)
+ # frame_annots: List[FrameAnnotsEntry] = field(init=False)
+ # seq_annots: Dict[str, types.SequenceAnnotation] = field(init=False)
+ # _seq_to_idx: Dict[str, List[int]] = field(init=False)
+
+ def __post_init__(self) -> None:
+ self._load_frames()
+ self._load_sequences()
+ if self.sort_frames:
+ self._sort_frames()
+ self._load_subset_lists()
+ self._filter_db() # also computes sequence indices
+ self._extract_and_set_eval_batches()
+
+ # pyre-ignore
+ self._frame_data_builder = FrameDataBuilder(
+ dataset_root=self.dataset_root,
+ load_images=self.load_images,
+ load_depths=self.load_depths,
+ load_depth_masks=self.load_depth_masks,
+ load_masks=self.load_masks,
+ load_point_clouds=self.load_point_clouds,
+ max_points=self.max_points,
+ mask_images=self.mask_images,
+ mask_depths=self.mask_depths,
+ image_height=self.image_height,
+ image_width=self.image_width,
+ box_crop=self.box_crop,
+ box_crop_mask_thr=self.box_crop_mask_thr,
+ box_crop_context=self.box_crop_context,
+ path_manager=self.path_manager,
+ )
+ logger.info(str(self))
+
+ def _extract_and_set_eval_batches(self) -> None:
+ """
+ Sets eval_batches based on input eval_batch_index.
+ """
+ if self.eval_batch_index is not None:
+ if self.eval_batches is not None:
+ raise ValueError(
+ "Cannot define both eval_batch_index and eval_batches."
+ )
+ self.eval_batches = self.seq_frame_index_to_dataset_index(
+ self.eval_batch_index
+ )
+
+ def join(self, other_datasets: Iterable[DatasetBase]) -> None:
+ """
+ Join the dataset with other JsonIndexDataset objects.
+
+ Args:
+ other_datasets: A list of JsonIndexDataset objects to be joined
+ into the current dataset.
+ """
+ if not all(isinstance(d, JsonIndexDataset) for d in other_datasets):
+ raise ValueError("This function can only join a list of JsonIndexDataset")
+ # pyre-ignore[16]
+ self.frame_annots.extend([fa for d in other_datasets for fa in d.frame_annots])
+ # pyre-ignore[16]
+ self.seq_annots.update(
+ # https://gist.github.com/treyhunner/f35292e676efa0be1728
+ functools.reduce(
+ lambda a, b: {**a, **b},
+ # pyre-ignore[16]
+ [d.seq_annots for d in other_datasets],
+ )
+ )
+ all_eval_batches = [
+ self.eval_batches,
+ *[d.eval_batches for d in other_datasets], # pyre-ignore[16]
+ ]
+ if not (
+ all(ba is None for ba in all_eval_batches)
+ or all(ba is not None for ba in all_eval_batches)
+ ):
+ raise ValueError(
+ "When joining datasets, either all joined datasets have to have their"
+ " eval_batches defined, or all should have their eval batches undefined."
+ )
+ if self.eval_batches is not None:
+ self.eval_batches = sum(all_eval_batches, [])
+ self._invalidate_indexes(filter_seq_annots=True)
+
+ def is_filtered(self) -> bool:
+ """
+ Returns `True` in case the dataset has been filtered and thus some frame annotations
+ stored on the disk might be missing in the dataset object.
+
+ Returns:
+ is_filtered: `True` if the dataset has been filtered, else `False`.
+ """
+ return (
+ self.remove_empty_masks
+ or self.limit_to > 0
+ or self.limit_sequences_to > 0
+ or len(self.pick_sequence) > 0
+ or len(self.exclude_sequence) > 0
+ or len(self.limit_category_to) > 0
+ or self.n_frames_per_sequence > 0
+ )
+
+ def seq_frame_index_to_dataset_index(
+ self,
+ seq_frame_index: List[List[Union[Tuple[str, int, str], Tuple[str, int]]]],
+ allow_missing_indices: bool = False,
+ remove_missing_indices: bool = False,
+ suppress_missing_index_warning: bool = True,
+ ) -> Union[List[List[Optional[int]]], List[List[int]]]:
+ """
+ Obtain indices into the dataset object given a list of frame ids.
+
+ Args:
+ seq_frame_index: The list of frame ids specified as
+ `List[List[Tuple[sequence_name:str, frame_number:int]]]`. Optionally,
+ Image paths relative to the dataset_root can be specified as well:
+ `List[List[Tuple[sequence_name:str, frame_number:int, image_path:str]]]`
+ allow_missing_indices: If `False`, throws an IndexError upon reaching the first
+ entry from `seq_frame_index` which is missing in the dataset.
+ Otherwise, depending on `remove_missing_indices`, either returns `None`
+ in place of missing entries or removes the indices of missing entries.
+ remove_missing_indices: Active when `allow_missing_indices=True`.
+ If `False`, returns `None` in place of `seq_frame_index` entries that
+ are not present in the dataset.
+ If `True` removes missing indices from the returned indices.
+ suppress_missing_index_warning:
+ Active if `allow_missing_indices==True`. Suppresses a warning message
+ in case an entry from `seq_frame_index` is missing in the dataset
+ (expected in certain cases - e.g. when setting
+ `self.remove_empty_masks=True`).
+
+ Returns:
+ dataset_idx: Indices of dataset entries corresponding to `seq_frame_index`.
+ """
+ _dataset_seq_frame_n_index = {
+ seq: {
+ # pyre-ignore[16]
+ self.frame_annots[idx]["frame_annotation"].frame_number: idx
+ for idx in seq_idx
+ }
+ # pyre-ignore[16]
+ for seq, seq_idx in self._seq_to_idx.items()
+ }
+
+ def _get_dataset_idx(
+ seq_name: str, frame_no: int, path: Optional[str] = None
+ ) -> Optional[int]:
+ idx_seq = _dataset_seq_frame_n_index.get(seq_name, None)
+ idx = idx_seq.get(frame_no, None) if idx_seq is not None else None
+ if idx is None:
+ msg = (
+ f"sequence_name={seq_name} / frame_number={frame_no}"
+ " not in the dataset!"
+ )
+ if not allow_missing_indices:
+ raise IndexError(msg)
+ if not suppress_missing_index_warning:
+ warnings.warn(msg)
+ return idx
+ if path is not None:
+ # Check that the loaded frame path is consistent
+ # with the one stored in self.frame_annots.
+ assert os.path.normpath(
+ # pyre-ignore[16]
+ self.frame_annots[idx]["frame_annotation"].image.path
+ ) == os.path.normpath(
+ path
+ ), f"Inconsistent frame indices {seq_name, frame_no, path}."
+ return idx
+
+ dataset_idx = [
+ [_get_dataset_idx(*b) for b in batch] # pyre-ignore [6]
+ for batch in seq_frame_index
+ ]
+
+ if allow_missing_indices and remove_missing_indices:
+ # remove all None indices, and also batches with only None entries
+ valid_dataset_idx = [
+ [b for b in batch if b is not None] for batch in dataset_idx
+ ]
+ return [batch for batch in valid_dataset_idx if len(batch) > 0]
+
+ return dataset_idx
+
+ def subset_from_frame_index(
+ self,
+ frame_index: List[Union[Tuple[str, int], Tuple[str, int, str]]],
+ allow_missing_indices: bool = True,
+ ) -> "JsonIndexDataset":
+ """
+ Generate a dataset subset given the list of frames specified in `frame_index`.
+
+ Args:
+ frame_index: The list of frame identifiers (as stored in the metadata)
+ specified as `List[Tuple[sequence_name:str, frame_number:int]]`. Optionally,
+ Image paths relative to the dataset_root can be specified as well:
+ `List[Tuple[sequence_name:str, frame_number:int, image_path:str]]`,
+ in the latter case, if image_path does not match the stored paths, an error
+ is raised.
+ allow_missing_indices: If `False`, throws an IndexError upon reaching the first
+ entry from `frame_index` which is missing in the dataset.
+ Otherwise, generates a subset consisting of frames entries that actually
+ exist in the dataset.
+ """
+ # Get the indices into the frame annots.
+ dataset_indices = self.seq_frame_index_to_dataset_index(
+ [frame_index],
+ allow_missing_indices=self.is_filtered() and allow_missing_indices,
+ )[0]
+ valid_dataset_indices = [i for i in dataset_indices if i is not None]
+
+ # Deep copy the whole dataset except frame_annots, which are large so we
+ # deep copy only the requested subset of frame_annots.
+ memo = {id(self.frame_annots): None} # pyre-ignore[16]
+ dataset_new = copy.deepcopy(self, memo)
+ dataset_new.frame_annots = copy.deepcopy(
+ [self.frame_annots[i] for i in valid_dataset_indices]
+ )
+
+ # This will kill all unneeded sequence annotations.
+ dataset_new._invalidate_indexes(filter_seq_annots=True)
+
+ # Finally annotate the frame annotations with the name of the subset
+ # stored in meta.
+ for frame_annot in dataset_new.frame_annots:
+ frame_annotation = frame_annot["frame_annotation"]
+ if frame_annotation.meta is not None:
+ frame_annot["subset"] = frame_annotation.meta.get("frame_type", None)
+
+ # A sanity check - this will crash in case some entries from frame_index are missing
+ # in dataset_new.
+ valid_frame_index = [
+ fi for fi, di in zip(frame_index, dataset_indices) if di is not None
+ ]
+ dataset_new.seq_frame_index_to_dataset_index(
+ [valid_frame_index], allow_missing_indices=False
+ )
+
+ return dataset_new
+
+ def __str__(self) -> str:
+ # pyre-ignore[16]
+ return f"JsonIndexDataset #frames={len(self.frame_annots)}"
+
+ def __len__(self) -> int:
+ # pyre-ignore[16]
+ return len(self.frame_annots)
+
+ def _get_frame_type(self, entry: FrameAnnotsEntry) -> Optional[str]:
+ return entry["subset"]
+
+ def get_all_train_cameras(self) -> CamerasBase:
+ """
+ Returns the cameras corresponding to all the known frames.
+ """
+ logger.info("Loading all train cameras.")
+ cameras = []
+ # pyre-ignore[16]
+ for frame_idx, frame_annot in enumerate(tqdm(self.frame_annots)):
+ frame_type = self._get_frame_type(frame_annot)
+ if frame_type is None:
+ raise ValueError("subsets not loaded")
+ if is_known_frame_scalar(frame_type):
+ cameras.append(self[frame_idx].camera)
+ return join_cameras_as_batch(cameras)
+
+ def __getitem__(self, index) -> FrameData:
+ # pyre-ignore[16]
+ if index >= len(self.frame_annots):
+ raise IndexError(f"index {index} out of range {len(self.frame_annots)}")
+
+ entry = self.frame_annots[index]["frame_annotation"]
+
+ # pyre-ignore
+ frame_data = self._frame_data_builder.build(
+ entry,
+ # pyre-ignore
+ self.seq_annots[entry.sequence_name],
+ )
+ # Optional field
+ frame_data.frame_type = self._get_frame_type(self.frame_annots[index])
+
+ return frame_data
+
+ def _load_frames(self) -> None:
+ logger.info(f"Loading Co3D frames from {self.frame_annotations_file}.")
+ local_file = self._local_path(self.frame_annotations_file)
+ with gzip.open(local_file, "rt", encoding="utf8") as zipfile:
+ frame_annots_list = types.load_dataclass(
+ zipfile, List[self.frame_annotations_type]
+ )
+ if not frame_annots_list:
+ raise ValueError("Empty dataset!")
+ # pyre-ignore[16]
+ self.frame_annots = [
+ FrameAnnotsEntry(frame_annotation=a, subset=None) for a in frame_annots_list
+ ]
+
+ def _load_sequences(self) -> None:
+ logger.info(f"Loading Co3D sequences from {self.sequence_annotations_file}.")
+ local_file = self._local_path(self.sequence_annotations_file)
+ with gzip.open(local_file, "rt", encoding="utf8") as zipfile:
+ seq_annots = types.load_dataclass(zipfile, List[types.SequenceAnnotation])
+ if not seq_annots:
+ raise ValueError("Empty sequences file!")
+ # pyre-ignore[16]
+ self.seq_annots = {entry.sequence_name: entry for entry in seq_annots}
+
+ def _load_subset_lists(self) -> None:
+ logger.info(f"Loading Co3D subset lists from {self.subset_lists_file}.")
+ if not self.subset_lists_file:
+ return
+
+ with open(self._local_path(self.subset_lists_file), "r") as f:
+ subset_to_seq_frame = json.load(f)
+
+ frame_path_to_subset = {
+ path: subset
+ for subset, frames in subset_to_seq_frame.items()
+ for _, _, path in frames
+ }
+ # pyre-ignore[16]
+ for frame in self.frame_annots:
+ frame["subset"] = frame_path_to_subset.get(
+ frame["frame_annotation"].image.path, None
+ )
+ if frame["subset"] is None:
+ warnings.warn(
+ "Subset lists are given but don't include "
+ + frame["frame_annotation"].image.path
+ )
+
+ def _sort_frames(self) -> None:
+ # Sort frames to have them grouped by sequence, ordered by timestamp
+ # pyre-ignore[16]
+ self.frame_annots = sorted(
+ self.frame_annots,
+ key=lambda f: (
+ f["frame_annotation"].sequence_name,
+ f["frame_annotation"].frame_timestamp or 0,
+ ),
+ )
+
+ def _filter_db(self) -> None:
+ if self.remove_empty_masks:
+ logger.info("Removing images with empty masks.")
+ # pyre-ignore[16]
+ old_len = len(self.frame_annots)
+
+ msg = "remove_empty_masks needs every MaskAnnotation.mass to be set."
+
+ def positive_mass(frame_annot: types.FrameAnnotation) -> bool:
+ mask = frame_annot.mask
+ if mask is None:
+ return False
+ if mask.mass is None:
+ raise ValueError(msg)
+ return mask.mass > 1
+
+ self.frame_annots = [
+ frame
+ for frame in self.frame_annots
+ if positive_mass(frame["frame_annotation"])
+ ]
+ logger.info("... filtered %d -> %d" % (old_len, len(self.frame_annots)))
+
+ # this has to be called after joining with categories!!
+ subsets = self.subsets
+ if subsets:
+ if not self.subset_lists_file:
+ raise ValueError(
+ "Subset filter is on but subset_lists_file was not given"
+ )
+
+ logger.info(f"Limiting Co3D dataset to the '{subsets}' subsets.")
+
+ # truncate the list of subsets to the valid one
+ self.frame_annots = [
+ entry for entry in self.frame_annots if entry["subset"] in subsets
+ ]
+ if len(self.frame_annots) == 0:
+ raise ValueError(f"There are no frames in the '{subsets}' subsets!")
+
+ self._invalidate_indexes(filter_seq_annots=True)
+
+ if len(self.limit_category_to) > 0:
+ logger.info(f"Limiting dataset to categories: {self.limit_category_to}")
+ # pyre-ignore[16]
+ self.seq_annots = {
+ name: entry
+ for name, entry in self.seq_annots.items()
+ if entry.category in self.limit_category_to
+ }
+
+ # sequence filters
+ for prefix in ("pick", "exclude"):
+ orig_len = len(self.seq_annots)
+ attr = f"{prefix}_sequence"
+ arr = getattr(self, attr)
+ if len(arr) > 0:
+ logger.info(f"{attr}: {str(arr)}")
+ self.seq_annots = {
+ name: entry
+ for name, entry in self.seq_annots.items()
+ if (name in arr) == (prefix == "pick")
+ }
+ logger.info("... filtered %d -> %d" % (orig_len, len(self.seq_annots)))
+
+ if self.limit_sequences_to > 0:
+ self.seq_annots = dict(
+ islice(self.seq_annots.items(), self.limit_sequences_to)
+ )
+
+ # retain only frames from retained sequences
+ self.frame_annots = [
+ f
+ for f in self.frame_annots
+ if f["frame_annotation"].sequence_name in self.seq_annots
+ ]
+
+ self._invalidate_indexes()
+
+ if self.n_frames_per_sequence > 0:
+ logger.info(f"Taking max {self.n_frames_per_sequence} per sequence.")
+ keep_idx = []
+ # pyre-ignore[16]
+ for seq, seq_indices in self._seq_to_idx.items():
+ # infer the seed from the sequence name, this is reproducible
+ # and makes the selection differ for different sequences
+ seed = _seq_name_to_seed(seq) + self.seed
+ seq_idx_shuffled = random.Random(seed).sample(
+ sorted(seq_indices), len(seq_indices)
+ )
+ keep_idx.extend(seq_idx_shuffled[: self.n_frames_per_sequence])
+
+ logger.info(
+ "... filtered %d -> %d" % (len(self.frame_annots), len(keep_idx))
+ )
+ self.frame_annots = [self.frame_annots[i] for i in keep_idx]
+ self._invalidate_indexes(filter_seq_annots=False)
+ # sequences are not decimated, so self.seq_annots is valid
+
+ if self.limit_to > 0 and self.limit_to < len(self.frame_annots):
+ logger.info(
+ "limit_to: filtered %d -> %d" % (len(self.frame_annots), self.limit_to)
+ )
+ self.frame_annots = self.frame_annots[: self.limit_to]
+ self._invalidate_indexes(filter_seq_annots=True)
+
+ def _invalidate_indexes(self, filter_seq_annots: bool = False) -> None:
+ # update _seq_to_idx and filter seq_meta according to frame_annots change
+ # if filter_seq_annots, also updates seq_annots based on the changed _seq_to_idx
+ self._invalidate_seq_to_idx()
+
+ if filter_seq_annots:
+ # pyre-ignore[16]
+ self.seq_annots = {
+ k: v
+ for k, v in self.seq_annots.items()
+ # pyre-ignore[16]
+ if k in self._seq_to_idx
+ }
+
+ def _invalidate_seq_to_idx(self) -> None:
+ seq_to_idx = defaultdict(list)
+ # pyre-ignore[16]
+ for idx, entry in enumerate(self.frame_annots):
+ seq_to_idx[entry["frame_annotation"].sequence_name].append(idx)
+ # pyre-ignore[16]
+ self._seq_to_idx = seq_to_idx
+
+ def _local_path(self, path: str) -> str:
+ if self.path_manager is None:
+ return path
+ return self.path_manager.get_local_path(path)
+
+ def get_frame_numbers_and_timestamps(
+ self, idxs: Sequence[int], subset_filter: Optional[Sequence[str]] = None
+ ) -> List[Tuple[int, float]]:
+ out: List[Tuple[int, float]] = []
+ for idx in idxs:
+ if (
+ subset_filter is not None
+ # pyre-fixme[16]: `JsonIndexDataset` has no attribute `frame_annots`.
+ and self.frame_annots[idx]["subset"] not in subset_filter
+ ):
+ continue
+
+ frame_annotation = self.frame_annots[idx]["frame_annotation"]
+ out.append(
+ (frame_annotation.frame_number, frame_annotation.frame_timestamp)
+ )
+ return out
+
+ def category_to_sequence_names(self) -> Dict[str, List[str]]:
+ c2seq = defaultdict(list)
+ # pyre-ignore
+ for sequence_name, sa in self.seq_annots.items():
+ c2seq[sa.category].append(sequence_name)
+ return dict(c2seq)
+
+ def get_eval_batches(self) -> Optional[List[List[int]]]:
+ return self.eval_batches
+
+
+def _seq_name_to_seed(seq_name) -> int:
+ return int(hashlib.sha1(seq_name.encode("utf-8")).hexdigest(), 16)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/load_llff.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/load_llff.py
new file mode 100644
index 0000000000000000000000000000000000000000..d29b650b7e2e4b602e5d3c92c78fb21974ec6ddf
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/load_llff.py
@@ -0,0 +1,341 @@
+# @lint-ignore-every LICENSELINT
+# Adapted from https://github.com/bmild/nerf/blob/master/load_llff.py
+# Copyright (c) 2020 bmild
+
+# pyre-unsafe
+import logging
+import os
+import warnings
+
+import numpy as np
+
+from PIL import Image
+
+
+# Slightly modified version of LLFF data loading code
+# see https://github.com/Fyusion/LLFF for original
+
+logger = logging.getLogger(__name__)
+
+
def _minify(basedir, path_manager, factors=(), resolutions=()):
    """
    Create downsampled copies of the images in ``basedir/images``.

    For each integer `r` in `factors`, writes `basedir/images_{r}` with images
    scaled to 100/r percent; for each (height, width) pair in `resolutions`,
    writes `basedir/images_{width}x{height}`. Output directories that already
    exist are left untouched. Resizing shells out to ImageMagick's `mogrify`,
    so it requires direct local filesystem access (hence the assertion that
    `path_manager` is None whenever work is actually needed).
    """
    # First check whether all requested outputs already exist.
    needtoload = False
    for r in factors:
        imgdir = os.path.join(basedir, "images_{}".format(r))
        if not _exists(path_manager, imgdir):
            needtoload = True
    for r in resolutions:
        imgdir = os.path.join(basedir, "images_{}x{}".format(r[1], r[0]))
        if not _exists(path_manager, imgdir):
            needtoload = True
    if not needtoload:
        return
    # The cp/mogrify/rm calls below only work on a local filesystem.
    assert path_manager is None

    from subprocess import check_output

    imgdir = os.path.join(basedir, "images")
    imgs = [os.path.join(imgdir, f) for f in sorted(_ls(path_manager, imgdir))]
    # BUGFIX: str.endswith accepts a *tuple* of suffixes; passing the suffixes
    # as separate positional arguments (as before) raises TypeError at runtime.
    imgs = [f for f in imgs if f.endswith(("JPG", "jpg", "png", "jpeg", "PNG"))]
    imgdir_orig = imgdir

    wd = os.getcwd()

    for r in factors + resolutions:
        if isinstance(r, int):
            name = "images_{}".format(r)
            resizearg = "{}%".format(100.0 / r)
        else:
            name = "images_{}x{}".format(r[1], r[0])
            resizearg = "{}x{}".format(r[1], r[0])
        imgdir = os.path.join(basedir, name)
        if os.path.exists(imgdir):
            continue

        logger.info(f"Minifying {r}, {basedir}")

        # Copy the originals into the new directory, then resize in place.
        os.makedirs(imgdir)
        check_output("cp {}/* {}".format(imgdir_orig, imgdir), shell=True)

        ext = imgs[0].split(".")[-1]
        args = " ".join(
            ["mogrify", "-resize", resizearg, "-format", "png", "*.{}".format(ext)]
        )
        logger.info(args)
        os.chdir(imgdir)
        check_output(args, shell=True)
        os.chdir(wd)

        # `mogrify -format png` writes new files; remove the copied originals
        # when they had a different extension.
        if ext != "png":
            check_output("rm {}/*.{}".format(imgdir, ext), shell=True)
            logger.info("Removed duplicates")
        logger.info("Done")
+
+
def _load_data(
    basedir, factor=None, width=None, height=None, load_imgs=True, path_manager=None
):
    """
    Load LLFF camera poses, depth bounds and (optionally) images.

    Args:
        basedir: scene directory containing `poses_bounds.npy` and `images/`.
        factor: integer factor by which to downsample the images.
        height, width: target image height or width; consulted only when
            `factor` is None (the other dimension scales proportionally).
        load_imgs: if False, only (poses, bds) are returned.
        path_manager: optional path manager used to resolve remote paths.

    Returns:
        poses: (3, 5, N) array of per-image [R|t|hwf] matrices.
        bds: (2, N) array of near/far depth bounds.
        imgs: (H, W, 3, N) float array in [0, 1] (only when load_imgs).
    """
    poses_arr = np.load(
        _local_path(path_manager, os.path.join(basedir, "poses_bounds.npy"))
    )
    # Each row of poses_arr is a flattened 3x5 pose matrix plus 2 depth bounds.
    poses = poses_arr[:, :-2].reshape([-1, 3, 5]).transpose([1, 2, 0])
    bds = poses_arr[:, -2:].transpose([1, 0])

    # Use the first original image to determine the source resolution.
    img0 = [
        os.path.join(basedir, "images", f)
        for f in sorted(_ls(path_manager, os.path.join(basedir, "images")))
        if f.endswith("JPG") or f.endswith("jpg") or f.endswith("png")
    ][0]

    def imread(f):
        return np.array(Image.open(f))

    sh = imread(_local_path(path_manager, img0)).shape

    # Determine the downsampled image directory suffix and make sure the
    # resized images exist (created on demand by _minify).
    sfx = ""
    if factor is not None:
        sfx = "_{}".format(factor)
        _minify(basedir, path_manager, factors=[factor])
    elif height is not None:
        factor = sh[0] / float(height)
        width = int(sh[1] / factor)
        _minify(basedir, path_manager, resolutions=[[height, width]])
        sfx = "_{}x{}".format(width, height)
    elif width is not None:
        factor = sh[1] / float(width)
        height = int(sh[0] / factor)
        _minify(basedir, path_manager, resolutions=[[height, width]])
        sfx = "_{}x{}".format(width, height)
    else:
        factor = 1

    imgdir = os.path.join(basedir, "images" + sfx)
    if not _exists(path_manager, imgdir):
        raise ValueError(f"{imgdir} does not exist, returning")

    imgfiles = [
        _local_path(path_manager, os.path.join(imgdir, f))
        for f in sorted(_ls(path_manager, imgdir))
        if f.endswith("JPG") or f.endswith("jpg") or f.endswith("png")
    ]
    if poses.shape[-1] != len(imgfiles):
        raise ValueError(
            "Mismatch between imgs {} and poses {} !!!!".format(
                len(imgfiles), poses.shape[-1]
            )
        )

    # Update the stored height/width and rescale the focal length so the
    # intrinsics match the (possibly downsampled) images actually loaded.
    sh = imread(imgfiles[0]).shape
    poses[:2, 4, :] = np.array(sh[:2]).reshape([2, 1])
    poses[2, 4, :] = poses[2, 4, :] * 1.0 / factor

    if not load_imgs:
        return poses, bds

    # (Was `imgs = imgs = [...]` — redundant double assignment removed.)
    imgs = [imread(f)[..., :3] / 255.0 for f in imgfiles]
    imgs = np.stack(imgs, -1)

    logger.info(f"Loaded image data, shape {imgs.shape}")
    return poses, bds, imgs
+
+
def normalize(x):
    """Return ``x`` scaled to unit Euclidean norm; warn when the norm is tiny."""
    magnitude = np.linalg.norm(x)
    if magnitude < 0.001:
        warnings.warn("unsafe normalize()")
    return x / magnitude
+
+
def viewmatrix(z, up, pos):
    """Build a 3x4 camera-to-world matrix from a view direction, an up hint and a position."""
    forward = normalize(z)
    right = normalize(np.cross(up, forward))
    true_up = normalize(np.cross(forward, right))
    return np.stack([right, true_up, forward, pos], 1)
+
+
def ptstocam(pts, c2w):
    """Transform world-space points into the camera frame defined by ``c2w``."""
    offsets = (pts - c2w[:3, 3])[..., np.newaxis]
    return np.matmul(c2w[:3, :3].T, offsets)[..., 0]
+
+
def poses_avg(poses):
    # Compute an "average" 3x5 camera-to-world matrix for a set of poses:
    # translation is the mean camera center, viewing direction is the
    # normalized sum of the per-pose z-axes, and the up hint is the sum of
    # the y-axes. The hwf (height, width, focal) column of the FIRST pose
    # is appended unchanged.

    hwf = poses[0, :3, -1:]

    center = poses[:, :3, 3].mean(0)
    vec2 = normalize(poses[:, :3, 2].sum(0))
    up = poses[:, :3, 1].sum(0)
    c2w = np.concatenate([viewmatrix(vec2, up, center), hwf], 1)

    return c2w
+
+
def render_path_spiral(c2w, up, rads, focal, zdelta, zrate, rots, N):
    # Generate N camera poses on a spiral around the average pose `c2w`.
    # `rads` holds per-axis spiral radii, `focal` the depth of the look-at
    # point in front of the average camera, `zrate` the relative speed of the
    # z oscillation and `rots` the number of full revolutions.
    # NOTE(review): `zdelta` is accepted but never used in this body —
    # presumably kept for signature compatibility with the original LLFF code.
    render_poses = []
    rads = np.array(list(rads) + [1.0])  # append 1 for the homogeneous matmul below
    hwf = c2w[:, 4:5]

    for theta in np.linspace(0.0, 2.0 * np.pi * rots, N + 1)[:-1]:
        # Camera origin on the spiral, expressed in world coordinates.
        c = np.dot(
            c2w[:3, :4],
            np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0])
            * rads,
        )
        # View direction: from this origin towards the shared look-at point.
        z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.0])))
        render_poses.append(np.concatenate([viewmatrix(z, up, c), hwf], 1))
    return render_poses
+
+
def recenter_poses(poses):
    # Transform all poses so that their average pose (see poses_avg) becomes
    # the identity; the hwf column of each pose is preserved.

    poses_ = poses + 0  # copy of the input, keeps the original hwf column
    bottom = np.reshape([0, 0, 0, 1.0], [1, 4])
    c2w = poses_avg(poses)
    c2w = np.concatenate([c2w[:3, :4], bottom], -2)  # homogenize average pose to 4x4
    bottom = np.tile(np.reshape(bottom, [1, 1, 4]), [poses.shape[0], 1, 1])
    poses = np.concatenate([poses[:, :3, :4], bottom], -2)

    # Left-multiply by the inverse average pose to recenter all cameras.
    poses = np.linalg.inv(c2w) @ poses
    poses_[:, :3, :4] = poses[:, :3, :4]
    poses = poses_
    return poses
+
+
def spherify_poses(poses, bds):
    # Recenter and rescale the poses so that the camera centers lie roughly on
    # a unit sphere around the point closest to all camera rays, and generate
    # a new circular path of 120 poses for rendering.
    # Returns (poses_reset, new_poses, bds).
    def add_row_to_homogenize_transform(p):
        r"""Add the last row to homogenize 3 x 4 transformation matrices."""
        return np.concatenate(
            [p, np.tile(np.reshape(np.eye(4)[-1, :], [1, 1, 4]), [p.shape[0], 1, 1])], 1
        )

    # p34_to_44 = lambda p: np.concatenate(
    #     [p, np.tile(np.reshape(np.eye(4)[-1, :], [1, 1, 4]), [p.shape[0], 1, 1])], 1
    # )

    p34_to_44 = add_row_to_homogenize_transform

    rays_d = poses[:, :3, 2:3]
    rays_o = poses[:, :3, 3:4]

    def min_line_dist(rays_o, rays_d):
        # Least-squares point minimizing the distance to all camera rays.
        A_i = np.eye(3) - rays_d * np.transpose(rays_d, [0, 2, 1])
        b_i = -A_i @ rays_o
        pt_mindist = np.squeeze(
            -np.linalg.inv((np.transpose(A_i, [0, 2, 1]) @ A_i).mean(0)) @ (b_i).mean(0)
        )
        return pt_mindist

    pt_mindist = min_line_dist(rays_o, rays_d)

    center = pt_mindist
    up = (poses[:, :3, 3] - center).mean(0)

    # Build an orthonormal frame whose z-axis is the mean "up" direction;
    # [0.1, 0.2, 0.3] is just an arbitrary vector unlikely to be parallel.
    vec0 = normalize(up)
    vec1 = normalize(np.cross([0.1, 0.2, 0.3], vec0))
    vec2 = normalize(np.cross(vec0, vec1))
    pos = center
    c2w = np.stack([vec1, vec2, vec0, pos], 1)

    # Express every pose in this new centered frame.
    poses_reset = np.linalg.inv(p34_to_44(c2w[None])) @ p34_to_44(poses[:, :3, :4])

    rad = np.sqrt(np.mean(np.sum(np.square(poses_reset[:, :3, 3]), -1)))

    # Scale so the RMS camera distance from the origin becomes 1.
    sc = 1.0 / rad
    poses_reset[:, :3, 3] *= sc
    bds *= sc
    rad *= sc

    centroid = np.mean(poses_reset[:, :3, 3], 0)
    zh = centroid[2]
    radcircle = np.sqrt(rad**2 - zh**2)
    new_poses = []

    # Sample 120 render cameras on a horizontal circle at height zh.
    for th in np.linspace(0.0, 2.0 * np.pi, 120):

        camorigin = np.array([radcircle * np.cos(th), radcircle * np.sin(th), zh])
        up = np.array([0, 0, -1.0])

        vec2 = normalize(camorigin)
        vec0 = normalize(np.cross(vec2, up))
        vec1 = normalize(np.cross(vec2, vec0))
        pos = camorigin
        p = np.stack([vec0, vec1, vec2, pos], 1)

        new_poses.append(p)

    new_poses = np.stack(new_poses, 0)

    # Re-attach the hwf (intrinsics) column taken from the first input pose.
    new_poses = np.concatenate(
        [new_poses, np.broadcast_to(poses[0, :3, -1:], new_poses[:, :3, -1:].shape)], -1
    )
    poses_reset = np.concatenate(
        [
            poses_reset[:, :3, :4],
            np.broadcast_to(poses[0, :3, -1:], poses_reset[:, :3, -1:].shape),
        ],
        -1,
    )

    return poses_reset, new_poses, bds
+
+
+def _local_path(path_manager, path):
+ if path_manager is None:
+ return path
+ return path_manager.get_local_path(path)
+
+
+def _ls(path_manager, path):
+ if path_manager is None:
+ return os.listdir(path)
+ return path_manager.ls(path)
+
+
+def _exists(path_manager, path):
+ if path_manager is None:
+ return os.path.exists(path)
+ return path_manager.exists(path)
+
+
def load_llff_data(
    basedir,
    factor=8,
    recenter=True,
    bd_factor=0.75,
    spherify=False,
    path_zflat=False,
    path_manager=None,
):
    # Load an LLFF-format scene and return (images, poses, bds) as float32
    # arrays with the sample dimension first:
    #   images: (N, H, W, 3) in [0, 1]; poses: (N, 3, 5); bds: (N, 2).
    # NOTE(review): `path_zflat` is accepted but unused, and the render_poses
    # produced when spherify=True are discarded — presumably trimmed from the
    # original NeRF loader this was adapted from.

    poses, bds, imgs = _load_data(
        basedir, factor=factor, path_manager=path_manager
    )  # factor=8 downsamples original imgs by 8x
    logger.info(f"Loaded {basedir}, {bds.min()}, {bds.max()}")

    # Correct rotation matrix ordering and move variable dim to axis 0
    poses = np.concatenate([poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)
    poses = np.moveaxis(poses, -1, 0).astype(np.float32)
    imgs = np.moveaxis(imgs, -1, 0).astype(np.float32)
    images = imgs
    bds = np.moveaxis(bds, -1, 0).astype(np.float32)

    # Rescale if bd_factor is provided
    sc = 1.0 if bd_factor is None else 1.0 / (bds.min() * bd_factor)
    poses[:, :3, 3] *= sc
    bds *= sc

    if recenter:
        poses = recenter_poses(poses)

    if spherify:
        poses, render_poses, bds = spherify_poses(poses, bds)

    images = images.astype(np.float32)
    poses = poses.astype(np.float32)

    return images, poses, bds
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/scene_batch_sampler.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/scene_batch_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ce7e27176e0bfc9a47a305d025ad837aae66ab8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/scene_batch_sampler.py
@@ -0,0 +1,216 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+import warnings
+from collections import Counter
+from dataclasses import dataclass, field
+from typing import Dict, Iterable, Iterator, List, Sequence, Tuple
+
+import numpy as np
+from torch.utils.data.sampler import Sampler
+
+from .dataset_base import DatasetBase
+
+
@dataclass(eq=False)  # TODO: do we need this if not init from config?
class SceneBatchSampler(Sampler[List[int]]):
    """
    A class for sampling training batches with a controlled composition
    of sequences.
    """

    dataset: DatasetBase
    batch_size: int
    num_batches: int
    # the sampler first samples a random element k from this list and then
    # takes k random frames per sequence
    images_per_seq_options: Sequence[int]

    # if True, will sample a contiguous interval of frames in the sequence
    # it first finds the connected segments within the sequence of sufficient length,
    # then samples a random pivot element among them and ideally uses it as a middle
    # of the temporal window, shifting the borders where necessary.
    # This strategy mitigates the bias against shorter segments and their boundaries.
    sample_consecutive_frames: bool = False
    # if a number > 0, then used to define the maximum difference in frame_number
    # of neighbouring frames when forming connected segments; otherwise the whole
    # sequence is considered a segment regardless of frame numbers
    consecutive_frames_max_gap: int = 0
    # same but for timestamps if they are available
    consecutive_frames_max_gap_seconds: float = 0.1

    # if True, the sampler first reads from the dataset the mapping between
    # sequence names and their categories.
    # During batch sampling, the sampler ensures uniform distribution over the categories
    # of the sampled sequences.
    category_aware: bool = True

    # The fields below are derived in __post_init__, not provided by callers.
    seq_names: List[str] = field(init=False)

    category_to_sequence_names: Dict[str, List[str]] = field(init=False)
    categories: List[str] = field(init=False)

    def __post_init__(self) -> None:
        # Validate the configuration and cache sequence/category metadata.
        if self.batch_size <= 0:
            raise ValueError(
                "batch_size should be a positive integral value, "
                f"but got batch_size={self.batch_size}"
            )

        if len(self.images_per_seq_options) < 1:
            raise ValueError("n_per_seq_posibilities list cannot be empty")

        self.seq_names = list(self.dataset.sequence_names())

        if self.category_aware:
            self.category_to_sequence_names = self.dataset.category_to_sequence_names()
            self.categories = list(self.category_to_sequence_names.keys())

    def __len__(self) -> int:
        # An "epoch" of this sampler is exactly num_batches batches.
        return self.num_batches

    def __iter__(self) -> Iterator[List[int]]:
        # Yield num_batches independently sampled batches of frame indices.
        for batch_idx in range(len(self)):
            batch = self._sample_batch(batch_idx)
            yield batch

    def _sample_batch(self, batch_idx) -> List[int]:
        # Pick how many frames to take per sequence, then how many sequences
        # are needed to fill the batch (rounded up).
        n_per_seq = np.random.choice(self.images_per_seq_options)
        n_seqs = -(-self.batch_size // n_per_seq)  # round up

        if self.category_aware:
            # first sample categories at random, these can be repeated in the batch
            chosen_cat = _capped_random_choice(self.categories, n_seqs, replace=True)
            # then randomly sample a set of unique sequences within each category
            chosen_seq = []
            for cat, n_per_category in Counter(chosen_cat).items():
                category_chosen_seq = _capped_random_choice(
                    self.category_to_sequence_names[cat],
                    n_per_category,
                    replace=False,
                )
                chosen_seq.extend([str(s) for s in category_chosen_seq])
        else:
            chosen_seq = _capped_random_choice(
                self.seq_names,
                n_seqs,
                replace=False,
            )

        if self.sample_consecutive_frames:
            # Sample a temporal window around a random pivot frame of a
            # random eligible segment in each chosen sequence.
            frame_idx = []
            for seq in chosen_seq:
                segment_index = self._build_segment_index(seq, n_per_seq)

                segment, idx = segment_index[np.random.randint(len(segment_index))]
                if len(segment) <= n_per_seq:
                    frame_idx.append(segment)
                else:
                    # Center the window on the pivot, clipping at segment borders.
                    start = np.clip(idx - n_per_seq // 2, 0, len(segment) - n_per_seq)
                    frame_idx.append(segment[start : start + n_per_seq])

        else:
            frame_idx = [
                _capped_random_choice(
                    list(self.dataset.sequence_indices_in_order(seq)),
                    n_per_seq,
                    replace=False,
                )
                for seq in chosen_seq
            ]
        # Flatten per-sequence picks and trim to the requested batch size.
        frame_idx = np.concatenate(frame_idx)[: self.batch_size].tolist()
        if len(frame_idx) < self.batch_size:
            warnings.warn(
                "Batch size smaller than self.batch_size!"
                + " (This is fine for experiments with a single scene and viewpooling)"
            )
        return frame_idx

    def _build_segment_index(self, seq: str, size: int) -> List[Tuple[List[int], int]]:
        """
        Returns a list of (segment, index) tuples, one per eligible frame, where
        segment is a list of frame indices in the contiguous segment the frame
        belongs to index is the frame's index within that segment.
        Segment references are repeated but the memory is shared.
        """
        if (
            self.consecutive_frames_max_gap > 0
            or self.consecutive_frames_max_gap_seconds > 0.0
        ):
            segments = self._split_to_segments(
                self.dataset.sequence_frames_in_order(seq)
            )
            segments = _cull_short_segments(segments, size)
            if not segments:
                raise AssertionError("Empty segments after culling")
        else:
            # No gap constraints: the whole sequence is a single segment.
            segments = [list(self.dataset.sequence_indices_in_order(seq))]

        # build an index of segment for random selection of a pivot frame
        segment_index = [
            (segment, i) for segment in segments for i in range(len(segment))
        ]

        return segment_index

    def _split_to_segments(
        self, sequence_timestamps: Iterable[Tuple[float, int, int]]
    ) -> List[List[int]]:
        # Partition an ordered stream of (timestamp, frame_number, index)
        # triples into runs where neighbouring frames differ by at most the
        # configured frame-number / timestamp gaps.
        if (
            self.consecutive_frames_max_gap <= 0
            and self.consecutive_frames_max_gap_seconds <= 0.0
        ):
            raise AssertionError("This function is only needed for non-trivial max_gap")

        segments = []
        last_no = -self.consecutive_frames_max_gap - 1  # will trigger a new segment
        last_ts = -self.consecutive_frames_max_gap_seconds - 1.0
        for ts, no, idx in sequence_timestamps:
            if ts <= 0.0 and no <= last_no:
                raise AssertionError(
                    "Sequence frames are not ordered while timestamps are not given"
                )

            if (
                no - last_no > self.consecutive_frames_max_gap > 0
                or ts - last_ts > self.consecutive_frames_max_gap_seconds > 0.0
            ):  # new group
                segments.append([idx])
            else:
                segments[-1].append(idx)

            last_no = no
            last_ts = ts

        return segments
+
+
+def _cull_short_segments(segments: List[List[int]], min_size: int) -> List[List[int]]:
+ lengths = [(len(segment), segment) for segment in segments]
+ max_len, longest_segment = max(lengths)
+
+ if max_len < min_size:
+ return [longest_segment]
+
+ return [segment for segment in segments if len(segment) >= min_size]
+
+
+def _capped_random_choice(x, size, replace: bool = True):
+ """
+ if replace==True
+ randomly chooses from x `size` elements without replacement if len(x)>size
+ else allows replacement and selects `size` elements again.
+ if replace==False
+ randomly chooses from x `min(len(x), size)` elements without replacement
+ """
+ len_x = x if isinstance(x, int) else len(x)
+ if replace:
+ return np.random.choice(x, size=size, replace=len_x < size)
+ else:
+ return np.random.choice(x, size=min(size, len_x), replace=False)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/single_sequence_dataset.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/single_sequence_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..c79556edd64a68ab56e72b13d70849328046eb19
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/single_sequence_dataset.py
@@ -0,0 +1,210 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+# This file defines a base class for dataset map providers which
+# provide data for a single scene.
+
+from dataclasses import field
+from typing import Iterable, Iterator, List, Optional, Sequence, Tuple
+
+import numpy as np
+import torch
+from pytorch3d.implicitron.tools.config import (
+ Configurable,
+ expand_args_fields,
+ run_auto_creation,
+)
+from pytorch3d.renderer import CamerasBase, join_cameras_as_batch, PerspectiveCameras
+
+from .dataset_base import DatasetBase
+from .dataset_map_provider import DatasetMap, DatasetMapProviderBase, PathManagerFactory
+from .frame_data import FrameData
+from .utils import DATASET_TYPE_KNOWN, DATASET_TYPE_UNKNOWN
+
# Name of the one synthetic sequence shared by all frames of a SingleSceneDataset.
_SINGLE_SEQUENCE_NAME: str = "one_sequence"
+
+
@expand_args_fields
class SingleSceneDataset(DatasetBase, Configurable):
    """
    A dataset from images from a single scene.
    """

    # RGB images of the scene, one tensor per frame.
    images: List[torch.Tensor] = field()
    # Optional per-frame foreground probability maps.
    fg_probabilities: Optional[List[torch.Tensor]] = field()
    # One camera per frame.
    poses: List[PerspectiveCameras] = field()
    # Label for the scene; used as the sequence category.
    object_name: str = field()
    # Per-frame type label (e.g. known/unseen).
    frame_types: List[str] = field()
    # Optional preset batches of frame indices for evaluation.
    eval_batches: Optional[List[List[int]]] = field()

    def sequence_names(self) -> Iterable[str]:
        # All frames belong to the single synthetic sequence.
        return [_SINGLE_SEQUENCE_NAME]

    def __len__(self) -> int:
        return len(self.poses)

    def sequence_frames_in_order(
        self, seq_name: str, subset_filter: Optional[Sequence[str]] = None
    ) -> Iterator[Tuple[float, int, int]]:
        # Yields (timestamp, frame_number, index) triples; timestamps are not
        # meaningful for this dataset, so 0.0 is used for every frame.
        for i in range(len(self)):
            if subset_filter is None or self.frame_types[i] in subset_filter:
                yield 0.0, i, i

    def __getitem__(self, index) -> FrameData:
        # Assemble the FrameData for one frame of the scene.
        if index >= len(self):
            raise IndexError(f"index {index} out of range {len(self)}")
        image = self.images[index]
        pose = self.poses[index]
        frame_type = self.frame_types[index]
        fg_probability = (
            None if self.fg_probabilities is None else self.fg_probabilities[index]
        )

        frame_data = FrameData(
            frame_number=index,
            sequence_name=_SINGLE_SEQUENCE_NAME,
            sequence_category=self.object_name,
            camera=pose,
            # pyre-ignore
            image_size_hw=torch.tensor(image.shape[1:], dtype=torch.long),
            image_rgb=image,
            fg_probability=fg_probability,
            frame_type=frame_type,
        )
        return frame_data

    def get_eval_batches(self) -> Optional[List[List[int]]]:
        # Preset evaluation batches (lists of frame indices), or None if unset.
        return self.eval_batches
+
+
class SingleSceneDatasetMapProviderBase(DatasetMapProviderBase):
    """
    Base for provider of data for one scene from LLFF or blender datasets.

    Members:
        base_dir: directory holding the data for the scene.
        object_name: The name of the scene (e.g. "lego"). This is just used as a label.
            It will typically be equal to the name of the directory self.base_dir.
        path_manager_factory: Creates path manager which may be used for
            interpreting paths.
        n_known_frames_for_test: If set, training frames are included in the val
            and test datasets, and this many random training frames are added to
            each test batch. If not set, test batches each contain just a single
            testing frame.
    """

    # pyre-fixme[13]: Attribute `base_dir` is never initialized.
    base_dir: str
    # pyre-fixme[13]: Attribute `object_name` is never initialized.
    object_name: str
    # pyre-fixme[13]: Attribute `path_manager_factory` is never initialized.
    path_manager_factory: PathManagerFactory
    path_manager_factory_class_type: str = "PathManagerFactory"
    n_known_frames_for_test: Optional[int] = None

    def __post_init__(self) -> None:
        # Instantiate configured members, then load the scene eagerly.
        run_auto_creation(self)
        self._load_data()

    def _load_data(self) -> None:
        # This must be defined by each subclass,
        # and should set the following on self.
        # - poses: a list of length-1 camera objects
        # - images: [N, 3, H, W] tensor of rgb images - floats in [0,1]
        # - fg_probabilities: None or [N, 1, H, W] of floats in [0,1]
        # - splits: List[List[int]] of indices for train/val/test subsets.
        raise NotImplementedError()

    def _get_dataset(
        self, split_idx: int, frame_type: str, set_eval_batches: bool = False
    ) -> SingleSceneDataset:
        # Build a SingleSceneDataset for the split with index `split_idx`
        # (0=train, 1=val, 2=test), labelling its frames with `frame_type`.
        # pyre-ignore[16]
        split = self.i_split[split_idx]
        frame_types = [frame_type] * len(split)
        fg_probabilities = (
            None
            # pyre-ignore[16]
            if self.fg_probabilities is None
            else self.fg_probabilities[split]
        )
        # By default each eval batch is a single frame of the split.
        eval_batches = [[i] for i in range(len(split))]
        if split_idx != 0 and self.n_known_frames_for_test is not None:
            # Append the training frames to this split; when building eval
            # batches, pad each batch with n_known_frames_for_test of them.
            train_split = self.i_split[0]
            if set_eval_batches:
                generator = np.random.default_rng(seed=0)
                for batch in eval_batches:
                    # using permutation so that changes to n_known_frames_for_test
                    # result in consistent batches.
                    to_add = generator.permutation(len(train_split))[
                        : self.n_known_frames_for_test
                    ]
                    batch.extend((to_add + len(split)).tolist())
            split = np.concatenate([split, train_split])
            frame_types.extend([DATASET_TYPE_KNOWN] * len(train_split))

        # pyre-ignore[28]
        return SingleSceneDataset(
            object_name=self.object_name,
            # pyre-ignore[16]
            images=self.images[split],
            fg_probabilities=fg_probabilities,
            # pyre-ignore[16]
            poses=[self.poses[i] for i in split],
            frame_types=frame_types,
            eval_batches=eval_batches if set_eval_batches else None,
        )

    def get_dataset_map(self) -> DatasetMap:
        # Splits 0/1/2 correspond to train/val/test; only the test dataset
        # carries eval batches.
        return DatasetMap(
            train=self._get_dataset(0, DATASET_TYPE_KNOWN),
            val=self._get_dataset(1, DATASET_TYPE_UNKNOWN),
            test=self._get_dataset(2, DATASET_TYPE_UNKNOWN, True),
        )

    def get_all_train_cameras(self) -> Optional[CamerasBase]:
        # All cameras of the training split, joined into one batched camera.
        # pyre-ignore[16]
        cameras = [self.poses[i] for i in self.i_split[0]]
        return join_cameras_as_batch(cameras)
+
+
def _interpret_blender_cameras(
    poses: torch.Tensor, focal: float
) -> List[PerspectiveCameras]:
    """
    Convert 4x4 matrices representing cameras in blender format
    to PyTorch3D format.

    Args:
        poses: N x 3 x 4 camera matrices
        focal: ndc space focal length

    Returns:
        A list of single-camera PerspectiveCameras, one per input pose.
    """
    pose_target_cameras = []
    for pose_target in poses:
        pose_target = pose_target[:3, :4]
        # Place the transposed rotation top-left and the translation in the
        # bottom row, then invert — presumably converting between the blender
        # and PyTorch3D matrix conventions; TODO confirm against the PyTorch3D
        # camera-convention docs.
        mtx = torch.eye(4, dtype=pose_target.dtype)
        mtx[:3, :3] = pose_target[:3, :3].t()
        mtx[3, :3] = pose_target[:, 3]
        mtx = mtx.inverse()

        # flip the XZ coordinates.
        mtx[:, [0, 2]] *= -1.0

        # Split into a 3x3 rotation and a 1x3 translation row.
        Rpt3, Tpt3 = mtx[:, :3].split([3, 1], dim=0)

        # Square pixels and a centered principal point are assumed.
        focal_length_pt3 = torch.FloatTensor([[focal, focal]])
        principal_point_pt3 = torch.FloatTensor([[0.0, 0.0]])

        cameras = PerspectiveCameras(
            focal_length=focal_length_pt3,
            principal_point=principal_point_pt3,
            R=Rpt3[None],
            T=Tpt3,
        )
        pose_target_cameras.append(cameras)
    return pose_target_cameras
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/train_eval_data_loader_provider.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/train_eval_data_loader_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7c0c39a4c0ea852fae3e634ccc7a6a40e2ae766
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/train_eval_data_loader_provider.py
@@ -0,0 +1,191 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import logging
+from typing import Any, Dict, Optional, Tuple
+
+from pytorch3d.implicitron.dataset.data_loader_map_provider import (
+ DataLoaderMap,
+ SceneBatchSampler,
+ SequenceDataLoaderMapProvider,
+)
+from pytorch3d.implicitron.dataset.dataset_base import DatasetBase
+from pytorch3d.implicitron.dataset.dataset_map_provider import DatasetMap
+from pytorch3d.implicitron.dataset.frame_data import FrameData
+from pytorch3d.implicitron.tools.config import registry, run_auto_creation
+
+from torch.utils.data import DataLoader
+
+logger = logging.getLogger(__name__)
+
+
+# TODO: we can merge it with SequenceDataLoaderMapProvider in PyTorch3D
+# and support both eval_batches protocols
@registry.register
class TrainEvalDataLoaderMapProvider(SequenceDataLoaderMapProvider):
    """
    Implementation of DataLoaderMapProviderBase that may use internal eval batches for
    the test dataset. In particular, if `eval_batches_relpath` is set, it loads
    eval batches from that json file, otherwise test set is treated in the same way as
    train and val, i.e. the parameters `dataset_length_test` and `test_conditioning_type`
    are respected.

    If conditioning is not required, then the batch size should
    be set as 1, and most of the fields do not matter.

    If conditioning is required, each batch will contain one main
    frame first to predict and the, rest of the elements are for
    conditioning.

    If images_per_seq_options is left empty, the conditioning
    frames are picked according to the conditioning type given.
    This does not have regard to the order of frames in a
    scene, or which frames belong to what scene.

    If images_per_seq_options is given, then the conditioning types
    must be SAME and the remaining fields are used.

    Members:
        batch_size: The size of the batch of the data loader.
        num_workers: Number of data-loading threads in each data loader.
        dataset_length_train: The number of batches in a training epoch. Or 0 to mean
            an epoch is the length of the training set.
        dataset_length_val: The number of batches in a validation epoch. Or 0 to mean
            an epoch is the length of the validation set.
        dataset_length_test: used if test_dataset.eval_batches is NOT set. The number of
            batches in a testing epoch. Or 0 to mean an epoch is the length of the test
            set.
        images_per_seq_options: Possible numbers of frames sampled per sequence in a batch.
            If a conditioning_type is KNOWN or TRAIN, then this must be left at its initial
            value. Empty (the default) means that we are not careful about which frames
            come from which scene.
        sample_consecutive_frames: if True, will sample a contiguous interval of frames
            in the sequence. It first sorts the frames by timestimps when available,
            otherwise by frame numbers, finds the connected segments within the sequence
            of sufficient length, then samples a random pivot element among them and
            ideally uses it as a middle of the temporal window, shifting the borders
            where necessary. This strategy mitigates the bias against shorter segments
            and their boundaries.
        consecutive_frames_max_gap: if a number > 0, then used to define the maximum
            difference in frame_number of neighbouring frames when forming connected
            segments; if both this and consecutive_frames_max_gap_seconds are 0s,
            the whole sequence is considered a segment regardless of frame numbers.
        consecutive_frames_max_gap_seconds: if a number > 0.0, then used to define the
            maximum difference in frame_timestamp of neighbouring frames when forming
            connected segments; if both this and consecutive_frames_max_gap are 0s,
            the whole sequence is considered a segment regardless of frame timestamps.
    """

    batch_size: int = 1
    num_workers: int = 0

    dataset_length_train: int = 0
    dataset_length_val: int = 0
    dataset_length_test: int = 0

    images_per_seq_options: Tuple[int, ...] = ()
    sample_consecutive_frames: bool = False
    consecutive_frames_max_gap: int = 0
    consecutive_frames_max_gap_seconds: float = 0.1

    def __post_init__(self):
        # Instantiate any configurable members declared on the class.
        run_auto_creation(self)

    def get_data_loader_map(self, datasets: DatasetMap) -> DataLoaderMap:
        """
        Returns a collection of data loaders for a given collection of datasets.
        """
        train = self._make_generic_data_loader(
            datasets.train,
            self.dataset_length_train,
            datasets.train,
        )

        val = self._make_generic_data_loader(
            datasets.val,
            self.dataset_length_val,
            datasets.train,
        )

        # Prefer the dataset's preset eval batches for the test loader;
        # otherwise treat the test set like train/val.
        if datasets.test is not None and datasets.test.get_eval_batches() is not None:
            test = self._make_eval_data_loader(datasets.test)
        else:
            test = self._make_generic_data_loader(
                datasets.test,
                self.dataset_length_test,
                datasets.train,
            )

        return DataLoaderMap(train=train, val=val, test=test)

    def _make_eval_data_loader(
        self,
        dataset: Optional[DatasetBase],
    ) -> Optional[DataLoader[FrameData]]:
        # Batches are exactly the dataset's preset eval batches.
        if dataset is None:
            return None

        return DataLoader(
            dataset,
            batch_sampler=dataset.get_eval_batches(),
            **self._get_data_loader_common_kwargs(dataset),
        )

    def _make_generic_data_loader(
        self,
        dataset: Optional[DatasetBase],
        num_batches: int,
        train_dataset: Optional[DatasetBase],
    ) -> Optional[DataLoader[FrameData]]:
        """
        Returns the dataloader for a dataset.

        Args:
            dataset: the dataset
            num_batches: possible ceiling on number of batches per epoch
            train_dataset: the training dataset, used if conditioning_type==TRAIN
            conditioning_type: source for padding of batches
        """
        if dataset is None:
            return None

        data_loader_kwargs = self._get_data_loader_common_kwargs(dataset)

        if len(self.images_per_seq_options) > 0:
            # this is a typical few-view setup
            # conditioning comes from the same subset since subsets are split by seqs
            batch_sampler = SceneBatchSampler(
                dataset,
                self.batch_size,
                num_batches=len(dataset) if num_batches <= 0 else num_batches,
                images_per_seq_options=self.images_per_seq_options,
                sample_consecutive_frames=self.sample_consecutive_frames,
                consecutive_frames_max_gap=self.consecutive_frames_max_gap,
                consecutive_frames_max_gap_seconds=self.consecutive_frames_max_gap_seconds,
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                **data_loader_kwargs,
            )

        if self.batch_size == 1:
            # this is a typical many-view setup (without conditioning)
            return self._simple_loader(dataset, num_batches, data_loader_kwargs)

        # edge case: conditioning on train subset, typical for Nerformer-like many-view
        # there is only one sequence in all datasets, so we condition on another subset
        return self._train_loader(
            dataset, train_dataset, num_batches, data_loader_kwargs
        )

    def _get_data_loader_common_kwargs(self, dataset: DatasetBase) -> Dict[str, Any]:
        # Shared DataLoader kwargs; collation is delegated to the dataset's
        # FrameData type.
        return {
            "num_workers": self.num_workers,
            "collate_fn": dataset.frame_data_type.collate,
        }
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2dc59bbe22dceb44dd383462174fee3927a06c0
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/utils.py
@@ -0,0 +1,383 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+import functools
+import warnings
+from pathlib import Path
+from typing import List, Optional, Tuple, TypeVar, Union
+
+import numpy as np
+import torch
+from PIL import Image
+
+from pytorch3d.io import IO
+from pytorch3d.renderer.cameras import PerspectiveCameras
+from pytorch3d.structures.pointclouds import Pointclouds
+
# Canonical frame-type tokens used across the implicitron dataset code.
DATASET_TYPE_TRAIN = "train"
DATASET_TYPE_TEST = "test"
# Suffixes marking source (conditioning) frames vs. target frames.
# NOTE: the "unknown" token is spelled "unseen", not "unknown".
DATASET_TYPE_KNOWN = "known"
DATASET_TYPE_UNKNOWN = "unseen"
+
+
class GenericWorkaround:
    """
    Extra base class that sidesteps an OmegaConf.structured quirk.

    OmegaConf.structured misbehaves when applied to a dataclass whose
    first base class is a Generic other than Dict (the failure happens in
    get_dict_key_value_types in omegaconf/_utils.py). For instance, this
    fails:

        @dataclass(eq=False)
        class D(torch.utils.data.Dataset[int]):
            a: int = 3

        OmegaConf.structured(D)

    Listing this class as an additional base avoids the problem.
    """

    pass
+
+
def is_known_frame_scalar(frame_type: str) -> bool:
    """
    True iff a single frame's type string marks it as a known
    (conditioning) frame, i.e. it ends with the "known" suffix.
    """
    known_suffix = DATASET_TYPE_KNOWN
    return frame_type.endswith(known_suffix)
+
+
def is_known_frame(
    frame_type: List[str], device: Optional[str] = None
) -> torch.BoolTensor:
    """
    Map a batch of frame-type strings to a boolean tensor that is True
    for the known (conditioning) frames.
    """
    flags = [is_known_frame_scalar(ft) for ft in frame_type]
    # pyre-fixme[7]: Expected `BoolTensor` but got `Tensor`.
    return torch.tensor(flags, dtype=torch.bool, device=device)
+
+
def is_train_frame(
    frame_type: List[str], device: Optional[str] = None
) -> torch.BoolTensor:
    """
    Map a batch of frame-type strings to a boolean tensor that is True
    for frames belonging to the training split (prefix "train").
    """
    flags = [ft.startswith(DATASET_TYPE_TRAIN) for ft in frame_type]
    # pyre-fixme[7]: Expected `BoolTensor` but got `Tensor`.
    return torch.tensor(flags, dtype=torch.bool, device=device)
+
+
def get_bbox_from_mask(
    mask: np.ndarray, thr: float, decrease_quant: float = 0.05
) -> Tuple[int, int, int, int]:
    """
    Compute a tight bounding box (xywh, pixel units) around `mask > thr`.

    If thresholding at `thr` leaves at most one pixel, the threshold is
    lowered by `decrease_quant` and the mask is recomputed until more
    than one pixel survives.
    """
    # bbox in xywh
    masks_for_box = np.zeros_like(mask)
    # Lower the threshold until more than one pixel passes it.
    while masks_for_box.sum() <= 1.0:
        masks_for_box = (mask > thr).astype(np.float32)
        thr -= decrease_quant
        if thr <= 0.0:
            # Once thr goes negative, every (non-negative) mask pixel passes
            # on the next iteration, so the bbox degenerates to the full image.
            warnings.warn(
                f"Empty masks_for_bbox (thr={thr}) => using full image.", stacklevel=1
            )

    # Project the binary mask onto each axis to find the nonzero extents.
    x0, x1 = get_1d_bounds(masks_for_box.sum(axis=-2))
    y0, y1 = get_1d_bounds(masks_for_box.sum(axis=-1))

    return x0, y0, x1 - x0, y1 - y0
+
+
def crop_around_box(
    tensor: torch.Tensor, bbox: torch.Tensor, impath: str = ""
) -> torch.Tensor:
    """
    Crop the trailing (H, W) dims of `tensor` to `bbox`.

    `bbox` is xyxy with an exclusive (already +1-corrected) upper bound.
    """
    # Make sure the box lies within the image and has integer coordinates.
    bbox = clamp_box_to_image_bounds_and_round(
        bbox,
        image_size_hw=tuple(tensor.shape[-2:]),
    )
    cropped = tensor[..., bbox[1] : bbox[3], bbox[0] : bbox[2]]
    assert all(c > 0 for c in cropped.shape), f"squashed image {impath}"
    return cropped
+
+
+def clamp_box_to_image_bounds_and_round(
+ bbox_xyxy: torch.Tensor,
+ image_size_hw: Tuple[int, int],
+) -> torch.LongTensor:
+ bbox_xyxy = bbox_xyxy.clone()
+ bbox_xyxy[[0, 2]] = torch.clamp(bbox_xyxy[[0, 2]], 0, image_size_hw[-1])
+ bbox_xyxy[[1, 3]] = torch.clamp(bbox_xyxy[[1, 3]], 0, image_size_hw[-2])
+ if not isinstance(bbox_xyxy, torch.LongTensor):
+ bbox_xyxy = bbox_xyxy.round().long()
+ return bbox_xyxy # pyre-ignore [7]
+
+
T = TypeVar("T", bound=torch.Tensor)


def bbox_xyxy_to_xywh(xyxy: T) -> T:
    """Convert an (x0, y0, x1, y1) box to (x, y, w, h) form."""
    size_wh = xyxy[2:] - xyxy[:2]
    return torch.cat([xyxy[:2], size_wh])  # pyre-ignore
+
+
def get_clamp_bbox(
    bbox: torch.Tensor,
    box_crop_context: float = 0.0,
    image_path: str = "",
) -> torch.Tensor:
    """
    Expand an xywh box by the `box_crop_context` rate and return the
    (possibly expanded) box in float xyxy form.

    Raises:
        ValueError: if the expanded box would contain no pixels.
    """
    bbox = bbox.clone()  # do not edit bbox in place

    if box_crop_context > 0.0:
        # Grow the box symmetrically around its center.
        bbox = bbox.float()
        half_context = box_crop_context / 2
        bbox[0] -= bbox[2] * half_context
        bbox[1] -= bbox[3] * half_context
        bbox[2] += bbox[2] * box_crop_context
        bbox[3] += bbox[3] * box_crop_context

    if (bbox[2:] <= 1.0).any():
        raise ValueError(
            f"squashed image {image_path}!! The bounding box contains no pixels."
        )

    # set min height, width to 2 along both axes
    bbox[2:] = torch.clamp(bbox[2:], 2)
    return bbox_xywh_to_xyxy(bbox, clamp_size=2)
+
+
def rescale_bbox(
    bbox: torch.Tensor,
    orig_res: Union[Tuple[int, int], torch.LongTensor],
    new_res: Union[Tuple[int, int], torch.LongTensor],
) -> torch.Tensor:
    """Scale a box from `orig_res` to `new_res` by the mean per-axis ratio."""
    assert bbox is not None
    assert np.prod(orig_res) > 1e-8
    # pyre-ignore
    ratio_y = new_res[0] / orig_res[0]
    ratio_x = new_res[1] / orig_res[1]
    # average ratio of dimensions
    return bbox * ((ratio_y + ratio_x) / 2.0)
+
+
def bbox_xywh_to_xyxy(
    xywh: torch.Tensor, clamp_size: Optional[int] = None
) -> torch.Tensor:
    """
    Convert an (x, y, w, h) box to (x0, y0, x1, y1); if `clamp_size` is
    given, w and h are first clamped to at least that size.
    """
    xyxy = xywh.clone()
    if clamp_size is not None:
        # Enforce a minimal width/height before converting.
        xyxy[2:] = torch.clamp(xyxy[2:], clamp_size)
    xyxy[2:] = xyxy[2:] + xyxy[:2]
    return xyxy
+
+
def get_1d_bounds(arr: np.ndarray) -> Tuple[int, int]:
    """Return the half-open [first, last+1) extent of the nonzero entries."""
    nonzero_idx = np.flatnonzero(arr)
    lower = nonzero_idx[0]
    upper = nonzero_idx[-1] + 1
    return lower, upper
+
+
def resize_image(
    image: Union[np.ndarray, torch.Tensor],
    image_height: Optional[int],
    image_width: Optional[int],
    mode: str = "bilinear",
) -> Tuple[torch.Tensor, float, torch.Tensor]:
    """
    Uniformly rescale a CHW image to fit inside (image_height, image_width)
    and zero-pad it bottom/right to exactly that size.

    Returns:
        the padded image, the scale that was applied, and a 1xHxW mask that
        is 1 over the valid (non-padded) region.
    """
    if isinstance(image, np.ndarray):
        image = torch.from_numpy(image)

    if image_height is None or image_width is None:
        # No target size given: pass the image through with an all-valid mask.
        return image, 1.0, torch.ones_like(image[:1])

    # Largest uniform scale that keeps the image inside the target rectangle.
    scale = min(
        image_height / image.shape[-2],
        image_width / image.shape[-1],
    )
    resized = torch.nn.functional.interpolate(
        image[None],
        scale_factor=scale,
        mode=mode,
        align_corners=False if mode == "bilinear" else None,
        recompute_scale_factor=True,
    )[0]
    padded = torch.zeros(image.shape[0], image_height, image_width)
    padded[:, : resized.shape[1], : resized.shape[2]] = resized
    mask = torch.zeros(1, image_height, image_width)
    mask[:, : resized.shape[1], : resized.shape[2]] = 1.0
    return padded, scale, mask
+
+
def transpose_normalize_image(image: np.ndarray) -> np.ndarray:
    """HWC (or HW) image in [0, 255] -> CHW float32 image in [0, 1]."""
    chw = np.atleast_3d(image).transpose((2, 0, 1))
    return chw.astype(np.float32) / 255.0
+
+
def load_image(path: str) -> np.ndarray:
    """Load an image file as a normalized float32 CHW RGB array."""
    with Image.open(path) as pil_im:
        rgb = np.array(pil_im.convert("RGB"))
    return transpose_normalize_image(rgb)
+
+
def load_mask(path: str) -> np.ndarray:
    """Load a mask image as a normalized float32 CHW array."""
    with Image.open(path) as pil_im:
        raw = np.array(pil_im)
    return transpose_normalize_image(raw)
+
+
def load_depth(path: str, scale_adjustment: float) -> np.ndarray:
    """
    Load a depth map (.exr via OpenCV, or 16-bit .png), rescale it by
    `scale_adjustment` and zero out non-finite values.
    """
    lower_path = path.lower()
    if lower_path.endswith(".exr"):
        # NOTE: environment variable OPENCV_IO_ENABLE_OPENEXR must be set to 1
        # You will have to accept these vulnerabilities by using OpenEXR:
        # https://github.com/opencv/opencv/issues/21326
        import cv2

        d = cv2.imread(path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)[..., 0]
        # Huge values are sentinels for missing depth.
        d[d > 1e9] = 0.0
    elif lower_path.endswith(".png"):
        d = load_16big_png_depth(path)
    else:
        raise ValueError('unsupported depth file name "%s"' % path)

    d = d * scale_adjustment
    d[~np.isfinite(d)] = 0.0
    return d[None]  # fake feature channel
+
+
def load_16big_png_depth(depth_png: str) -> np.ndarray:
    """Read a 16-bit png holding float16 depth and return it as float32 HxW."""
    with Image.open(depth_png) as depth_pil:
        # the image is stored with 16-bit depth but PIL reads it as I (32 bit).
        # we cast it to uint16, then reinterpret as float16, then cast to float32
        raw_u16 = np.array(depth_pil, dtype=np.uint16)
        width, height = depth_pil.size
        depth = np.frombuffer(raw_u16, dtype=np.float16).astype(np.float32)
        return depth.reshape((height, width))
+
+
def load_1bit_png_mask(file: str) -> np.ndarray:
    """Load a binary mask image as a float32 array of 0.0/1.0 values."""
    with Image.open(file) as pil_im:
        gray = np.array(pil_im.convert("L"))
    return (gray > 0.0).astype(np.float32)
+
+
def load_depth_mask(path: str) -> np.ndarray:
    """Load a 1-bit png depth mask, adding a leading (fake) feature channel."""
    if not path.lower().endswith(".png"):
        raise ValueError('unsupported depth mask file name "%s"' % path)
    return load_1bit_png_mask(path)[None]  # fake feature channel
+
+
def safe_as_tensor(data, dtype):
    """Like torch.tensor(data, dtype=dtype), but passes None through."""
    if data is None:
        return None
    return torch.tensor(data, dtype=dtype)
+
+
+def _convert_ndc_to_pixels(
+ focal_length: torch.Tensor,
+ principal_point: torch.Tensor,
+ image_size_wh: torch.Tensor,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ half_image_size = image_size_wh / 2
+ rescale = half_image_size.min()
+ principal_point_px = half_image_size - principal_point * rescale
+ focal_length_px = focal_length * rescale
+ return focal_length_px, principal_point_px
+
+
+def _convert_pixels_to_ndc(
+ focal_length_px: torch.Tensor,
+ principal_point_px: torch.Tensor,
+ image_size_wh: torch.Tensor,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ half_image_size = image_size_wh / 2
+ rescale = half_image_size.min()
+ principal_point = (half_image_size - principal_point_px) / rescale
+ focal_length = focal_length_px / rescale
+ return focal_length, principal_point
+
+
def adjust_camera_to_bbox_crop_(
    camera: PerspectiveCameras,
    image_size_wh: torch.Tensor,
    clamp_bbox_xywh: torch.Tensor,
) -> None:
    """
    In-place update of the intrinsics of a singleton `camera` after the
    image is cropped to `clamp_bbox_xywh` (xywh, pixel units).
    """
    if len(camera) != 1:
        raise ValueError("Adjusting currently works with singleton cameras camera only")

    fl_px, pp_px = _convert_ndc_to_pixels(
        camera.focal_length[0],
        camera.principal_point[0],
        image_size_wh,
    )
    # Shift the principal point by the crop offset, then express the
    # intrinsics in the NDC frame of the cropped image.
    pp_px_cropped = pp_px - clamp_bbox_xywh[:2]
    fl_cropped, pp_cropped = _convert_pixels_to_ndc(
        fl_px,
        pp_px_cropped,
        clamp_bbox_xywh[2:],
    )

    camera.focal_length = fl_cropped[None]
    camera.principal_point = pp_cropped[None]
+
+
def adjust_camera_to_image_scale_(
    camera: PerspectiveCameras,
    original_size_wh: torch.Tensor,
    new_size_wh: torch.LongTensor,
    # pyre-fixme[7]: Expected `PerspectiveCameras` but got implicit return value of `None`.
) -> PerspectiveCameras:
    """
    In-place update of the intrinsics of `camera` after uniformly rescaling
    the image from `original_size_wh` to fit inside `new_size_wh`.
    """
    fl_px, pp_px = _convert_ndc_to_pixels(
        camera.focal_length[0],
        camera.principal_point[0],
        original_size_wh,
    )

    # now scale and convert from pixels to NDC
    target_size_wh = new_size_wh.float()
    # Same uniform scale that resize_image applies (min per-axis ratio).
    scale = (target_size_wh / original_size_wh).min(dim=-1, keepdim=True).values
    fl_scaled, pp_scaled = _convert_pixels_to_ndc(
        fl_px * scale,
        pp_px * scale,
        target_size_wh,
    )
    camera.focal_length = fl_scaled[None]
    camera.principal_point = pp_scaled[None]
+
+
# NOTE this cache is per-worker; they are implemented as processes.
# each batch is loaded and collated by a single worker;
# since sequences tend to co-occur within batches, this is useful.
@functools.lru_cache(maxsize=256)
def load_pointcloud(pcl_path: Union[str, Path], max_points: int = 0) -> Pointclouds:
    """Load a point cloud from disk, optionally subsampling to `max_points`."""
    pointcloud = IO().load_pointcloud(pcl_path)
    if max_points > 0:
        pointcloud = pointcloud.subsample(max_points)
    return pointcloud
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/evaluation/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/evaluation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/evaluation/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/evaluation/evaluate_new_view_synthesis.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/evaluation/evaluate_new_view_synthesis.py
new file mode 100644
index 0000000000000000000000000000000000000000..05b893d7030719e344764a6b7f3f94c6bd2e0b09
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/evaluation/evaluate_new_view_synthesis.py
@@ -0,0 +1,598 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+import copy
+import warnings
+from collections import OrderedDict
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Optional, Sequence, Tuple, TYPE_CHECKING, Union
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from pytorch3d.implicitron.dataset.frame_data import FrameData
+from pytorch3d.implicitron.dataset.utils import is_train_frame
+from pytorch3d.implicitron.models.base_model import ImplicitronRender
+from pytorch3d.implicitron.tools import vis_utils
+from pytorch3d.implicitron.tools.image_utils import mask_background
+from pytorch3d.implicitron.tools.metric_utils import calc_psnr, eval_depth, iou, rgb_l1
+from pytorch3d.implicitron.tools.point_cloud_utils import get_rgbd_point_cloud
+from pytorch3d.implicitron.tools.vis_utils import make_depth_image
+from pytorch3d.renderer.cameras import PerspectiveCameras
+from pytorch3d.vis.plotly_vis import plot_scene
+from tabulate import tabulate
+
+if TYPE_CHECKING:
+ from visdom import Visdom
+
+
+EVAL_N_SRC_VIEWS = [1, 3, 5, 7, 9]
+
+
@dataclass
class _Visualizer:
    """
    Helper that pushes evaluation debug visualizations (RGB comparisons,
    depth maps and point clouds) to a Visdom server.
    """

    # Predicted (rendered) RGB image, same resolution as the ground truth.
    image_render: torch.Tensor
    # Foreground-masked ground-truth RGB image.
    image_rgb_masked: torch.Tensor
    # Predicted depth map.
    depth_render: torch.Tensor
    # Ground-truth depth map, if available.
    depth_map: Optional[torch.Tensor]
    # Ground-truth depth validity mask, if available.
    depth_mask: Optional[torch.Tensor]

    # Visdom environment name the plots are written to.
    visdom_env: str = "eval_debug"

    # Lazily-created Visdom connection; None disables all plotting.
    _viz: Optional["Visdom"] = field(init=False)

    def __post_init__(self):
        self._viz = vis_utils.get_visdom_connection()

    def show_rgb(
        self, loss_value: float, metric_name: str, loss_mask_now: torch.Tensor
    ):
        """Show render | masked g.t. | loss mask side by side under `metric_name`."""
        if self._viz is None:
            return
        self._viz.images(
            torch.cat(
                (
                    self.image_render,
                    self.image_rgb_masked,
                    loss_mask_now.repeat(1, 3, 1, 1),
                ),
                dim=3,
            ),
            env=self.visdom_env,
            win=metric_name,
            opts={"title": f"{metric_name}_{loss_value:1.2f}"},
        )

    def show_depth(
        self, depth_loss: float, name_postfix: str, loss_mask_now: torch.Tensor
    ):
        """Show predicted (and, if present, g.t.) depth images and 3D point clouds."""
        if self._viz is None:
            return
        viz = self._viz
        # Predicted depth next to the ground-truth depth (when available).
        viz.images(
            torch.cat(
                (make_depth_image(self.depth_render, loss_mask_now),)
                + (
                    (make_depth_image(self.depth_map, loss_mask_now),)
                    if self.depth_map is not None
                    else ()
                ),
                dim=3,
            ),
            env=self.visdom_env,
            win="depth_abs" + name_postfix,
            opts={"title": f"depth_abs_{name_postfix}_{depth_loss:1.2f}"},
        )
        viz.images(
            loss_mask_now,
            env=self.visdom_env,
            win="depth_abs" + name_postfix + "_mask",
            opts={"title": f"depth_abs_{name_postfix}_{depth_loss:1.2f}_mask"},
        )
        if self.depth_mask is not None:
            viz.images(
                self.depth_mask,
                env=self.visdom_env,
                win="depth_abs" + name_postfix + "_maskd",
                opts={"title": f"depth_abs_{name_postfix}_{depth_loss:1.2f}_maskd"},
            )

        # show the 3D plot
        # pyre-fixme[9]: viewpoint_trivial has type `PerspectiveCameras`; used as
        #  `TensorProperties`.
        viewpoint_trivial: PerspectiveCameras = PerspectiveCameras().to(
            loss_mask_now.device
        )
        _pcls = {
            "pred_depth": get_rgbd_point_cloud(
                viewpoint_trivial,
                self.image_render,
                self.depth_render,
                # mask_crop,
                torch.ones_like(self.depth_render),
                # loss_mask_now,
            )
        }
        if self.depth_map is not None:
            _pcls["gt_depth"] = get_rgbd_point_cloud(
                viewpoint_trivial,
                self.image_rgb_masked,
                self.depth_map,
                # mask_crop,
                torch.ones_like(self.depth_map),
                # loss_mask_now,
            )

        # Drop empty point clouds; plot_scene cannot show them.
        _pcls = {pn: p for pn, p in _pcls.items() if int(p.num_points_per_cloud()) > 0}

        plotlyplot = plot_scene(
            {f"pcl{name_postfix}": _pcls},  # pyre-ignore
            camera_scale=1.0,
            pointcloud_max_points=10000,
            pointcloud_marker_size=1,
        )
        viz.plotlyplot(
            plotlyplot,
            env=self.visdom_env,
            win=f"pcl{name_postfix}",
        )
+
+
def eval_batch(
    frame_data: FrameData,
    implicitron_render: ImplicitronRender,
    bg_color: Union[torch.Tensor, Sequence, str, float] = "black",
    mask_thr: float = 0.5,
    lpips_model=None,
    visualize: bool = False,
    visualize_visdom_env: str = "eval_debug",
    break_after_visualising: bool = True,
) -> Dict[str, Any]:
    """
    Produce performance metrics for a single batch of new-view synthesis
    predictions.

    Given a set of known views (for which frame_data.frame_type.endswith('known')
    is True), a new-view synthesis method (NVS) is tasked to generate new views
    of the scene from the viewpoint of the target views (for which
    frame_data.frame_type.endswith('known') is False). The resulting
    synthesized new views, stored in `implicitron_render`, are compared to the
    target ground truth in `frame_data` in terms of geometry and appearance
    resulting in a dictionary of metrics returned by the `eval_batch` function.

    Args:
        frame_data: A FrameData object containing the input to the new view
            synthesis method.
        implicitron_render: The data describing the synthesized new views.
        bg_color: The background color of the generated new views and the
            ground truth.
        lpips_model: A pre-trained model for evaluating the LPIPS metric.
        visualize: If True, visualizes the results to Visdom.

    Returns:
        results: A dictionary holding evaluation metrics.

    Throws:
        ValueError if frame_data does not have frame_type, camera, or image_rgb
        ValueError if the batch has a mix of training and test samples
        ValueError if the batch frames are not [unseen, known, known, ...]
        ValueError if one of the required fields in implicitron_render is missing
    """
    frame_type = frame_data.frame_type
    if frame_type is None:
        raise ValueError("Frame type has not been set.")

    # we check that all those fields are not None but Pyre can't infer that properly
    # TODO: assign to local variables and simplify the code.
    if frame_data.image_rgb is None:
        raise ValueError("Image is not in the evaluation batch.")

    if frame_data.camera is None:
        raise ValueError("Camera is not in the evaluation batch.")

    # eval all results in the resolution of the frame_data image
    image_resol = tuple(frame_data.image_rgb.shape[2:])

    # Post-process the render:
    # 1) check implicitron_render for Nones,
    # 2) obtain copies to make sure we dont edit the original data,
    # 3) take only the 1st (target) image
    # 4) resize to match ground-truth resolution
    cloned_render: Dict[str, torch.Tensor] = {}
    for k in ["mask_render", "image_render", "depth_render"]:
        # NOTE(review): the local name `field` shadows dataclasses.field
        # imported at the top of the file; harmless here, but confusing.
        field = getattr(implicitron_render, k)
        if field is None:
            raise ValueError(f"A required predicted field {k} is missing")

        # Bilinear for RGB, nearest for masks/depth to avoid mixing values.
        imode = "bilinear" if k == "image_render" else "nearest"
        cloned_render[k] = (
            F.interpolate(field[:1], size=image_resol, mode=imode).detach().clone()
        )

    # Deep-copy so the truncations/multiplications below don't leak to the caller.
    frame_data = copy.deepcopy(frame_data)

    # mask the ground truth depth in case frame_data contains the depth mask
    if frame_data.depth_map is not None and frame_data.depth_mask is not None:
        frame_data.depth_map *= frame_data.depth_mask

    if not isinstance(frame_type, list):  # not batch FrameData
        frame_type = [frame_type]

    # Frame 0 is the target; all conditioning frames (1:) must come from
    # the same train/test split.
    is_train = is_train_frame(frame_type)
    if len(is_train) > 1 and (is_train[1] != is_train[1:]).any():
        raise ValueError(
            "All (conditioning) frames in the eval batch have to be either train/test."
        )

    # Keep only the first (target) frame of each per-frame field.
    for k in [
        "depth_map",
        "image_rgb",
        "fg_probability",
        "mask_crop",
    ]:
        if not hasattr(frame_data, k) or getattr(frame_data, k) is None:
            continue
        setattr(frame_data, k, getattr(frame_data, k)[:1])

    if frame_data.depth_map is None or frame_data.depth_map.sum() <= 0:
        warnings.warn("Empty or missing depth map in evaluation!")

    if frame_data.mask_crop is None:
        warnings.warn("mask_crop is None, assuming the whole image is valid.")

    if frame_data.fg_probability is None:
        warnings.warn("fg_probability is None, assuming the whole image is fg.")

    # threshold the masks to make ground truth binary masks
    mask_fg = (
        frame_data.fg_probability >= mask_thr
        if frame_data.fg_probability is not None
        # pyre-ignore [16]
        else torch.ones_like(frame_data.image_rgb[:, :1, ...]).bool()
    )

    mask_crop = (
        frame_data.mask_crop
        if frame_data.mask_crop is not None
        else torch.ones_like(mask_fg)
    )

    # unmasked g.t. image
    image_rgb = frame_data.image_rgb

    # fg-masked g.t. image
    image_rgb_masked = mask_background(
        # pyre-fixme[6]: Expected `Tensor` for 1st param but got
        #  `Optional[torch.Tensor]`.
        frame_data.image_rgb,
        mask_fg,
        bg_color=bg_color,
    )

    # clamp predicted images
    image_render = cloned_render["image_render"].clamp(0.0, 1.0)

    if visualize:
        visualizer = _Visualizer(
            image_render=image_render,
            image_rgb_masked=image_rgb_masked,
            depth_render=cloned_render["depth_render"],
            depth_map=frame_data.depth_map,
            depth_mask=(
                frame_data.depth_mask[:1] if frame_data.depth_mask is not None else None
            ),
            visdom_env=visualize_visdom_env,
        )

    results: Dict[str, Any] = {}

    results["iou"] = iou(
        cloned_render["mask_render"],
        mask_fg,
        mask=mask_crop,
    )

    # RGB metrics restricted to the crop region ("_masked") and to the
    # foreground region ("_fg").
    for loss_fg_mask, name_postfix in zip((mask_crop, mask_fg), ("_masked", "_fg")):

        loss_mask_now = mask_crop * loss_fg_mask

        for rgb_metric_name, rgb_metric_fun in zip(
            ("psnr", "rgb_l1"), (calc_psnr, rgb_l1)
        ):
            metric_name = rgb_metric_name + name_postfix
            results[metric_name] = rgb_metric_fun(
                image_render,
                image_rgb_masked,
                mask=loss_mask_now,
            )

            if visualize:
                visualizer.show_rgb(
                    results[metric_name].item(), metric_name, loss_mask_now
                )

        if name_postfix == "_fg" and frame_data.depth_map is not None:
            # only record depth metrics for the foreground
            _, abs_ = eval_depth(
                cloned_render["depth_render"],
                # pyre-fixme[6]: For 2nd param expected `Tensor` but got
                #  `Optional[Tensor]`.
                frame_data.depth_map,
                get_best_scale=True,
                mask=loss_mask_now,
                crop=5,
            )
            results["depth_abs" + name_postfix] = abs_.mean()

            if visualize:
                visualizer.show_depth(abs_.mean().item(), name_postfix, loss_mask_now)
                # Deliberate debugging stop after visualization when requested.
                if break_after_visualising:
                    breakpoint()  # noqa: B601

    # add the rgb metrics between the render and the unmasked image
    for rgb_metric_name, rgb_metric_fun in zip(
        ("psnr_full_image", "rgb_l1_full_image"), (calc_psnr, rgb_l1)
    ):
        results[rgb_metric_name] = rgb_metric_fun(
            image_render,
            # pyre-fixme[6]: For 2nd argument expected `Tensor` but got
            #  `Optional[Tensor]`.
            image_rgb,
            mask=mask_crop,
        )

    if lpips_model is not None:
        # LPIPS expects inputs in [-1, 1].
        for gt_image_type in ("_full_image", "_masked"):
            im1, im2 = [
                2.0 * im.clamp(0.0, 1.0) - 1.0  # pyre-ignore[16]
                for im in (
                    image_rgb_masked if gt_image_type == "_masked" else image_rgb,
                    cloned_render["image_render"],
                )
            ]
            results["lpips" + gt_image_type] = lpips_model.forward(im1, im2).item()

    # convert all metrics to floats
    results = {k: float(v) for k, v in results.items()}

    results["meta"] = {
        # store the size of the batch (corresponds to n_src_views+1)
        "batch_size": len(frame_type),
        # store the type of the target frame
        # pyre-fixme[16]: `None` has no attribute `__getitem__`.
        "frame_type": str(frame_data.frame_type[0]),
    }

    return results
+
+
def average_per_batch_results(
    results_per_batch: List[Dict[str, Any]],
    idx: Optional[torch.Tensor] = None,
) -> dict:
    """
    Average a list of per-batch metric dicts (skipping the "meta" entry).
    If `idx` is given, only the batches it selects contribute; an empty
    selection yields NaN for every metric.
    """
    metric_names = list(results_per_batch[0].keys())
    metric_names.remove("meta")
    if idx is not None:
        results_per_batch = [results_per_batch[i] for i in idx]
    if len(results_per_batch) == 0:
        return {name: float("NaN") for name in metric_names}
    averaged = {}
    for name in metric_names:
        averaged[name] = float(np.array([r[name] for r in results_per_batch]).mean())
    return averaged
+
+
+def _reduce_camera_iou_overlap(ious: torch.Tensor, topk: int = 2) -> torch.Tensor:
+ """
+ Calculate the final camera difficulty by computing the average of the
+ ious of the two most similar cameras.
+
+ Returns:
+ single-element Tensor
+ """
+ return ious.topk(k=min(topk, len(ious) - 1)).values.mean()
+
+
+def _get_camera_difficulty_bin_edges(camera_difficulty_bin_breaks: Tuple[float, float]):
+ """
+ Get the edges of camera difficulty bins.
+ """
+ _eps = 1e-5
+ lower, upper = camera_difficulty_bin_breaks
+ diff_bin_edges = torch.tensor([0.0 - _eps, lower, upper, 1.0 + _eps]).float()
+ diff_bin_names = ["hard", "medium", "easy"]
+ return diff_bin_edges, diff_bin_names
+
+
def summarize_nvs_eval_results(
    per_batch_eval_results: List[Dict[str, Any]],
    is_multisequence: bool,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """
    Compile the per-batch evaluation results `per_batch_eval_results` into
    a set of aggregate metrics. The produced metrics depend on is_multisequence.

    Args:
        per_batch_eval_results: Metrics of each per-batch evaluation.
        is_multisequence: Whether to evaluate as a multisequence task.

    Returns:
        nvs_results_flat: A flattened dict of all aggregate metrics.
        aux_out: A dictionary holding a set of auxiliary results.
    """
    n_batches = len(per_batch_eval_results)
    # Single-sequence evaluation has one unnamed result set; multisequence
    # evaluation is split by the train/test conditioning subset.
    # (Fixed: the original initialized eval_sets to [] and immediately
    # overwrote it.)
    eval_sets: List[Optional[str]] = [None]
    if is_multisequence:
        eval_sets = ["train", "test"]
    batch_sizes = torch.tensor(
        [r["meta"]["batch_size"] for r in per_batch_eval_results]
    ).long()

    is_train = is_train_frame([r["meta"]["frame_type"] for r in per_batch_eval_results])

    # init the result database dict
    results = []

    # add per set averages
    for SET in eval_sets:
        if SET is None:
            # Single-sequence: all batches belong to the (test) set.
            ok_set = torch.ones(n_batches, dtype=torch.bool)
            set_name = "test"
        else:
            ok_set = is_train == int(SET == "train")
            set_name = SET

        # average over all results
        bin_results = average_per_batch_results(
            per_batch_eval_results, idx=torch.where(ok_set)[0]
        )
        results.append(
            {
                "subset": set_name,
                "subsubset": "diff=all",
                "metrics": bin_results,
            }
        )

        if is_multisequence:
            # split based on n_src_views (batch size = n_src_views + 1 target)
            n_src_views = batch_sizes - 1
            for n_src in EVAL_N_SRC_VIEWS:
                ok_src = ok_set & (n_src_views == n_src)
                n_src_results = average_per_batch_results(
                    per_batch_eval_results,
                    idx=torch.where(ok_src)[0],
                )
                results.append(
                    {
                        "subset": set_name,
                        "subsubset": f"n_src={int(n_src)}",
                        "metrics": n_src_results,
                    }
                )

    aux_out = {"results": results}
    return flatten_nvs_results(results), aux_out
+
+
+def _get_flat_nvs_metric_key(result, metric_name) -> str:
+ metric_key_postfix = f"|subset={result['subset']}|{result['subsubset']}"
+ metric_key = f"{metric_name}{metric_key_postfix}"
+ return metric_key
+
+
+def flatten_nvs_results(results) -> Dict[str, Any]:
+ """
+ Takes input `results` list of dicts of the form::
+
+ [
+ {
+ 'subset':'train/test/...',
+ 'subsubset': 'src=1/src=2/...',
+ 'metrics': nvs_eval_metrics}
+ },
+ ...
+ ]
+
+ And converts to a flat dict as follows::
+
+ {
+ 'subset=train/test/...|subsubset=src=1/src=2/...': nvs_eval_metrics,
+ ...
+ }
+ """
+ results_flat = {}
+ for result in results:
+ for metric_name, metric_val in result["metrics"].items():
+ metric_key = _get_flat_nvs_metric_key(result, metric_name)
+ assert metric_key not in results_flat
+ results_flat[metric_key] = metric_val
+ return results_flat
+
+
def pretty_print_nvs_metrics(results) -> None:
    """
    Print per-subset tables of the NVS metrics in `results` using tabulate.
    """
    subsets, subsubsets = [
        _ordered_set([r[k] for r in results]) for k in ("subset", "subsubset")
    ]
    metrics = _ordered_set([metric for r in results for metric in r["metrics"]])

    for subset in subsets:
        tab = {}
        for metric in metrics:
            tab[metric] = []
            # NOTE(review): `header` is rebuilt for every metric and extended
            # with ALL subsubsets whenever any value is found; if metrics have
            # uneven subsubset coverage the printed header may not line up
            # with the columns — flagged by the pyre-fixme below.
            header = ["metric"]
            for subsubset in subsubsets:
                metric_vals = [
                    r["metrics"][metric]
                    for r in results
                    if r["subsubset"] == subsubset and r["subset"] == subset
                ]
                if len(metric_vals) > 0:
                    tab[metric].extend(metric_vals)
                    header.extend(subsubsets)

        # Only print a table if this subset produced any values at all.
        if any(len(v) > 0 for v in tab.values()):
            print(f"===== NVS results; subset={subset} =====")
            print(
                tabulate(
                    [[metric, *v] for metric, v in tab.items()],
                    # pyre-fixme[61]: `header` is undefined, or not always defined.
                    headers=header,
                )
            )
+
+
+def _ordered_set(list_):
+ return list(OrderedDict((i, 0) for i in list_).keys())
+
+
+def aggregate_nvs_results(task_results):
+ """
+ Aggregate nvs results.
+ For singlescene, this averages over all categories and scenes,
+ for multiscene, the average is over all per-category results.
+ """
+ task_results_cat = [r_ for r in task_results for r_ in r]
+ subsets, subsubsets = [
+ _ordered_set([r[k] for r in task_results_cat]) for k in ("subset", "subsubset")
+ ]
+ metrics = _ordered_set(
+ [metric for r in task_results_cat for metric in r["metrics"]]
+ )
+ average_results = []
+ for subset in subsets:
+ for subsubset in subsubsets:
+ metrics_lists = [
+ r["metrics"]
+ for r in task_results_cat
+ if r["subsubset"] == subsubset and r["subset"] == subset
+ ]
+ avg_metrics = {}
+ for metric in metrics:
+ avg_metrics[metric] = float(
+ np.nanmean(
+ np.array([metric_list[metric] for metric_list in metrics_lists])
+ )
+ )
+ average_results.append(
+ {
+ "subset": subset,
+ "subsubset": subsubset,
+ "metrics": avg_metrics,
+ }
+ )
+ return average_results
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/evaluation/evaluator.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/evaluation/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..84306997cc414d8316cd62125570ff182ae3cbb0
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/evaluation/evaluator.py
@@ -0,0 +1,160 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import copy
+import json
+import logging
+import os
+import warnings
+from typing import Any, Dict, List, Optional, Tuple
+
+import torch
+
+import tqdm
+from pytorch3d.implicitron.evaluation import evaluate_new_view_synthesis as evaluate
+from pytorch3d.implicitron.models.base_model import EvaluationMode, ImplicitronModelBase
+from pytorch3d.implicitron.tools.config import (
+ registry,
+ ReplaceableBase,
+ run_auto_creation,
+)
+from torch.utils.data import DataLoader
+
+logger = logging.getLogger(__name__)
+
+
class EvaluatorBase(ReplaceableBase):
    """
    Replaceable base class for objects that evaluate a trained model on data
    from a dataloader and report a dict of loss/objective names and values.
    """

    # If True, the evaluator aggregates results over multiple sequences.
    is_multisequence: bool = False

    def run(
        self, model: ImplicitronModelBase, dataloader: DataLoader, **kwargs
    ) -> Dict[str, Any]:
        """
        Run the evaluation of `model` on the data provided by `dataloader`.

        Returns:
            A dictionary mapping metric names to their values.
        """
        raise NotImplementedError
+
+
@registry.register
class ImplicitronEvaluator(EvaluatorBase):
    """
    Evaluate the results of Implicitron training.
    """

    # UNUSED; preserved for compatibility purposes
    camera_difficulty_bin_breaks: Tuple[float, ...] = 0.97, 0.98

    def __post_init__(self):
        run_auto_creation(self)

    # pyre-fixme[14]: `run` overrides method defined in `EvaluatorBase` inconsistently.
    def run(
        self,
        model: ImplicitronModelBase,
        dataloader: DataLoader,
        device: torch.device,
        dump_to_json: bool = False,
        exp_dir: Optional[str] = None,
        epoch: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        """
        Evaluate the results of Implicitron training. Optionally, dump results to
        exp_dir/results_test.json.

        Args:
            model: A (trained) model to evaluate.
            dataloader: A test dataloader.
            device: A torch device.
            dump_to_json: If True, will dump the results to a json file.
            exp_dir: Root experiment directory.
            epoch: Evaluation epoch (to be stored in the results dict).

        Returns:
            A dictionary of results.
        """
        # The LPIPS perceptual metric is optional: if the `lpips` package is
        # not installed, the corresponding losses are simply skipped.
        try:
            import lpips

            lpips_model = lpips.LPIPS(net="vgg")
            lpips_model = lpips_model.to(device)
        except ImportError:
            warnings.warn(
                "lpips library NOT FOUND. lpips losses will not be calculated"
            )
            lpips_model = None

        model.eval()

        per_batch_eval_results = []
        logger.info("Evaluating model ...")
        for frame_data in tqdm.tqdm(dataloader):
            frame_data = frame_data.to(device)

            # mask out the unknown images so that the model does not see them
            frame_data_for_eval = _get_eval_frame_data(frame_data)

            with torch.no_grad():
                preds = model(
                    **{
                        **frame_data_for_eval,
                        "evaluation_mode": EvaluationMode.EVALUATION,
                    }
                )
                # Deep-copy the render so later processing cannot mutate the
                # tensors stored with this batch's evaluation result.
                implicitron_render = copy.deepcopy(preds["implicitron_render"])
                per_batch_eval_results.append(
                    evaluate.eval_batch(
                        frame_data,
                        implicitron_render,
                        bg_color="black",
                        lpips_model=lpips_model,
                    )
                )

        # Aggregate the per-batch metrics into per-category summaries.
        _, category_result = evaluate.summarize_nvs_eval_results(
            per_batch_eval_results,
            self.is_multisequence,
        )

        results = category_result["results"]
        evaluate.pretty_print_nvs_metrics(results)
        if dump_to_json:
            _dump_to_json(epoch, exp_dir, results)

        return category_result["results"]
+
+
def _dump_to_json(
    epoch: Optional[int], exp_dir: Optional[str], results: List[Dict[str, Any]]
) -> None:
    """
    Serialize `results` to `<exp_dir>/results_test.json`.

    Args:
        epoch: If not None, stored in each result dict under "eval_epoch".
        exp_dir: Root experiment directory; must not be None.
        results: A list of per-category result dicts to serialize.

    Raises:
        ValueError: If `exp_dir` is None.
    """
    # Fail fast before mutating `results`, so a missing save path does not
    # leave the caller's dicts partially annotated with "eval_epoch".
    if exp_dir is None:
        raise ValueError("Cannot save results to json without a specified save path.")

    if epoch is not None:
        for r in results:
            r["eval_epoch"] = int(epoch)

    logger.info("Evaluation results")
    with open(os.path.join(exp_dir, "results_test.json"), "w") as f:
        json.dump(results, f)
+
+
+def _get_eval_frame_data(frame_data: Any) -> Any:
+ """
+ Masks the target image data to make sure we cannot use it at model evaluation
+ time. Assumes the first batch element is target, the rest are source.
+ """
+ frame_data_for_eval = copy.deepcopy(frame_data)
+ for k in ("image_rgb", "depth_map", "fg_probability", "mask_crop"):
+ value = getattr(frame_data_for_eval, k)
+ value[0].zero_()
+ return frame_data_for_eval
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7dc294bfb87b1948f305f0291726535266035861
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+# Allows to register the models
+# see: pytorch3d.implicitron.tools.config.registry:register
+from pytorch3d.implicitron.models.generic_model import GenericModel
+from pytorch3d.implicitron.models.overfit_model import OverfitModel
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/base_model.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/base_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff645fd709ed20f61e0b68064b47662acb0f839c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/base_model.py
@@ -0,0 +1,93 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Optional
+
+import torch
+
+from pytorch3d.implicitron.models.renderer.base import EvaluationMode
+from pytorch3d.implicitron.tools.config import ReplaceableBase
+from pytorch3d.renderer.cameras import CamerasBase
+
+
@dataclass
class ImplicitronRender:
    """
    Holds the tensors that describe a result of rendering.
    """

    depth_render: Optional[torch.Tensor] = None
    image_render: Optional[torch.Tensor] = None
    mask_render: Optional[torch.Tensor] = None
    camera_distance: Optional[torch.Tensor] = None

    def clone(self) -> "ImplicitronRender":
        """
        Return a new ImplicitronRender whose tensors are detached clones of
        this one's; `None` fields stay `None`.
        """
        cloned_fields = {}
        for name in ("depth_render", "image_render", "mask_render", "camera_distance"):
            tensor = getattr(self, name)
            cloned_fields[name] = None if tensor is None else tensor.detach().clone()
        return ImplicitronRender(**cloned_fields)
+
+
class ImplicitronModelBase(ReplaceableBase, torch.nn.Module):
    """
    Replaceable abstract base for all image generation / rendering models.
    `forward()` method produces a render with a depth map. Derives from Module
    so we can rely on basic functionality provided to torch for model
    optimization.
    """

    # The keys from `preds` (output of ImplicitronModelBase.forward) to be logged in
    # the training loop.
    # NOTE(review): `field(...)` works on this non-dataclass because Implicitron's
    # config system appears to expand these classes into dataclasses — confirm.
    log_vars: List[str] = field(default_factory=lambda: ["objective"])

    def forward(
        self,
        *,  # force keyword-only arguments
        image_rgb: Optional[torch.Tensor],
        camera: CamerasBase,
        fg_probability: Optional[torch.Tensor],
        mask_crop: Optional[torch.Tensor],
        depth_map: Optional[torch.Tensor],
        sequence_name: Optional[List[str]],
        evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
        **kwargs,
    ) -> Dict[str, Any]:
        """
        Args:
            image_rgb: A tensor of shape `(B, 3, H, W)` containing a batch of rgb images;
                the first `min(B, n_train_target_views)` images are considered targets and
                are used to supervise the renders; the rest corresponding to the source
                viewpoints from which features will be extracted.
            camera: An instance of CamerasBase containing a batch of `B` cameras corresponding
                to the viewpoints of target images, from which the rays will be sampled,
                and source images, which will be used for intersecting with target rays.
            fg_probability: A tensor of shape `(B, 1, H, W)` containing a batch of
                foreground masks.
            mask_crop: A binary tensor of shape `(B, 1, H, W)` denoting valid
                regions in the input images (i.e. regions that do not correspond
                to, e.g., zero-padding). When the `RaySampler`'s sampling mode is set to
                "mask_sample", rays will be sampled in the non zero regions.
            depth_map: A tensor of shape `(B, 1, H, W)` containing a batch of depth maps.
            sequence_name: A list of `B` strings corresponding to the sequence names
                from which images `image_rgb` were extracted. They are used to match
                target frames with relevant source frames.
            evaluation_mode: one of EvaluationMode.TRAINING or
                EvaluationMode.EVALUATION which determines the settings used for
                rendering.

        Returns:
            preds: A dictionary containing all outputs of the forward pass. All models should
                output an instance of `ImplicitronRender` in `preds["implicitron_render"]`.
        """
        raise NotImplementedError()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/feature_extractor/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/feature_extractor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c47703b66349df4c048e5040b931ff9b67a925b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/feature_extractor/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from .feature_extractor import FeatureExtractorBase
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/feature_extractor/feature_extractor.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/feature_extractor/feature_extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd71b1e20d718408be6d553d3d1d38c1347a990a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/feature_extractor/feature_extractor.py
@@ -0,0 +1,43 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from typing import Any, Dict, Optional
+
+import torch
+from pytorch3d.implicitron.tools.config import ReplaceableBase
+
+
class FeatureExtractorBase(ReplaceableBase, torch.nn.Module):
    """
    Replaceable base class for modules that extract a set of named feature
    maps from input images.
    """

    def get_feat_dims(self) -> int:
        """
        Returns:
            total number of feature dimensions of the output.
            (i.e. sum_i(dim_i))
        """
        raise NotImplementedError

    def forward(
        self,
        imgs: Optional[torch.Tensor],
        masks: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Dict[Any, torch.Tensor]:
        """
        Args:
            imgs: A batch of input images of shape `(B, 3, H, W)`.
            masks: A batch of input masks of shape `(B, 3, H, W)`.

        Returns:
            A dict `{f_i: t_i}` mapping predicted feature names `f_i` to
            tensors `t_i` of shape `(B, dim_i, H_i, W_i)`.
        """
        raise NotImplementedError
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/feature_extractor/resnet_feature_extractor.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/feature_extractor/resnet_feature_extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..08c86489150b6577236e059d6b2ca90f8fc7beb9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/feature_extractor/resnet_feature_extractor.py
@@ -0,0 +1,214 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import logging
+import math
+from typing import Any, Dict, Optional, Tuple
+
+import torch
+import torch.nn.functional as Fu
+import torchvision
+from pytorch3d.implicitron.tools.config import registry
+
+from . import FeatureExtractorBase
+
+
logger = logging.getLogger(__name__)

# Keys under which masks / raw images are stored in the output feature dict.
MASK_FEATURE_NAME = "mask"
IMAGE_FEATURE_NAME = "image"

# Output channel counts of the four residual stages (layer1..layer4) for each
# supported torchvision resnet backbone.
_FEAT_DIMS = {
    "resnet18": (64, 128, 256, 512),
    "resnet34": (64, 128, 256, 512),
    "resnet50": (256, 512, 1024, 2048),
    "resnet101": (256, 512, 1024, 2048),
    "resnet152": (256, 512, 1024, 2048),
}

# ImageNet channel statistics used to normalize inputs for the pretrained
# torchvision resnets.
_RESNET_MEAN = [0.485, 0.456, 0.406]
_RESNET_STD = [0.229, 0.224, 0.225]
+
+
@registry.register
class ResNetFeatureExtractor(FeatureExtractorBase):
    """
    Implements an image feature extractor. Depending on the settings allows
    to extract:
        - deep features: A CNN ResNet backbone from torchvision (with/without
            pretrained weights) which extracts deep features.
        - masks: Segmentation masks.
        - images: Raw input RGB images.

    Settings:
        name: name of the resnet backbone (from torchvision)
        pretrained: If true, will load the pretrained weights
        stages: List of stages from which to extract features.
            Features from each stage are returned as key value
            pairs in the forward function
        normalize_image: If set will normalize the RGB values of
            the image based on the Resnet mean/std
        image_rescale: If not 1.0, this rescale factor will be
            used to resize the image
        first_max_pool: If set, a max pool layer is added after the first
            convolutional layer
        proj_dim: The number of output channels for the convolutional layers
        l2_norm: If set, l2 normalization is applied to the extracted features
        add_masks: If set, the masks will be saved in the output dictionary
        add_images: If set, the images will be saved in the output dictionary
        global_average_pool: If set, global average pooling step is performed
        feature_rescale: If not 1.0, this rescale factor will be used to
            rescale the output features
    """

    name: str = "resnet34"
    pretrained: bool = True
    stages: Tuple[int, ...] = (1, 2, 3, 4)
    normalize_image: bool = True
    image_rescale: float = 128 / 800.0
    first_max_pool: bool = True
    proj_dim: int = 32
    l2_norm: bool = True
    add_masks: bool = True
    add_images: bool = True
    global_average_pool: bool = False  # this can simulate global/non-spatial features
    feature_rescale: float = 1.0

    def __post_init__(self):
        if self.normalize_image:
            # register buffers needed to normalize the image
            for k, v in (("_resnet_mean", _RESNET_MEAN), ("_resnet_std", _RESNET_STD)):
                self.register_buffer(
                    k,
                    torch.FloatTensor(v).view(1, 3, 1, 1),
                    persistent=False,
                )

        # Maps each enabled feature name to its channel dimension.
        self._feat_dim = {}

        if len(self.stages) == 0:
            # do not extract any resnet features
            pass
        else:
            net = getattr(torchvision.models, self.name)(pretrained=self.pretrained)
            if self.first_max_pool:
                self.stem = torch.nn.Sequential(
                    net.conv1, net.bn1, net.relu, net.maxpool
                )
            else:
                self.stem = torch.nn.Sequential(net.conv1, net.bn1, net.relu)
            self.max_stage = max(self.stages)
            self.layers = torch.nn.ModuleList()
            self.proj_layers = torch.nn.ModuleList()
            # Keep all backbone stages up to the deepest requested one; stages
            # that are not requested get an Identity projection placeholder so
            # layers and proj_layers stay index-aligned.
            for stage in range(self.max_stage):
                stage_name = f"layer{stage+1}"
                feature_name = self._get_resnet_stage_feature_name(stage)
                if (stage + 1) in self.stages:
                    if (
                        self.proj_dim > 0
                        and _FEAT_DIMS[self.name][stage] > self.proj_dim
                    ):
                        # 1x1 conv reducing the stage's channels to proj_dim
                        proj = torch.nn.Conv2d(
                            _FEAT_DIMS[self.name][stage],
                            self.proj_dim,
                            1,
                            1,
                            bias=True,
                        )
                        self._feat_dim[feature_name] = self.proj_dim
                    else:
                        proj = torch.nn.Identity()
                        self._feat_dim[feature_name] = _FEAT_DIMS[self.name][stage]
                else:
                    proj = torch.nn.Identity()
                self.proj_layers.append(proj)
                self.layers.append(getattr(net, stage_name))

        if self.add_masks:
            self._feat_dim[MASK_FEATURE_NAME] = 1

        if self.add_images:
            self._feat_dim[IMAGE_FEATURE_NAME] = 3

        logger.info(f"Feat extractor total dim = {self.get_feat_dims()}")
        self.stages = set(self.stages)  # convert to set for faster "in"

    def _get_resnet_stage_feature_name(self, stage) -> str:
        # 0-based stage index -> output dict key for that resnet stage
        return f"res_layer_{stage+1}"

    def _resnet_normalize_image(self, img: torch.Tensor) -> torch.Tensor:
        # Apply the ImageNet mean/std normalization expected by the backbone.
        return (img - self._resnet_mean) / self._resnet_std

    def get_feat_dims(self) -> int:
        # Total number of output channels summed over all enabled features.
        return sum(self._feat_dim.values())

    def forward(
        self,
        imgs: Optional[torch.Tensor],
        masks: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Dict[Any, torch.Tensor]:
        """
        Args:
            imgs: A batch of input images of shape `(B, 3, H, W)`.
            masks: A batch of input masks of shape `(B, 3, H, W)`.

        Returns:
            out_feats: A dict `{f_i: t_i}` keyed by predicted feature names `f_i`
                and their corresponding tensors `t_i` of shape `(B, dim_i, H_i, W_i)`.
        """

        out_feats = {}

        imgs_input = imgs
        if self.image_rescale != 1.0 and imgs_input is not None:
            imgs_resized = Fu.interpolate(
                imgs_input,
                scale_factor=self.image_rescale,
                mode="bilinear",
            )
        else:
            imgs_resized = imgs_input

        if len(self.stages) > 0:
            assert imgs_resized is not None

            if self.normalize_image:
                imgs_normed = self._resnet_normalize_image(imgs_resized)
            else:
                imgs_normed = imgs_resized

            feats = self.stem(imgs_normed)
            for stage, (layer, proj) in enumerate(zip(self.layers, self.proj_layers)):
                feats = layer(feats)
                # just a sanity check below
                assert feats.shape[1] == _FEAT_DIMS[self.name][stage]
                if (stage + 1) in self.stages:
                    f = proj(feats)
                    if self.global_average_pool:
                        # Average over the spatial axes, keeping them as
                        # singleton dims so the output stays 4D (B, dim_i, 1, 1)
                        # as documented above. (Fixed: Tensor.mean takes `dim`,
                        # not `dims` — the previous `mean(dims=(2, 3))` raised
                        # a TypeError whenever global_average_pool was enabled.)
                        f = f.mean(dim=(2, 3), keepdim=True)
                    if self.l2_norm:
                        # Scale so the concatenation of all stages has unit norm.
                        normfac = 1.0 / math.sqrt(len(self.stages))
                        f = Fu.normalize(f, dim=1) * normfac
                    feature_name = self._get_resnet_stage_feature_name(stage)
                    out_feats[feature_name] = f

        if self.add_masks:
            assert masks is not None
            out_feats[MASK_FEATURE_NAME] = masks

        if self.add_images:
            assert imgs_resized is not None
            out_feats[IMAGE_FEATURE_NAME] = imgs_resized

        if self.feature_rescale != 1.0:
            out_feats = {k: self.feature_rescale * f for k, f in out_feats.items()}

        # pyre-fixme[7]: Incompatible return type, expected `Dict[typing.Any, Tensor]`
        # but got `Dict[typing.Any, float]`
        return out_feats
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/generic_model.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/generic_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..428b4ccc3aef6f1def61a1033037a516c5d66c15
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/generic_model.py
@@ -0,0 +1,780 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+# Note: The #noqa comments below are for unused imports of pluggable implementations
+# which are part of implicitron. They ensure that the registry is prepopulated.
+
+import logging
+from dataclasses import field
+from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union
+
+import torch
+from omegaconf import DictConfig
+
+from pytorch3d.implicitron.models.base_model import (
+ ImplicitronModelBase,
+ ImplicitronRender,
+)
+from pytorch3d.implicitron.models.feature_extractor import FeatureExtractorBase
+from pytorch3d.implicitron.models.global_encoder.global_encoder import GlobalEncoderBase
+from pytorch3d.implicitron.models.implicit_function.base import ImplicitFunctionBase
+from pytorch3d.implicitron.models.metrics import (
+ RegularizationMetricsBase,
+ ViewMetricsBase,
+)
+
+from pytorch3d.implicitron.models.renderer.base import (
+ BaseRenderer,
+ EvaluationMode,
+ ImplicitFunctionWrapper,
+ ImplicitronRayBundle,
+ RendererOutput,
+ RenderSamplingMode,
+)
+from pytorch3d.implicitron.models.renderer.ray_sampler import RaySamplerBase
+
+from pytorch3d.implicitron.models.utils import (
+ apply_chunked,
+ chunk_generator,
+ log_loss_weights,
+ preprocess_input,
+ weighted_sum_losses,
+)
+from pytorch3d.implicitron.models.view_pooler.view_pooler import ViewPooler
+from pytorch3d.implicitron.tools import vis_utils
+from pytorch3d.implicitron.tools.config import (
+ expand_args_fields,
+ registry,
+ run_auto_creation,
+)
+
+from pytorch3d.implicitron.tools.rasterize_mc import rasterize_sparse_ray_bundle
+from pytorch3d.renderer import utils as rend_utils
+from pytorch3d.renderer.cameras import CamerasBase
+
+
+if TYPE_CHECKING:
+ from visdom import Visdom
+logger = logging.getLogger(__name__)
+
+
+@registry.register
+class GenericModel(ImplicitronModelBase):
+ """
+ GenericModel is a wrapper for the neural implicit
+ rendering and reconstruction pipeline which consists
+ of the following sequence of 7 steps (steps 2–4 are normally
+ skipped in overfitting scenario, since conditioning on source views
+ does not add much information; otherwise they should be present altogether):
+
+
+ (1) Ray Sampling
+ ------------------
+ Rays are sampled from an image grid based on the target view(s).
+ │_____________
+ │ │
+ │ ▼
+ │ (2) Feature Extraction (optional)
+ │ -----------------------
+ │ A feature extractor (e.g. a convolutional
+ │ neural net) is used to extract image features
+ │ from the source view(s).
+ │ │
+ │ ▼
+ │ (3) View Sampling (optional)
+ │ ------------------
+ │ Image features are sampled at the 2D projections
+ │ of a set of 3D points along each of the sampled
+ │ target rays from (1).
+ │ │
+ │ ▼
+ │ (4) Feature Aggregation (optional)
+ │ ------------------
+ │ Aggregate features and masks sampled from
+ │ image view(s) in (3).
+ │ │
+ │____________▼
+ │
+ ▼
+ (5) Implicit Function Evaluation
+ ------------------
+ Evaluate the implicit function(s) at the sampled ray points
+ (optionally pass in the aggregated image features from (4)).
+ (also optionally pass in a global encoding from global_encoder).
+ │
+ ▼
+ (6) Rendering
+ ------------------
+ Render the image into the target cameras by raymarching along
+ the sampled rays and aggregating the colors and densities
+ output by the implicit function in (5).
+ │
+ ▼
+ (7) Loss Computation
+ ------------------
+ Compute losses based on the predicted target image(s).
+
+
+ The `forward` function of GenericModel executes
+ this sequence of steps. Currently, steps 1, 3, 4, 5, 6
+ can be customized by intializing a subclass of the appropriate
+ baseclass and adding the newly created module to the registry.
+ Please see https://github.com/facebookresearch/pytorch3d/blob/main/projects/implicitron_trainer/README.md#custom-plugins
+ for more details on how to create and register a custom component.
+
+ In the config .yaml files for experiments, the parameters below are
+ contained in the
+ `model_factory_ImplicitronModelFactory_args.model_GenericModel_args`
+ node. As GenericModel derives from ReplaceableBase, the input arguments are
+ parsed by the run_auto_creation function to initialize the
+ necessary member modules. Please see implicitron_trainer/README.md
+ for more details on this process.
+
+ Args:
+ mask_images: Whether or not to mask the RGB image background given the
+ foreground mask (the `fg_probability` argument of `GenericModel.forward`)
+ mask_depths: Whether or not to mask the depth image background given the
+ foreground mask (the `fg_probability` argument of `GenericModel.forward`)
+ render_image_width: Width of the output image to render
+ render_image_height: Height of the output image to render
+ mask_threshold: If greater than 0.0, the foreground mask is
+ thresholded by this value before being applied to the RGB/Depth images
+ output_rasterized_mc: If True, visualize the Monte-Carlo pixel renders by
+ splatting onto an image grid. Default: False.
+ bg_color: RGB values for setting the background color of input image
+ if mask_images=True. Defaults to (0.0, 0.0, 0.0). Each renderer has its own
+ way to determine the background color of its output, unrelated to this.
+ num_passes: The specified implicit_function is initialized num_passes
+ times and run sequentially.
+ chunk_size_grid: The total number of points which can be rendered
+ per chunk. This is used to compute the number of rays used
+ per chunk when the chunked version of the renderer is used (in order
+ to fit rendering on all rays in memory)
+ render_features_dimensions: The number of output features to render.
+ Defaults to 3, corresponding to RGB images.
+ n_train_target_views: The number of cameras to render into at training
+ time; first `n_train_target_views` in the batch are considered targets,
+ the rest are sources.
+ sampling_mode_training: The sampling method to use during training. Must be
+ a value from the RenderSamplingMode Enum.
+ sampling_mode_evaluation: Same as above but for evaluation.
+ global_encoder_class_type: The name of the class to use for global_encoder,
+ which must be available in the registry. Or `None` to disable global encoder.
+ global_encoder: An instance of `GlobalEncoder`. This is used to generate an encoding
+ of the image (referred to as the global_code) that can be used to model aspects of
+ the scene such as multiple objects or morphing objects. It is up to the implicit
+ function definition how to use it, but the most typical way is to broadcast and
+ concatenate to the other inputs for the implicit function.
+ raysampler_class_type: The name of the raysampler class which is available
+ in the global registry.
+ raysampler: An instance of RaySampler which is used to emit
+ rays from the target view(s).
+ renderer_class_type: The name of the renderer class which is available in the global
+ registry.
+ renderer: A renderer class which inherits from BaseRenderer. This is used to
+ generate the images from the target view(s).
+ image_feature_extractor_class_type: If a str, constructs and enables
+ the `image_feature_extractor` object of this type. Or None if not needed.
+ image_feature_extractor: A module for extrating features from an input image.
+ view_pooler_enabled: If `True`, constructs and enables the `view_pooler` object.
+ This means features are sampled from the source image(s)
+ at the projected 2d locations of the sampled 3d ray points from the target
+ view(s), i.e. this activates step (3) above.
+ view_pooler: An instance of ViewPooler which is used for sampling of
+ image-based features at the 2D projections of a set
+ of 3D points and aggregating the sampled features.
+ implicit_function_class_type: The type of implicit function to use which
+ is available in the global registry.
+ implicit_function: An instance of ImplicitFunctionBase. The actual implicit functions
+ are initialised to be in self._implicit_functions.
+ view_metrics: An instance of ViewMetricsBase used to compute loss terms which
+ are independent of the model's parameters.
+ view_metrics_class_type: The type of view metrics to use, must be available in
+ the global registry.
+ regularization_metrics: An instance of RegularizationMetricsBase used to compute
+ regularization terms which can depend on the model's parameters.
+ regularization_metrics_class_type: The type of regularization metrics to use,
+ must be available in the global registry.
+ loss_weights: A dictionary with a {loss_name: weight} mapping; see documentation
+ for `ViewMetrics` class for available loss functions.
+ log_vars: A list of variable names which should be logged.
+ The names should correspond to a subset of the keys of the
+ dict `preds` output by the `forward` function.
+ """ # noqa: B950
+
+ mask_images: bool = True
+ mask_depths: bool = True
+ render_image_width: int = 400
+ render_image_height: int = 400
+ mask_threshold: float = 0.5
+ output_rasterized_mc: bool = False
+ bg_color: Tuple[float, float, float] = (0.0, 0.0, 0.0)
+ num_passes: int = 1
+ chunk_size_grid: int = 4096
+ render_features_dimensions: int = 3
+ tqdm_trigger_threshold: int = 16
+
+ n_train_target_views: int = 1
+ sampling_mode_training: str = "mask_sample"
+ sampling_mode_evaluation: str = "full_grid"
+
+ # ---- global encoder settings
+ global_encoder_class_type: Optional[str] = None
+ # pyre-fixme[13]: Attribute `global_encoder` is never initialized.
+ global_encoder: Optional[GlobalEncoderBase]
+
+ # ---- raysampler
+ raysampler_class_type: str = "AdaptiveRaySampler"
+ # pyre-fixme[13]: Attribute `raysampler` is never initialized.
+ raysampler: RaySamplerBase
+
+ # ---- renderer configs
+ renderer_class_type: str = "MultiPassEmissionAbsorptionRenderer"
+ # pyre-fixme[13]: Attribute `renderer` is never initialized.
+ renderer: BaseRenderer
+
+ # ---- image feature extractor settings
+ # (This is only created if view_pooler is enabled)
+ # pyre-fixme[13]: Attribute `image_feature_extractor` is never initialized.
+ image_feature_extractor: Optional[FeatureExtractorBase]
+ image_feature_extractor_class_type: Optional[str] = None
+ # ---- view pooler settings
+ view_pooler_enabled: bool = False
+ # pyre-fixme[13]: Attribute `view_pooler` is never initialized.
+ view_pooler: Optional[ViewPooler]
+
+ # ---- implicit function settings
+ implicit_function_class_type: str = "NeuralRadianceFieldImplicitFunction"
+ # This is just a model, never constructed.
+ # The actual implicit functions live in self._implicit_functions
+ # pyre-fixme[13]: Attribute `implicit_function` is never initialized.
+ implicit_function: ImplicitFunctionBase
+
+ # ----- metrics
+ # pyre-fixme[13]: Attribute `view_metrics` is never initialized.
+ view_metrics: ViewMetricsBase
+ view_metrics_class_type: str = "ViewMetrics"
+
+ # pyre-fixme[13]: Attribute `regularization_metrics` is never initialized.
+ regularization_metrics: RegularizationMetricsBase
+ regularization_metrics_class_type: str = "RegularizationMetrics"
+
+ # ---- loss weights
+ loss_weights: Dict[str, float] = field(
+ default_factory=lambda: {
+ "loss_rgb_mse": 1.0,
+ "loss_prev_stage_rgb_mse": 1.0,
+ "loss_mask_bce": 0.0,
+ "loss_prev_stage_mask_bce": 0.0,
+ }
+ )
+
+ # ---- variables to be logged (logger automatically ignores if not computed)
+ log_vars: List[str] = field(
+ default_factory=lambda: [
+ "loss_rgb_psnr_fg",
+ "loss_rgb_psnr",
+ "loss_rgb_mse",
+ "loss_rgb_huber",
+ "loss_depth_abs",
+ "loss_depth_abs_fg",
+ "loss_mask_neg_iou",
+ "loss_mask_bce",
+ "loss_mask_beta_prior",
+ "loss_eikonal",
+ "loss_density_tv",
+ "loss_depth_neg_penalty",
+ "loss_autodecoder_norm",
+ # metrics that are only logged in 2+stage renderes
+ "loss_prev_stage_rgb_mse",
+ "loss_prev_stage_rgb_psnr_fg",
+ "loss_prev_stage_rgb_psnr",
+ "loss_prev_stage_mask_bce",
+ # basic metrics
+ "objective",
+ "epoch",
+ "sec/it",
+ ]
+ )
+
+ @classmethod
+ def pre_expand(cls) -> None:
+ # use try/finally to bypass cinder's lazy imports
+ try:
+ from pytorch3d.implicitron.models.feature_extractor.resnet_feature_extractor import ( # noqa: F401, B950
+ ResNetFeatureExtractor,
+ )
+ from pytorch3d.implicitron.models.implicit_function.idr_feature_field import ( # noqa: F401, B950
+ IdrFeatureField,
+ )
+ from pytorch3d.implicitron.models.implicit_function.neural_radiance_field import ( # noqa: F401, B950
+ NeRFormerImplicitFunction,
+ )
+ from pytorch3d.implicitron.models.implicit_function.scene_representation_networks import ( # noqa: F401, B950
+ SRNHyperNetImplicitFunction,
+ )
+ from pytorch3d.implicitron.models.implicit_function.voxel_grid_implicit_function import ( # noqa: F401, B950
+ VoxelGridImplicitFunction,
+ )
+ from pytorch3d.implicitron.models.renderer.lstm_renderer import ( # noqa: F401
+ LSTMRenderer,
+ )
+ from pytorch3d.implicitron.models.renderer.multipass_ea import ( # noqa: F401
+ MultiPassEmissionAbsorptionRenderer,
+ )
+ from pytorch3d.implicitron.models.renderer.sdf_renderer import ( # noqa: F401
+ SignedDistanceFunctionRenderer,
+ )
+ finally:
+ pass
+
+ def __post_init__(self):
+ if self.view_pooler_enabled:
+ if self.image_feature_extractor_class_type is None:
+ raise ValueError(
+ "image_feature_extractor must be present for view pooling."
+ )
+ run_auto_creation(self)
+
+ self._implicit_functions = self._construct_implicit_functions()
+
+ log_loss_weights(self.loss_weights, logger)
+
+ def forward(
+ self,
+ *, # force keyword-only arguments
+ image_rgb: Optional[torch.Tensor],
+ camera: CamerasBase,
+ fg_probability: Optional[torch.Tensor] = None,
+ mask_crop: Optional[torch.Tensor] = None,
+ depth_map: Optional[torch.Tensor] = None,
+ sequence_name: Optional[List[str]] = None,
+ frame_timestamp: Optional[torch.Tensor] = None,
+ evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
+ **kwargs,
+ ) -> Dict[str, Any]:
+ """
+ Args:
+ image_rgb: A tensor of shape `(B, 3, H, W)` containing a batch of rgb images;
+ the first `min(B, n_train_target_views)` images are considered targets and
+ are used to supervise the renders; the rest correspond to the source
+ viewpoints from which features will be extracted.
+ camera: An instance of CamerasBase containing a batch of `B` cameras corresponding
+ to the viewpoints of target images, from which the rays will be sampled,
+ and source images, which will be used for intersecting with target rays.
+ fg_probability: A tensor of shape `(B, 1, H, W)` containing a batch of
+ foreground masks.
+ mask_crop: A binary tensor of shape `(B, 1, H, W)` denoting valid
+ regions in the input images (i.e. regions that do not correspond
+ to, e.g., zero-padding). When the `RaySampler`'s sampling mode is set to
+ "mask_sample", rays will be sampled in the non zero regions.
+ depth_map: A tensor of shape `(B, 1, H, W)` containing a batch of depth maps.
+ sequence_name: A list of `B` strings corresponding to the sequence names
+ from which images `image_rgb` were extracted. They are used to match
+ target frames with relevant source frames.
+ frame_timestamp: Optionally a tensor of shape `(B,)` containing a batch
+ of frame timestamps.
+ evaluation_mode: one of EvaluationMode.TRAINING or
+ EvaluationMode.EVALUATION which determines the settings used for
+ rendering.
+
+ Returns:
+ preds: A dictionary containing all outputs of the forward pass including the
+ rendered images, depths, masks, losses and other metrics.
+ """
+ image_rgb, fg_probability, depth_map = preprocess_input(
+ image_rgb,
+ fg_probability,
+ depth_map,
+ self.mask_images,
+ self.mask_depths,
+ self.mask_threshold,
+ self.bg_color,
+ )
+
+ # Obtain the batch size from the camera as this is the only required input.
+ batch_size = camera.R.shape[0]
+
+ # Determine the number of target views, i.e. cameras we render into.
+ n_targets = (
+ 1
+ if evaluation_mode == EvaluationMode.EVALUATION
+ else (
+ batch_size
+ if self.n_train_target_views <= 0
+ else min(self.n_train_target_views, batch_size)
+ )
+ )
+
+ # A helper function for selecting the first n_targets elements from the input
+ # where the latter can be None.
+ def safe_slice_targets(
+ tensor: Optional[Union[torch.Tensor, List[str]]],
+ ) -> Optional[Union[torch.Tensor, List[str]]]:
+ return None if tensor is None else tensor[:n_targets]
+
+ # Select the target cameras.
+ target_cameras = camera[list(range(n_targets))]
+
+ # Determine the used ray sampling mode.
+ sampling_mode = RenderSamplingMode(
+ self.sampling_mode_training
+ if evaluation_mode == EvaluationMode.TRAINING
+ else self.sampling_mode_evaluation
+ )
+
+ # (1) Sample rendering rays with the ray sampler.
+ # pyre-ignore[29]
+ ray_bundle: ImplicitronRayBundle = self.raysampler(
+ target_cameras,
+ evaluation_mode,
+ mask=(
+ mask_crop[:n_targets]
+ if mask_crop is not None
+ and sampling_mode == RenderSamplingMode.MASK_SAMPLE
+ else None
+ ),
+ )
+
+ # custom_args hold additional arguments to the implicit function.
+ custom_args = {}
+
+ if self.image_feature_extractor is not None:
+ # (2) Extract features for the image
+ img_feats = self.image_feature_extractor(image_rgb, fg_probability)
+ else:
+ img_feats = None
+
+ if self.view_pooler_enabled:
+ if sequence_name is None:
+ raise ValueError("sequence_name must be provided for view pooling")
+ assert img_feats is not None
+
+ # (3-4) Sample features and masks at the ray points.
+ # Aggregate features from multiple views.
+ def curried_viewpooler(pts):
+ return self.view_pooler(
+ pts=pts,
+ seq_id_pts=sequence_name[:n_targets],
+ camera=camera,
+ seq_id_camera=sequence_name,
+ feats=img_feats,
+ masks=mask_crop,
+ )
+
+ custom_args["fun_viewpool"] = curried_viewpooler
+
+ global_code = None
+ if self.global_encoder is not None:
+ global_code = self.global_encoder( # pyre-fixme[29]
+ sequence_name=safe_slice_targets(sequence_name),
+ frame_timestamp=safe_slice_targets(frame_timestamp),
+ )
+ custom_args["global_code"] = global_code
+
+ for func in self._implicit_functions:
+ func.bind_args(**custom_args)
+
+ inputs_to_be_chunked = {}
+ if fg_probability is not None and self.renderer.requires_object_mask():
+ sampled_fb_prob = rend_utils.ndc_grid_sample(
+ fg_probability[:n_targets], ray_bundle.xys, mode="nearest"
+ )
+ inputs_to_be_chunked["object_mask"] = sampled_fb_prob > 0.5
+
+ # (5)-(6) Implicit function evaluation and Rendering
+ rendered = self._render(
+ ray_bundle=ray_bundle,
+ sampling_mode=sampling_mode,
+ evaluation_mode=evaluation_mode,
+ implicit_functions=self._implicit_functions,
+ inputs_to_be_chunked=inputs_to_be_chunked,
+ )
+
+ # Unbind the custom arguments to prevent pytorch from storing
+ # large buffers of intermediate results due to points in the
+ # bound arguments.
+ for func in self._implicit_functions:
+ func.unbind_args()
+
+ # A dict to store losses as well as rendering results.
+ preds: Dict[str, Any] = {}
+
+ preds.update(
+ self.view_metrics(
+ results=preds,
+ raymarched=rendered,
+ ray_bundle=ray_bundle,
+ image_rgb=safe_slice_targets(image_rgb),
+ depth_map=safe_slice_targets(depth_map),
+ fg_probability=safe_slice_targets(fg_probability),
+ mask_crop=safe_slice_targets(mask_crop),
+ )
+ )
+
+ preds.update(
+ self.regularization_metrics(
+ results=preds,
+ model=self,
+ )
+ )
+
+ if sampling_mode == RenderSamplingMode.MASK_SAMPLE:
+ if self.output_rasterized_mc:
+ # Visualize the monte-carlo pixel renders by splatting onto
+ # an image grid.
+ (
+ preds["images_render"],
+ preds["depths_render"],
+ preds["masks_render"],
+ ) = rasterize_sparse_ray_bundle(
+ ray_bundle,
+ rendered.features,
+ (self.render_image_height, self.render_image_width),
+ rendered.depths,
+ masks=rendered.masks,
+ )
+ elif sampling_mode == RenderSamplingMode.FULL_GRID:
+ preds["images_render"] = rendered.features.permute(0, 3, 1, 2)
+ preds["depths_render"] = rendered.depths.permute(0, 3, 1, 2)
+ preds["masks_render"] = rendered.masks.permute(0, 3, 1, 2)
+
+ preds["implicitron_render"] = ImplicitronRender(
+ image_render=preds["images_render"],
+ depth_render=preds["depths_render"],
+ mask_render=preds["masks_render"],
+ )
+ else:
+ raise AssertionError("Unreachable state")
+
+ # (7) Compute losses
+ objective = self._get_objective(preds)
+ if objective is not None:
+ preds["objective"] = objective
+
+ return preds
+
+ def _get_objective(self, preds: Dict[str, torch.Tensor]) -> Optional[torch.Tensor]:
+ """
+ A helper function to compute the overall loss as the dot product
+ of individual loss functions with the corresponding weights.
+ """
+ return weighted_sum_losses(preds, self.loss_weights)
+
+ def visualize(
+ self,
+ viz: Optional["Visdom"],
+ visdom_env_imgs: str,
+ preds: Dict[str, Any],
+ prefix: str,
+ ) -> None:
+ """
+ Helper function to visualize the predictions generated
+ in the forward pass.
+
+ Args:
+ viz: Visdom connection object
+ visdom_env_imgs: name of visdom environment for the images.
+ preds: predictions dict like returned by forward()
+ prefix: prepended to the names of images
+ """
+ if viz is None or not viz.check_connection():
+ logger.info("no visdom server! -> skipping batch vis")
+ return
+
+ idx_image = 0
+ title = f"{prefix}_im{idx_image}"
+
+ vis_utils.visualize_basics(viz, preds, visdom_env_imgs, title=title)
+
+ def _render(
+ self,
+ *,
+ ray_bundle: ImplicitronRayBundle,
+ inputs_to_be_chunked: Dict[str, torch.Tensor],
+ sampling_mode: RenderSamplingMode,
+ **kwargs,
+ ) -> RendererOutput:
+ """
+ Args:
+ ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the
+ sampled rendering rays.
+ inputs_to_be_chunked: A collection of tensor of shape `(B, _, H, W)`. E.g.
+ SignedDistanceFunctionRenderer requires "object_mask", shape
+ (B, 1, H, W), the silhouette of the object in the image. When
+ chunking, they are passed to the renderer as shape
+ `(B, _, chunksize)`.
+ sampling_mode: The sampling method to use. Must be a value from the
+ RenderSamplingMode Enum.
+
+ Returns:
+ An instance of RendererOutput
+ """
+ if sampling_mode == RenderSamplingMode.FULL_GRID and self.chunk_size_grid > 0:
+ return apply_chunked(
+ self.renderer,
+ chunk_generator(
+ self.chunk_size_grid,
+ ray_bundle,
+ inputs_to_be_chunked,
+ self.tqdm_trigger_threshold,
+ **kwargs,
+ ),
+ lambda batch: torch.cat(batch, dim=1).reshape(
+ *ray_bundle.lengths.shape[:-1], -1
+ ),
+ )
+ else:
+ # pyre-fixme[29]: `BaseRenderer` is not a function.
+ return self.renderer(
+ ray_bundle=ray_bundle,
+ **inputs_to_be_chunked,
+ **kwargs,
+ )
+
+ def _get_viewpooled_feature_dim(self) -> int:
+ if self.view_pooler is None:
+ return 0
+ assert self.image_feature_extractor is not None
+ return self.view_pooler.get_aggregated_feature_dim(
+ self.image_feature_extractor.get_feat_dims()
+ )
+
+ @classmethod
+ def raysampler_tweak_args(cls, type, args: DictConfig) -> None:
+ """
+ We don't expose certain fields of the raysampler because we want to set
+ them from our own members.
+ """
+ del args["sampling_mode_training"]
+ del args["sampling_mode_evaluation"]
+ del args["image_width"]
+ del args["image_height"]
+
+ def create_raysampler(self):
+ extra_args = {
+ "sampling_mode_training": self.sampling_mode_training,
+ "sampling_mode_evaluation": self.sampling_mode_evaluation,
+ "image_width": self.render_image_width,
+ "image_height": self.render_image_height,
+ }
+ raysampler_args = getattr(
+ self, "raysampler_" + self.raysampler_class_type + "_args"
+ )
+ self.raysampler = registry.get(RaySamplerBase, self.raysampler_class_type)(
+ **raysampler_args, **extra_args
+ )
+
+ @classmethod
+ def renderer_tweak_args(cls, type, args: DictConfig) -> None:
+ """
+ We don't expose certain fields of the renderer because we want to set
+ them based on other inputs.
+ """
+ args.pop("render_features_dimensions", None)
+ args.pop("object_bounding_sphere", None)
+
+ def create_renderer(self):
+ extra_args = {}
+
+ if self.renderer_class_type == "SignedDistanceFunctionRenderer":
+ extra_args["render_features_dimensions"] = self.render_features_dimensions
+ if not hasattr(self.raysampler, "scene_extent"):
+ raise ValueError(
+ "SignedDistanceFunctionRenderer requires"
+ + " a raysampler that defines the 'scene_extent' field"
+ + " (this field is supported by, e.g., the adaptive raysampler - "
+ + " self.raysampler_class_type='AdaptiveRaySampler')."
+ )
+ extra_args["object_bounding_sphere"] = self.raysampler.scene_extent
+
+ renderer_args = getattr(self, "renderer_" + self.renderer_class_type + "_args")
+ self.renderer = registry.get(BaseRenderer, self.renderer_class_type)(
+ **renderer_args, **extra_args
+ )
+
+ def create_implicit_function(self) -> None:
+ """
+ No-op called by run_auto_creation so that self.implicit_function
+ does not get created. __post_init__ creates the implicit function(s)
+ in wrappers explicitly in self._implicit_functions.
+ """
+ pass
+
+ @classmethod
+ def implicit_function_tweak_args(cls, type, args: DictConfig) -> None:
+ """
+ We don't expose certain implicit_function fields because we want to set
+ them based on other inputs.
+ """
+ args.pop("feature_vector_size", None)
+ args.pop("encoding_dim", None)
+ args.pop("latent_dim", None)
+ args.pop("latent_dim_hypernet", None)
+ args.pop("color_dim", None)
+
+ def _construct_implicit_functions(self):
+ """
+ After run_auto_creation has been called, the arguments
+ for each of the possible implicit function methods are
+ available. `GenericModel` arguments are first validated
+ based on the custom requirements for each specific
+ implicit function method. Then the required implicit
+ function(s) are initialized.
+ """
+ extra_args = {}
+ global_encoder_dim = (
+ 0 if self.global_encoder is None else self.global_encoder.get_encoding_dim()
+ )
+ viewpooled_feature_dim = self._get_viewpooled_feature_dim()
+
+ if self.implicit_function_class_type in (
+ "NeuralRadianceFieldImplicitFunction",
+ "NeRFormerImplicitFunction",
+ ):
+ extra_args["latent_dim"] = viewpooled_feature_dim + global_encoder_dim
+ extra_args["color_dim"] = self.render_features_dimensions
+
+ if self.implicit_function_class_type == "IdrFeatureField":
+ extra_args["feature_vector_size"] = self.render_features_dimensions
+ extra_args["encoding_dim"] = global_encoder_dim
+
+ if self.implicit_function_class_type == "SRNImplicitFunction":
+ extra_args["latent_dim"] = viewpooled_feature_dim + global_encoder_dim
+
+ # srn_hypernet preprocessing
+ if self.implicit_function_class_type == "SRNHyperNetImplicitFunction":
+ extra_args["latent_dim"] = viewpooled_feature_dim
+ extra_args["latent_dim_hypernet"] = global_encoder_dim
+
+ # check that for srn, srn_hypernet, idr we have self.num_passes=1
+ implicit_function_type = registry.get(
+ ImplicitFunctionBase, self.implicit_function_class_type
+ )
+ expand_args_fields(implicit_function_type)
+ if self.num_passes != 1 and not implicit_function_type.allows_multiple_passes():
+ raise ValueError(
+ self.implicit_function_class_type
+ + f" requires num_passes=1 not {self.num_passes}"
+ )
+
+ if implicit_function_type.requires_pooling_without_aggregation():
+ if self.view_pooler_enabled and self.view_pooler.has_aggregation():
+ raise ValueError(
+ "The chosen implicit function requires view pooling without aggregation."
+ )
+ config_name = f"implicit_function_{self.implicit_function_class_type}_args"
+ config = getattr(self, config_name, None)
+ if config is None:
+ raise ValueError(f"{config_name} not present")
+ implicit_functions_list = [
+ ImplicitFunctionWrapper(implicit_function_type(**config, **extra_args))
+ for _ in range(self.num_passes)
+ ]
+ return torch.nn.ModuleList(implicit_functions_list)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/global_encoder/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/global_encoder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/global_encoder/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/global_encoder/autodecoder.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/global_encoder/autodecoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d941bf6f64ab8a003662ad1dbe0cfe7c881a9fa
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/global_encoder/autodecoder.py
@@ -0,0 +1,163 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import warnings
+from collections import defaultdict
+from typing import Dict, List, Optional, Union
+
+import torch
+from pytorch3d.implicitron.tools.config import Configurable
+
+
+class Autodecoder(Configurable, torch.nn.Module):
+ """
+ Autodecoder which maps a list of integer or string keys to optimizable embeddings.
+
+ Settings:
+ encoding_dim: Embedding dimension for the decoder.
+ n_instances: The maximum number of instances stored by the autodecoder.
+ init_scale: Scale factor for the initial autodecoder weights.
+ ignore_input: If `True`, optimizes a single code for any input.
+ """
+
+ encoding_dim: int = 0
+ n_instances: int = 1
+ init_scale: float = 1.0
+ ignore_input: bool = False
+
+ def __post_init__(self):
+ if self.n_instances <= 0:
+ raise ValueError(f"Invalid n_instances {self.n_instances}")
+
+ self._autodecoder_codes = torch.nn.Embedding(
+ self.n_instances,
+ self.encoding_dim,
+ scale_grad_by_freq=True,
+ )
+ with torch.no_grad():
+ # weight has been initialised from Normal(0, 1)
+ self._autodecoder_codes.weight *= self.init_scale
+
+ self._key_map = self._build_key_map()
+ # Make sure to register hooks for correct handling of saving/loading
+ # the module's _key_map.
+ self._register_load_state_dict_pre_hook(self._load_key_map_hook)
+ self._register_state_dict_hook(_save_key_map_hook)
+
+ def _build_key_map(
+ self, key_map_dict: Optional[Dict[str, int]] = None
+ ) -> Dict[str, int]:
+ """
+ Args:
+ key_map_dict: A dictionary used to initialize the key_map.
+
+ Returns:
+ key_map: a dictionary of key: id pairs.
+ """
+ # increments the counter when asked for a new value
+ key_map = defaultdict(iter(range(self.n_instances)).__next__)
+ if key_map_dict is not None:
+ # Assign all keys from the loaded key_map_dict to self._key_map.
+ # Since this is done in the original order, it should generate
+ # the same set of key:id pairs. We check this with an assert to be sure.
+ for x, x_id in key_map_dict.items():
+ x_id_ = key_map[x]
+ assert x_id == x_id_
+ return key_map
+
+ def calculate_squared_encoding_norm(self) -> Optional[torch.Tensor]:
+ return (self._autodecoder_codes.weight**2).mean()
+
+ def get_encoding_dim(self) -> int:
+ return self.encoding_dim
+
+ def forward(self, x: Union[torch.LongTensor, List[str]]) -> Optional[torch.Tensor]:
+ """
+ Args:
+ x: A batch of `N` identifiers. Either a long tensor of size
+ `(N,)` keys in [0, n_instances), or a list of `N` string keys that
+ are hashed to codes (without collisions).
+
+ Returns:
+ codes: A tensor of shape `(N, self.encoding_dim)` containing the
+ key-specific autodecoder codes.
+ """
+ if self.ignore_input:
+ x = ["singleton"]
+
+ if isinstance(x[0], str):
+ try:
+ # pyre-fixme[9]: x has type `Union[List[str], LongTensor]`; used as
+ # `Tensor`.
+ x = torch.tensor(
+ [self._key_map[elem] for elem in x],
+ dtype=torch.long,
+ device=next(self.parameters()).device,
+ )
+ except StopIteration:
+ raise ValueError("Not enough n_instances in the autodecoder") from None
+
+ return self._autodecoder_codes(x)
+
+ def _load_key_map_hook(
+ self,
+ state_dict,
+ prefix,
+ local_metadata,
+ strict,
+ missing_keys,
+ unexpected_keys,
+ error_msgs,
+ ):
+ """
+ Args:
+ state_dict (dict): a dict containing parameters and
+ persistent buffers.
+ prefix (str): the prefix for parameters and buffers used in this
+ module
+ local_metadata (dict): a dict containing the metadata for this module.
+ strict (bool): whether to strictly enforce that the keys in
+ :attr:`state_dict` with :attr:`prefix` match the names of
+ parameters and buffers in this module
+ missing_keys (list of str): if ``strict=True``, add missing keys to
+ this list
+ unexpected_keys (list of str): if ``strict=True``, add unexpected
+ keys to this list
+ error_msgs (list of str): error messages should be added to this
+ list, and will be reported together in
+ :meth:`~torch.nn.Module.load_state_dict`
+
+ Returns:
+ Constructed key_map if it exists in the state_dict
+ else raises a warning only.
+ """
+ key_map_key = prefix + "_key_map"
+ if key_map_key in state_dict:
+ key_map_dict = state_dict.pop(key_map_key)
+ self._key_map = self._build_key_map(key_map_dict=key_map_dict)
+ else:
+ warnings.warn("No key map in Autodecoder state dict!")
+
+
+def _save_key_map_hook(
+ self,
+ state_dict,
+ prefix,
+ local_metadata,
+) -> None:
+ """
+ Args:
+ state_dict (dict): a dict containing parameters and
+ persistent buffers.
+ prefix (str): the prefix for parameters and buffers used in this
+ module
+ local_metadata (dict): a dict containing the metadata for this module.
+ """
+ key_map_key = prefix + "_key_map"
+ key_map_dict = dict(self._key_map.items())
+ state_dict[key_map_key] = key_map_dict
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/global_encoder/global_encoder.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/global_encoder/global_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c0f2b9b419c9b3a3f37d949ae71faae23ad82ca
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/global_encoder/global_encoder.py
@@ -0,0 +1,128 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from typing import List, Optional, Union
+
+import torch
+from pytorch3d.implicitron.tools.config import (
+ registry,
+ ReplaceableBase,
+ run_auto_creation,
+)
+from pytorch3d.renderer.implicit import HarmonicEmbedding
+
+from .autodecoder import Autodecoder
+
+
+class GlobalEncoderBase(ReplaceableBase):
+ """
+ A base class for implementing encoders of global frame-specific quantities.
+
+ The latter includes e.g. the harmonic encoding of a frame timestamp
+ (`HarmonicTimeEncoder`), or an autodecoder encoding of the frame's sequence
+ (`SequenceAutodecoder`).
+ """
+
+ def get_encoding_dim(self):
+ """
+ Returns the dimensionality of the returned encoding.
+ """
+ raise NotImplementedError()
+
+ def calculate_squared_encoding_norm(self) -> Optional[torch.Tensor]:
+ """
+ Calculates the squared norm of the encoding to report as the
+ `autodecoder_norm` loss of the model, as a zero dimensional tensor.
+ """
+ raise NotImplementedError()
+
+ def forward(
+ self,
+ *,
+ frame_timestamp: Optional[torch.Tensor] = None,
+ sequence_name: Optional[Union[torch.LongTensor, List[str]]] = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ """
+ Given a set of inputs to encode, generates a tensor containing the encoding.
+
+ Returns:
+ encoding: The tensor containing the global encoding.
+ """
+ raise NotImplementedError()
+
+
+# TODO: probabilistic embeddings?
+@registry.register
+class SequenceAutodecoder(GlobalEncoderBase, torch.nn.Module):
+ """
+ A global encoder implementation which provides an autodecoder encoding
+ of the frame's sequence identifier.
+ """
+
+ # pyre-fixme[13]: Attribute `autodecoder` is never initialized.
+ autodecoder: Autodecoder
+
+ def __post_init__(self):
+ run_auto_creation(self)
+
+ def get_encoding_dim(self):
+ return self.autodecoder.get_encoding_dim()
+
+ def forward(
+ self,
+ *,
+ frame_timestamp: Optional[torch.Tensor] = None,
+ sequence_name: Optional[Union[torch.LongTensor, List[str]]] = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ if sequence_name is None:
+ raise ValueError("sequence_name must be provided.")
+ # run dtype checks and pass sequence_name to self.autodecoder
+ return self.autodecoder(sequence_name)
+
+ def calculate_squared_encoding_norm(self) -> Optional[torch.Tensor]:
+ return self.autodecoder.calculate_squared_encoding_norm()
+
+
+@registry.register
+class HarmonicTimeEncoder(GlobalEncoderBase, torch.nn.Module):
+ """
+ A global encoder implementation which provides harmonic embeddings
+ of each frame's timestamp.
+ """
+
+ n_harmonic_functions: int = 10
+ append_input: bool = True
+ time_divisor: float = 1.0
+
+ def __post_init__(self):
+ self._harmonic_embedding = HarmonicEmbedding(
+ n_harmonic_functions=self.n_harmonic_functions,
+ append_input=self.append_input,
+ )
+
+ def get_encoding_dim(self):
+ return self._harmonic_embedding.get_output_dim(1)
+
+ def forward(
+ self,
+ *,
+ frame_timestamp: Optional[torch.Tensor] = None,
+ sequence_name: Optional[Union[torch.LongTensor, List[str]]] = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ if frame_timestamp is None:
+ raise ValueError("frame_timestamp must be provided.")
+ if frame_timestamp.shape[-1] != 1:
+ raise ValueError("Frame timestamp's last dimension should be one.")
+ time = frame_timestamp / self.time_divisor
+ return self._harmonic_embedding(time)
+
+ def calculate_squared_encoding_norm(self) -> Optional[torch.Tensor]:
+ return None
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/base.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8d182dd6ee1fb1d41b87af27529902053c7f3dd
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/base.py
@@ -0,0 +1,51 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from abc import ABC, abstractmethod
+from typing import Optional
+
+from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
+
+from pytorch3d.implicitron.tools.config import ReplaceableBase
+from pytorch3d.renderer.cameras import CamerasBase
+
+
+class ImplicitFunctionBase(ABC, ReplaceableBase):
+ @abstractmethod
+ def forward(
+ self,
+ *,
+ ray_bundle: ImplicitronRayBundle,
+ fun_viewpool=None,
+ camera: Optional[CamerasBase] = None,
+ global_code=None,
+ **kwargs,
+ ):
+ raise NotImplementedError()
+
+ @staticmethod
+ def allows_multiple_passes() -> bool:
+ """
+ Returns True if this implicit function allows
+ multiple passes.
+ """
+ return False
+
+ @staticmethod
+ def requires_pooling_without_aggregation() -> bool:
+ """
+ Returns True if this implicit function needs
+ pooling without aggregation.
+ """
+ return False
+
+ def on_bind_args(self) -> None:
+ """
+ Called when the custom args are fixed in the main model forward pass.
+ """
+ pass
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/decoding_functions.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/decoding_functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4c125e8b56ec8188efa72ba03cdb4964fd2d7c3
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/decoding_functions.py
@@ -0,0 +1,491 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+"""
+This file contains
+ - modules which get used by ImplicitFunction objects for decoding an embedding defined in
+ space, e.g. to color or opacity.
+ - DecoderFunctionBase and its subclasses, which wrap some of those modules, providing
+ some such modules as an extension point which an ImplicitFunction object could use.
+"""
+
+import logging
+from dataclasses import field
+
+from enum import Enum
+from typing import Dict, Optional, Tuple
+
+import torch
+
+from omegaconf import DictConfig
+
+from pytorch3d.implicitron.tools.config import (
+ Configurable,
+ registry,
+ ReplaceableBase,
+ run_auto_creation,
+)
+
+logger = logging.getLogger(__name__)
+
+
class DecoderActivation(Enum):
    """Activation functions that a decoding function can apply to its output."""

    RELU = "relu"
    SOFTPLUS = "softplus"
    SIGMOID = "sigmoid"
    IDENTITY = "identity"
+
+
class DecoderFunctionBase(ReplaceableBase, torch.nn.Module):
    """
    Decoding function is a torch.nn.Module which takes the embedding of a location in
    space and transforms it into the required quantity (for example density and color).
    """

    def forward(
        self, features: torch.Tensor, z: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """
        Args:
            features (torch.Tensor): tensor of shape (batch, ..., num_in_features)
            z: optional tensor to append to parts of the decoding function
        Returns:
            decoded_features (torch.Tensor) : tensor of
                shape (batch, ..., num_out_features)
        """
        # Abstract: concrete decoders (registered in `registry`) override this.
        raise NotImplementedError()
+
+
@registry.register
class ElementwiseDecoder(DecoderFunctionBase):
    """
    Decoding function which scales the input, adds shift and then applies
    `relu`, `softplus`, `sigmoid` or nothing on its input:
    `result = operation(input * scale + shift)`

    Members:
        scale: a scalar with which input is multiplied before being shifted.
            Defaults to 1.
        shift: a scalar which is added to the scaled input before performing
            the operation. Defaults to 0.
        operation: which operation to perform on the transformed input. Options are:
            `RELU`, `SOFTPLUS`, `SIGMOID` or `IDENTITY`. Defaults to `IDENTITY`.
    """

    scale: float = 1
    shift: float = 0
    operation: DecoderActivation = DecoderActivation.IDENTITY

    def __post_init__(self):
        # Validate the configured activation eagerly, at construction time.
        supported = (
            DecoderActivation.RELU,
            DecoderActivation.SOFTPLUS,
            DecoderActivation.SIGMOID,
            DecoderActivation.IDENTITY,
        )
        if self.operation not in supported:
            raise ValueError(
                "`operation` can only be `RELU`, `SOFTPLUS`, `SIGMOID` or `IDENTITY`."
            )

    def forward(
        self, features: torch.Tensor, z: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        # Affine transform first, then the (optional) elementwise nonlinearity.
        transformed = features * self.scale + self.shift
        if self.operation == DecoderActivation.RELU:
            return torch.nn.functional.relu(transformed)
        if self.operation == DecoderActivation.SOFTPLUS:
            return torch.nn.functional.softplus(transformed)
        if self.operation == DecoderActivation.SIGMOID:
            return torch.nn.functional.sigmoid(transformed)
        return transformed
+
+
class MLPWithInputSkips(Configurable, torch.nn.Module):
    """
    Implements the multi-layer perceptron architecture of the Neural Radiance Field.

    As such, `MLPWithInputSkips` is a multi layer perceptron consisting
    of a sequence of linear layers with ReLU activations.

    Additionally, for a set of predefined layers `input_skips`, the forward pass
    appends a skip tensor `z` to the output of the preceding layer.

    Note that this follows the architecture described in the Supplementary
    Material (Fig. 7) of [1], for which keep the defaults for:
        - `last_layer_bias_init` to None
        - `last_activation` to "relu"
        - `use_xavier_init` to `true`

    If you want to use this as a part of the color prediction in TensoRF model set:
        - `last_layer_bias_init` to 0
        - `last_activation` to "sigmoid"
        - `use_xavier_init` to `False`

    References:
        [1] Ben Mildenhall and Pratul P. Srinivasan and Matthew Tancik
            and Jonathan T. Barron and Ravi Ramamoorthi and Ren Ng:
            NeRF: Representing Scenes as Neural Radiance Fields for View
            Synthesis, ECCV2020

    Members:
        n_layers: The number of linear layers of the MLP.
        input_dim: The number of channels of the input tensor.
        output_dim: The number of channels of the output.
        skip_dim: The number of channels of the tensor `z` appended when
            evaluating the skip layers.
        hidden_dim: The number of hidden units of the MLP.
        input_skips: The list of layer indices at which we append the skip
            tensor `z`.
        skip_affine_trans: If True, the skip tensor is merged via a learned
            affine (scale/shift) transform instead of channel concatenation.
        last_layer_bias_init: If set then all the biases in the last layer
            are initialized to that value.
        last_activation: Which activation to use in the last layer. Options are:
            "relu", "softplus", "sigmoid" and "identity". Default is "relu".
        use_xavier_init: If True uses xavier init for all linear layer weights.
            Otherwise the default PyTorch initialization is used. Default True.
    """

    n_layers: int = 8
    input_dim: int = 39
    output_dim: int = 256
    skip_dim: int = 39
    hidden_dim: int = 256
    input_skips: Tuple[int, ...] = (5,)
    skip_affine_trans: bool = False
    last_layer_bias_init: Optional[float] = None
    last_activation: DecoderActivation = DecoderActivation.RELU
    use_xavier_init: bool = True

    def __post_init__(self):
        try:
            last_activation = {
                DecoderActivation.RELU: torch.nn.ReLU(True),
                DecoderActivation.SOFTPLUS: torch.nn.Softplus(),
                DecoderActivation.SIGMOID: torch.nn.Sigmoid(),
                DecoderActivation.IDENTITY: torch.nn.Identity(),
            }[self.last_activation]
        except KeyError as e:
            raise ValueError(
                "`last_activation` can only be `RELU`,"
                " `SOFTPLUS`, `SIGMOID` or `IDENTITY`."
            ) from e

        layers = []
        skip_affine_layers = []
        for layeri in range(self.n_layers):
            # The first layer consumes the input embedding, later ones the
            # hidden width; the final layer produces `output_dim` channels.
            dimin = self.hidden_dim if layeri > 0 else self.input_dim
            dimout = self.hidden_dim if layeri + 1 < self.n_layers else self.output_dim

            if layeri > 0 and layeri in self.input_skips:
                if self.skip_affine_trans:
                    skip_affine_layers.append(
                        self._make_affine_layer(self.skip_dim, self.hidden_dim)
                    )
                else:
                    # Plain concatenation skip: widen this layer's input.
                    dimin = self.hidden_dim + self.skip_dim

            linear = torch.nn.Linear(dimin, dimout)
            if self.use_xavier_init:
                _xavier_init(linear)
            if layeri == self.n_layers - 1 and self.last_layer_bias_init is not None:
                torch.nn.init.constant_(linear.bias, self.last_layer_bias_init)
            # BUGFIX: the condition here was inverted
            # (`if not layeri + 1 < self.n_layers`), which applied
            # `last_activation` to the intermediate layers and a fixed ReLU to
            # the last one — the opposite of the contract documented above
            # (and required for the TensoRF sigmoid color head).
            if layeri + 1 < self.n_layers:
                # Intermediate layers always use ReLU.
                layers.append(torch.nn.Sequential(linear, torch.nn.ReLU(True)))
            else:
                # The final layer uses the configured `last_activation`.
                layers.append(torch.nn.Sequential(linear, last_activation))
        self.mlp = torch.nn.ModuleList(layers)
        if self.skip_affine_trans:
            self.skip_affines = torch.nn.ModuleList(skip_affine_layers)
        self._input_skips = set(self.input_skips)
        self._skip_affine_trans = self.skip_affine_trans

    def _make_affine_layer(self, input_dim, hidden_dim):
        """Build the small net predicting per-channel (mu, log_std) from `z`."""
        l1 = torch.nn.Linear(input_dim, hidden_dim * 2)
        l2 = torch.nn.Linear(hidden_dim * 2, hidden_dim * 2)
        if self.use_xavier_init:
            _xavier_init(l1)
            _xavier_init(l2)
        return torch.nn.Sequential(l1, torch.nn.ReLU(True), l2)

    def _apply_affine_layer(self, layer, x, z):
        """Merge skip tensor `z` into `x` via a predicted affine transform."""
        mu_log_std = layer(z)
        mu, log_std = mu_log_std.split(mu_log_std.shape[-1] // 2, dim=-1)
        std = torch.nn.functional.softplus(log_std)
        return (x - mu) * std

    def forward(self, x: torch.Tensor, z: Optional[torch.Tensor] = None):
        """
        Args:
            x: The input tensor of shape `(..., input_dim)`.
            z: The input skip tensor of shape `(..., skip_dim)` which is appended
                to layers whose indices are specified by `input_skips`.
        Returns:
            y: The output tensor of shape `(..., output_dim)`.
        """
        y = x
        if z is None:
            # if the skip tensor is None, we use `x` instead.
            z = x
        skipi = 0
        for li, layer in enumerate(self.mlp):
            if li in self._input_skips:
                if self._skip_affine_trans:
                    y = self._apply_affine_layer(self.skip_affines[skipi], y, z)
                else:
                    y = torch.cat((y, z), dim=-1)
                skipi += 1
            y = layer(y)
        return y
+
+
@registry.register
class MLPDecoder(DecoderFunctionBase):
    """
    Decoding function which uses `MLPWithInputSkips` to convert the embedding to output.
    The `input_dim` of the `network` is set from the value of `input_dim` member.

    Members:
        input_dim: dimension of input.
        param_groups: dictionary where keys are names of individual parameters
            or module members and values are the parameter group where the
            parameter/member will be sorted to. "self" key is used to denote the
            parameter group at the module level. Possible keys, including the "self" key
            do not have to be defined. By default all parameters are put into "default"
            parameter group and have the learning rate defined in the optimizer,
            it can be overridden at the:
            - module level with “self” key, all the parameters and child
                module's parameters will be put to that parameter group
            - member level, which is the same as if the `param_groups` in that
                member has key=“self” and value equal to that parameter group.
                This is useful if members do not have `param_groups`, for
                example torch.nn.Linear.
            - parameter level, parameter with the same name as the key
                will be put to that parameter group.
        network_args: configuration for MLPWithInputSkips
    """

    input_dim: int = 3
    param_groups: Dict[str, str] = field(default_factory=lambda: {})
    # `network` is created by the config system via `run_auto_creation` below.
    # pyre-fixme[13]: Attribute `network` is never initialized.
    network: MLPWithInputSkips

    def __post_init__(self):
        # Instantiates `self.network` from config (calls create_network_impl).
        run_auto_creation(self)

    def forward(
        self, features: torch.Tensor, z: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        # Delegate entirely to the wrapped MLP; `z` is the optional skip input.
        return self.network(features, z)

    @classmethod
    def network_tweak_args(cls, type, args: DictConfig) -> None:
        """
        Special method to stop get_default_args exposing member's `input_dim`.
        """
        # NOTE(review): hook name follows the config system's `X_tweak_args`
        # convention — confirm against pytorch3d.implicitron.tools.config.
        args.pop("input_dim", None)

    def create_network_impl(self, type, args: DictConfig) -> None:
        """
        Set the input dimension of the `network` to the input dimension of the
        decoding function.
        """
        self.network = MLPWithInputSkips(input_dim=self.input_dim, **args)
+
+
class TransformerWithInputSkips(torch.nn.Module):
    """
    A NeRFormer-style trunk: alternates attention over the pooled-features
    axis and the points-along-ray axis, with optional input skip connections
    (mirrors the role `MLPWithInputSkips` plays for plain NeRF).
    """

    def __init__(
        self,
        n_layers: int = 8,
        input_dim: int = 39,
        output_dim: int = 256,
        skip_dim: int = 39,
        hidden_dim: int = 64,
        input_skips: Tuple[int, ...] = (5,),
        dim_down_factor: float = 1,
    ):
        """
        Args:
            n_layers: The number of linear layers of the MLP.
            input_dim: The number of channels of the input tensor.
            output_dim: The number of channels of the output.
            skip_dim: The number of channels of the tensor `z` appended when
                evaluating the skip layers.
            hidden_dim: The number of hidden units of the MLP.
            input_skips: The list of layer indices at which we append the skip
                tensor `z`.
            dim_down_factor: per-layer geometric shrink factor of the hidden
                width (1 keeps the width constant).
        """
        super().__init__()

        self.first = torch.nn.Linear(input_dim, hidden_dim)
        _xavier_init(self.first)

        self.skip_linear = torch.nn.ModuleList()

        layers_pool, layers_ray = [], []
        dimout = 0
        for layeri in range(n_layers):
            # Width shrinks geometrically with depth when dim_down_factor > 1.
            dimin = int(round(hidden_dim / (dim_down_factor**layeri)))
            dimout = int(round(hidden_dim / (dim_down_factor ** (layeri + 1))))
            logger.info(f"Tr: {dimin} -> {dimout}")
            # One encoder layer per axis: index 0 attends over the pooled
            # features axis, index 1 over the points-along-ray axis.
            for _i, l in enumerate((layers_pool, layers_ray)):
                l.append(
                    TransformerEncoderLayer(
                        d_model=[dimin, dimout][_i],
                        nhead=4,
                        dim_feedforward=hidden_dim,
                        dropout=0.0,
                        d_model_out=dimout,
                    )
                )

            if layeri in input_skips:
                # Projects the raw input to this layer's width for the skip add.
                self.skip_linear.append(torch.nn.Linear(input_dim, dimin))

        self.last = torch.nn.Linear(dimout, output_dim)
        _xavier_init(self.last)

        # pyre-fixme[8]: Attribute has type `Tuple[ModuleList, ModuleList]`; used as
        # `ModuleList`.
        self.layers_pool, self.layers_ray = (
            torch.nn.ModuleList(layers_pool),
            torch.nn.ModuleList(layers_ray),
        )
        self._input_skips = set(input_skips)

    def forward(
        self,
        x: torch.Tensor,
        z: Optional[torch.Tensor] = None,
    ):
        """
        Args:
            x: The input tensor of shape
                `(minibatch, n_pooled_feats, ..., n_ray_pts, input_dim)`.
            z: The input skip tensor of shape
                `(minibatch, n_pooled_feats, ..., n_ray_pts, skip_dim)`
                which is appended to layers whose indices are specified by `input_skips`.
        Returns:
            y: The output tensor of shape
                `(minibatch, 1, ..., n_ray_pts, input_dim)`.
        """

        if z is None:
            # if the skip tensor is None, we use `x` instead.
            z = x

        y = self.first(x)

        # Expects exactly 5 dims: (batch, pooled feats, rays, points, channels).
        B, n_pool, n_rays, n_pts, dim = y.shape

        # y_p in n_pool, n_pts, B x n_rays x dim
        y_p = y.permute(1, 3, 0, 2, 4)

        skipi = 0
        dimh = dim
        for li, (layer_pool, layer_ray) in enumerate(
            zip(self.layers_pool, self.layers_ray)
        ):
            # Flatten everything but the pooled-features axis into the "batch".
            y_pool_attn = y_p.reshape(n_pool, n_pts * B * n_rays, dimh)
            if li in self._input_skips:
                z_skip = self.skip_linear[skipi](z)
                # Rearrange z_skip the same way as y before the residual add;
                # skip_linear was built with this layer's input width, so it
                # matches `dimh` here.
                y_pool_attn = y_pool_attn + z_skip.permute(1, 3, 0, 2, 4).reshape(
                    n_pool, n_pts * B * n_rays, dimh
                )
                skipi += 1
            # n_pool x B*n_rays*n_pts x dim
            y_pool_attn, pool_attn = layer_pool(y_pool_attn, src_key_padding_mask=None)
            # The encoder layer may shrink the channel width (d_model_out).
            dimh = y_pool_attn.shape[-1]

            y_ray_attn = (
                y_pool_attn.view(n_pool, n_pts, B * n_rays, dimh)
                .permute(1, 0, 2, 3)
                .reshape(n_pts, n_pool * B * n_rays, dimh)
            )
            # n_pts x n_pool*B*n_rays x dim
            y_ray_attn, ray_attn = layer_ray(
                y_ray_attn,
                src_key_padding_mask=None,
            )

            y_p = y_ray_attn.view(n_pts, n_pool, B * n_rays, dimh).permute(1, 0, 2, 3)

        y = y_p.view(n_pool, n_pts, B, n_rays, dimh).permute(2, 0, 3, 1, 4)

        # Collapse the pooled-features axis with attention-like softmax weights
        # derived from the first channel.
        W = torch.softmax(y[..., :1], dim=1)
        y = (y * W).sum(dim=1)
        y = self.last(y)

        return y
+
+
class TransformerEncoderLayer(torch.nn.Module):
    r"""A single transformer encoder block: multi-head self-attention followed
    by a two-layer feedforward network, each wrapped with dropout, a residual
    connection and LayerNorm (post-norm, as in "Attention Is All You Need",
    Vaswani et al., NeurIPS 2017).

    Unlike `torch.nn.TransformerEncoderLayer`, this block can change the
    channel width: when `d_model_out` is positive and differs from `d_model`,
    the feedforward branch maps to `d_model_out` channels and the residual is
    truncated to match. It also returns the attention weights.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        d_model_out: output channel width; non-positive means "same as d_model".

    Examples::
        >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
        >>> src = torch.rand(10, 32, 512)
        >>> out, attn = encoder_layer(src)
    """

    def __init__(
        self, d_model, nhead, dim_feedforward=2048, dropout=0.1, d_model_out=-1
    ):
        super().__init__()
        self.self_attn = torch.nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Feedforward sub-network: linear1 -> relu -> dropout -> linear2.
        self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
        self.dropout = torch.nn.Dropout(dropout)
        d_model_out = d_model if d_model_out <= 0 else d_model_out
        self.linear2 = torch.nn.Linear(dim_feedforward, d_model_out)
        self.norm1 = torch.nn.LayerNorm(d_model)
        self.norm2 = torch.nn.LayerNorm(d_model_out)
        self.dropout1 = torch.nn.Dropout(dropout)
        self.dropout2 = torch.nn.Dropout(dropout)

        self.activation = torch.nn.functional.relu

    def forward(self, src, src_mask=None, src_key_padding_mask=None):
        r"""Pass the input through the encoder layer.

        Args:
            src: the sequence to the encoder layer (required).
            src_mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).

        Returns:
            A tuple of the transformed sequence (last dim `d_model_out`) and
            the attention weights.
        """
        # Self-attention with residual connection and post-norm.
        attn_out, attn_weights = self.self_attn(
            src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
        )
        src = self.norm1(src + self.dropout1(attn_out))
        # Feedforward branch; may narrow the channel width to d_model_out.
        ff_out = self.linear2(self.dropout(self.activation(self.linear1(src))))
        width = ff_out.shape[-1]
        # Truncate the residual so it matches a possibly narrower branch.
        src = self.norm2(src[..., :width] + self.dropout2(ff_out)[..., :width])
        return src, attn_weights
+
+
+def _xavier_init(linear) -> None:
+ """
+ Performs the Xavier weight initialization of the linear layer `linear`.
+ """
+ torch.nn.init.xavier_uniform_(linear.weight.data)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/idr_feature_field.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/idr_feature_field.py
new file mode 100644
index 0000000000000000000000000000000000000000..be98b878489872b3e43a69b1956f3e59f02e1f03
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/idr_feature_field.py
@@ -0,0 +1,176 @@
+# @lint-ignore-every LICENSELINT
+# Adapted from https://github.com/lioryariv/idr/blob/main/code/model/
+# implicit_differentiable_renderer.py
+# Copyright (c) 2020 Lior Yariv
+
+# pyre-unsafe
+import math
+from typing import Optional, Tuple
+
+import torch
+from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
+from pytorch3d.implicitron.tools.config import registry
+from pytorch3d.renderer.implicit import HarmonicEmbedding
+
+from torch import nn
+
+from .base import ImplicitFunctionBase
+from .utils import get_rays_points_world
+
+
@registry.register
class IdrFeatureField(ImplicitFunctionBase, torch.nn.Module):
    """
    Implicit function as used in http://github.com/lioryariv/idr.

    Members:
        d_in: dimension of the input point.
        n_harmonic_functions_xyz: If -1, do not embed the point.
            If >=0, use a harmonic embedding with this number of
            harmonic functions. (The harmonic embedding includes the input
            itself, so a value of 0 means the point is used but without
            any harmonic functions.)
        d_out and feature_vector_size: Sum of these is the output
            dimension. This implicit function thus returns a concatenation
            of `d_out` signed distance function values and `feature_vector_size`
            features (such as colors). When used in `GenericModel`,
            `feature_vector_size` is automatically set to
            `render_features_dimensions`.
        dims: list of hidden layer sizes.
        geometric_init: whether to use custom weight initialization
            in linear layers. If False, pytorch default (uniform sampling)
            is used.
        bias: if geometric_init=True, initial value for bias subtracted
            in the last layer.
        skip_in: List of indices of layers that receive as input the initial
            value concatenated with the output of the previous layers.
        weight_norm: whether to apply weight normalization to each layer.
        pooled_feature_dim: If view pooling is in use (provided as
            fun_viewpool to forward()) this must be its number of features.
            Otherwise this must be set to 0. (If used from GenericModel,
            this config value will be overridden automatically.)
        encoding_dim: If global coding is in use (provided as global_code
            to forward()) this must be its number of features.
            Otherwise this must be set to 0. (If used from GenericModel,
            this config value will be overridden automatically.)
    """

    feature_vector_size: int = 3
    d_in: int = 3
    d_out: int = 1
    dims: Tuple[int, ...] = (512, 512, 512, 512, 512, 512, 512, 512)
    geometric_init: bool = True
    bias: float = 1.0
    skip_in: Tuple[int, ...] = ()
    weight_norm: bool = True
    n_harmonic_functions_xyz: int = 0
    pooled_feature_dim: int = 0
    encoding_dim: int = 0

    def __post_init__(self):
        # Full layer-width list: input, hidden widths, then SDF + features out.
        dims = [self.d_in] + list(self.dims) + [self.d_out + self.feature_vector_size]

        self.embed_fn = None
        if self.n_harmonic_functions_xyz >= 0:
            self.embed_fn = HarmonicEmbedding(
                self.n_harmonic_functions_xyz, append_input=True
            )
            dims[0] = self.embed_fn.get_output_dim()
        # Widen the first layer for optional pooled / global-code features.
        if self.pooled_feature_dim > 0:
            dims[0] += self.pooled_feature_dim
        if self.encoding_dim > 0:
            dims[0] += self.encoding_dim

        self.num_layers = len(dims)

        out_dim = 0
        layers = []
        for layer_idx in range(self.num_layers - 1):
            if layer_idx + 1 in self.skip_in:
                # Leave room so that after concatenating the input embedding
                # the next layer sees exactly dims[layer_idx + 1] channels.
                out_dim = dims[layer_idx + 1] - dims[0]
            else:
                out_dim = dims[layer_idx + 1]

            lin = nn.Linear(dims[layer_idx], out_dim)

            # NOTE(review): this mirrors the geometric initialization of the
            # IDR reference implementation the file header cites — confirm
            # against the upstream repo before altering any branch.
            if self.geometric_init:
                if layer_idx == self.num_layers - 2:
                    torch.nn.init.normal_(
                        lin.weight,
                        mean=math.pi**0.5 / dims[layer_idx] ** 0.5,
                        std=0.0001,
                    )
                    torch.nn.init.constant_(lin.bias, -self.bias)
                elif self.n_harmonic_functions_xyz >= 0 and layer_idx == 0:
                    # Only the raw xyz channels get nonzero initial weights.
                    torch.nn.init.constant_(lin.bias, 0.0)
                    torch.nn.init.constant_(lin.weight[:, 3:], 0.0)
                    torch.nn.init.normal_(lin.weight[:, :3], 0.0, 2**0.5 / out_dim**0.5)
                elif self.n_harmonic_functions_xyz >= 0 and layer_idx in self.skip_in:
                    # Zero the part of the weight that multiplies the
                    # concatenated embedding (except the raw xyz channels).
                    torch.nn.init.constant_(lin.bias, 0.0)
                    torch.nn.init.normal_(lin.weight, 0.0, 2**0.5 / out_dim**0.5)
                    torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3) :], 0.0)
                else:
                    torch.nn.init.constant_(lin.bias, 0.0)
                    torch.nn.init.normal_(lin.weight, 0.0, 2**0.5 / out_dim**0.5)

            if self.weight_norm:
                lin = nn.utils.weight_norm(lin)

            layers.append(lin)

        self.linear_layers = torch.nn.ModuleList(layers)
        self.out_dim = out_dim
        self.softplus = nn.Softplus(beta=100)

    # pyre-fixme[14]: `forward` overrides method defined in `ImplicitFunctionBase`
    # inconsistently.
    def forward(
        self,
        *,
        ray_bundle: Optional[ImplicitronRayBundle] = None,
        rays_points_world: Optional[torch.Tensor] = None,
        fun_viewpool=None,
        global_code=None,
        **kwargs,
    ):
        # this field only uses point locations
        # rays_points_world.shape = [minibatch x ... x pts_per_ray x 3]
        rays_points_world = get_rays_points_world(ray_bundle, rays_points_world)

        # Short-circuit on empty input or when there is nothing to embed.
        if rays_points_world.numel() == 0 or (
            self.embed_fn is None and fun_viewpool is None and global_code is None
        ):
            return torch.tensor(
                [], device=rays_points_world.device, dtype=rays_points_world.dtype
            ).view(0, self.out_dim)

        embeddings = []
        if self.embed_fn is not None:
            embeddings.append(self.embed_fn(rays_points_world))

        if fun_viewpool is not None:
            assert rays_points_world.ndim == 2
            pooled_feature = fun_viewpool(rays_points_world[None])
            # TODO: pooled features are 4D!
            embeddings.append(pooled_feature)

        if global_code is not None:
            assert global_code.shape[0] == 1 # TODO: generalize to batches!
            # This will require changing raytracer code
            # embedding = embedding[None].expand(global_code.shape[0], *embedding.shape)
            embeddings.append(
                global_code[0, None, :].expand(rays_points_world.shape[0], -1)
            )

        embedding = torch.cat(embeddings, dim=-1)
        x = embedding
        for layer_idx in range(self.num_layers - 1):
            if layer_idx in self.skip_in:
                # Re-inject the input embedding, rescaled to keep variance.
                x = torch.cat([x, embedding], dim=-1) / 2**0.5

            x = self.linear_layers[layer_idx](x)

            # Softplus on every layer except the last.
            if layer_idx < self.num_layers - 2:
                x = self.softplus(x)

        return x
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/neural_radiance_field.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/neural_radiance_field.py
new file mode 100644
index 0000000000000000000000000000000000000000..00f3aa65f2ef42012071cc85bffeaf39e241bb2b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/neural_radiance_field.py
@@ -0,0 +1,275 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import logging
+from typing import Optional, Tuple
+
+import torch
+from pytorch3d.common.linear_with_repeat import LinearWithRepeat
+from pytorch3d.implicitron.models.renderer.base import (
+ conical_frustum_to_gaussian,
+ ImplicitronRayBundle,
+)
+from pytorch3d.implicitron.tools.config import expand_args_fields, registry
+from pytorch3d.renderer.cameras import CamerasBase
+from pytorch3d.renderer.implicit import HarmonicEmbedding
+from pytorch3d.renderer.implicit.utils import ray_bundle_to_ray_points
+
+from .base import ImplicitFunctionBase
+
+from .decoding_functions import ( # noqa
+ _xavier_init,
+ MLPWithInputSkips,
+ TransformerWithInputSkips,
+)
+from .utils import create_embeddings_for_implicit_function
+
+
+logger = logging.getLogger(__name__)
+
+
class NeuralRadianceFieldBase(ImplicitFunctionBase, torch.nn.Module):
    """
    Args:
        n_harmonic_functions_xyz: The number of harmonic functions
            used to form the harmonic embedding of 3D point locations.
        n_harmonic_functions_dir: The number of harmonic functions
            used to form the harmonic embedding of the ray directions.
        n_hidden_neurons_xyz: The number of hidden units in the
            fully connected layers of the MLP that accepts the 3D point
            locations and outputs the occupancy field with the intermediate
            features.
        n_hidden_neurons_dir: The number of hidden units in the
            fully connected layers of the MLP that accepts the intermediate
            features and ray directions and outputs the radiance field
            (per-point colors).
        n_layers_xyz: The number of layers of the MLP that outputs the
            occupancy field.
        append_xyz: The list of indices of the skip layers of the occupancy MLP.
        use_integrated_positional_encoding: If True, use integrated positional encoding
            as defined in `MIP-NeRF `_.
            If False, use the classical harmonic embedding
            defined in `NeRF `_.
    """

    # NOTE: `n_hidden_neurons_xyz`, `n_layers_xyz` and `append_xyz` mentioned
    # above are declared by concrete subclasses (see the registered classes
    # below), not by this base class.
    n_harmonic_functions_xyz: int = 10
    n_harmonic_functions_dir: int = 4
    n_hidden_neurons_dir: int = 128
    latent_dim: int = 0
    input_xyz: bool = True
    xyz_ray_dir_in_camera_coords: bool = False
    color_dim: int = 3
    use_integrated_positional_encoding: bool = False

    def __post_init__(self):
        # The harmonic embedding layer converts input 3D coordinates
        # to a representation that is more suitable for
        # processing with a deep neural network.
        self.harmonic_embedding_xyz = HarmonicEmbedding(
            self.n_harmonic_functions_xyz, append_input=True
        )
        self.harmonic_embedding_dir = HarmonicEmbedding(
            self.n_harmonic_functions_dir, append_input=True
        )
        if not self.input_xyz and self.latent_dim <= 0:
            raise ValueError("The latent dimension has to be > 0 if xyz is not input!")

        embedding_dim_dir = self.harmonic_embedding_dir.get_output_dim()

        # Trunk network; built by subclasses (MLP or transformer variant).
        self.xyz_encoder = self._construct_xyz_encoder(
            input_dim=self.get_xyz_embedding_dim()
        )

        self.intermediate_linear = torch.nn.Linear(
            self.n_hidden_neurons_xyz, self.n_hidden_neurons_xyz
        )
        _xavier_init(self.intermediate_linear)

        self.density_layer = torch.nn.Linear(self.n_hidden_neurons_xyz, 1)
        _xavier_init(self.density_layer)

        # Zero the bias of the density layer to avoid
        # a completely transparent initialization.
        self.density_layer.bias.data[:] = 0.0 # fixme: Sometimes this is not enough

        # Color head: consumes trunk features + embedded view direction.
        self.color_layer = torch.nn.Sequential(
            LinearWithRepeat(
                self.n_hidden_neurons_xyz + embedding_dim_dir, self.n_hidden_neurons_dir
            ),
            torch.nn.ReLU(True),
            torch.nn.Linear(self.n_hidden_neurons_dir, self.color_dim),
            torch.nn.Sigmoid(),
        )

    def get_xyz_embedding_dim(self):
        """Width of the trunk input: xyz embedding (if used) plus latent code."""
        return (
            self.harmonic_embedding_xyz.get_output_dim() * int(self.input_xyz)
            + self.latent_dim
        )

    def _construct_xyz_encoder(self, input_dim: int):
        """Build the trunk network; implemented by registered subclasses."""
        raise NotImplementedError()

    def _get_colors(self, features: torch.Tensor, rays_directions: torch.Tensor):
        """
        This function takes per-point `features` predicted by `self.xyz_encoder`
        and evaluates the color model in order to attach to each
        point a 3D vector of its RGB color.
        """
        # Normalize the ray_directions to unit l2 norm.
        rays_directions_normed = torch.nn.functional.normalize(rays_directions, dim=-1)
        # Obtain the harmonic embedding of the normalized ray directions.
        rays_embedding = self.harmonic_embedding_dir(rays_directions_normed)

        return self.color_layer((self.intermediate_linear(features), rays_embedding))

    @staticmethod
    def allows_multiple_passes() -> bool:
        """
        Returns True as this implicit function allows
        multiple passes. Overridden from ImplicitFunctionBase.
        """
        return True

    def forward(
        self,
        *,
        ray_bundle: ImplicitronRayBundle,
        fun_viewpool=None,
        camera: Optional[CamerasBase] = None,
        global_code=None,
        **kwargs,
    ):
        """
        The forward function accepts the parametrizations of
        3D points sampled along projection rays. The forward
        pass is responsible for attaching a 3D vector
        and a 1D scalar representing the point's
        RGB color and opacity respectively.

        Args:
            ray_bundle: An ImplicitronRayBundle object containing the following variables:
                origins: A tensor of shape `(minibatch, ..., 3)` denoting the
                    origins of the sampling rays in world coords.
                directions: A tensor of shape `(minibatch, ..., 3)`
                    containing the direction vectors of sampling rays in world coords.
                lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`
                    containing the lengths at which the rays are sampled.
                bins: An optional tensor of shape `(minibatch,..., num_points_per_ray + 1)`
                    containing the bins at which the rays are sampled. In this case
                    lengths is equal to the midpoints of bins.

            fun_viewpool: an optional callback with the signature
                    fun_viewpool(points) -> pooled_features
                where points is a [N_TGT x N x 3] tensor of world coords,
                and pooled_features is a [N_TGT x ... x N_SRC x latent_dim] tensor
                of the features pooled from the context images.

        Returns:
            rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`
                denoting the opacity of each ray point.
            rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`
                denoting the color of each ray point.

        Raises:
            ValueError: If `use_integrated_positional_encoding` is True and
                `ray_bundle.bins` is None.
        """
        if self.use_integrated_positional_encoding and ray_bundle.bins is None:
            raise ValueError(
                "When use_integrated_positional_encoding is True, ray_bundle.bins must be set."
                "Have you set to True `AbstractMaskRaySampler.use_bins_for_ray_sampling`?"
            )

        # MIP-NeRF-style Gaussian approximation of the frustum, or plain points.
        rays_points_world, diag_cov = (
            conical_frustum_to_gaussian(ray_bundle)
            if self.use_integrated_positional_encoding
            else (ray_bundle_to_ray_points(ray_bundle), None) # pyre-ignore
        )
        # rays_points_world.shape = [minibatch x ... x pts_per_ray x 3]

        embeds = create_embeddings_for_implicit_function(
            xyz_world=rays_points_world,
            # for 2nd param but got `Union[None, torch.Tensor, torch.nn.Module]`.
            xyz_embedding_function=(
                self.harmonic_embedding_xyz if self.input_xyz else None
            ),
            global_code=global_code,
            fun_viewpool=fun_viewpool,
            xyz_in_camera_coords=self.xyz_ray_dir_in_camera_coords,
            camera=camera,
            diag_cov=diag_cov,
        )

        # embeds.shape = [minibatch x n_src x n_rays x n_pts x self.n_harmonic_functions*6+3]
        features = self.xyz_encoder(embeds)
        # features.shape = [minibatch x ... x self.n_hidden_neurons_xyz]
        # NNs operate on the flattenned rays; reshaping to the correct spatial size
        # TODO: maybe make the transformer work on non-flattened tensors to avoid this reshape
        features = features.reshape(*rays_points_world.shape[:-1], -1)

        raw_densities = self.density_layer(features)
        # raw_densities.shape = [minibatch x ... x 1] in [0-1]

        if self.xyz_ray_dir_in_camera_coords:
            if camera is None:
                raise ValueError("Camera must be given if xyz_ray_dir_in_camera_coords")

            # Rotate world directions into the camera frame before coloring.
            directions = ray_bundle.directions @ camera.R
        else:
            directions = ray_bundle.directions

        rays_colors = self._get_colors(features, directions)
        # rays_colors.shape = [minibatch x ... x 3] in [0-1]

        return raw_densities, rays_colors, {}
+
+
@registry.register
class NeuralRadianceFieldImplicitFunction(NeuralRadianceFieldBase):
    transformer_dim_down_factor: float = 1.0
    n_hidden_neurons_xyz: int = 256
    n_layers_xyz: int = 8
    append_xyz: Tuple[int, ...] = (5,)

    def _construct_xyz_encoder(self, input_dim: int):
        """
        Build the classical NeRF trunk: an MLP with input skip connections
        which re-inject the positional embedding at layers `append_xyz`.
        """
        # Make sure the configurable MLP class has its dataclass machinery
        # expanded before constructing it directly.
        expand_args_fields(MLPWithInputSkips)
        return MLPWithInputSkips(
            n_layers=self.n_layers_xyz,
            input_dim=input_dim,
            output_dim=self.n_hidden_neurons_xyz,
            skip_dim=input_dim,
            hidden_dim=self.n_hidden_neurons_xyz,
            input_skips=self.append_xyz,
        )
+
+
@registry.register
class NeRFormerImplicitFunction(NeuralRadianceFieldBase):
    transformer_dim_down_factor: float = 2.0
    n_hidden_neurons_xyz: int = 80
    n_layers_xyz: int = 2
    append_xyz: Tuple[int, ...] = (1,)

    def _construct_xyz_encoder(self, input_dim: int):
        """
        Build the NeRFormer trunk: a transformer alternating attention over
        pooled source-view features and ray points, with skips at `append_xyz`.
        """
        return TransformerWithInputSkips(
            n_layers=self.n_layers_xyz,
            input_dim=input_dim,
            output_dim=self.n_hidden_neurons_xyz,
            skip_dim=input_dim,
            hidden_dim=self.n_hidden_neurons_xyz,
            input_skips=self.append_xyz,
            dim_down_factor=self.transformer_dim_down_factor,
        )

    @staticmethod
    def requires_pooling_without_aggregation() -> bool:
        """
        Returns True as this implicit function needs
        pooling without aggregation. Overridden from ImplicitFunctionBase.
        """
        return True
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/scene_representation_networks.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/scene_representation_networks.py
new file mode 100644
index 0000000000000000000000000000000000000000..35e07992a3f2a0e3f089d47e39304b6632a4b527
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/scene_representation_networks.py
@@ -0,0 +1,431 @@
+# @lint-ignore-every LICENSELINT
+# Adapted from https://github.com/vsitzmann/scene-representation-networks
+# Copyright (c) 2019 Vincent Sitzmann
+
+# pyre-unsafe
+from typing import Any, cast, Optional, Tuple
+
+import torch
+from omegaconf import DictConfig
+from pytorch3d.common.linear_with_repeat import LinearWithRepeat
+from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
+from pytorch3d.implicitron.third_party import hyperlayers, pytorch_prototyping
+from pytorch3d.implicitron.tools.config import Configurable, registry, run_auto_creation
+from pytorch3d.renderer import ray_bundle_to_ray_points
+from pytorch3d.renderer.cameras import CamerasBase
+from pytorch3d.renderer.implicit import HarmonicEmbedding
+
+from .base import ImplicitFunctionBase
+from .utils import create_embeddings_for_implicit_function
+
+
def _kaiming_normal_init(module: torch.nn.Module) -> None:
    """Apply Kaiming-normal (fan-in, ReLU) init to linear-type layers in place."""
    if not isinstance(module, (torch.nn.Linear, LinearWithRepeat)):
        return
    torch.nn.init.kaiming_normal_(
        module.weight, a=0.0, nonlinearity="relu", mode="fan_in"
    )
+
+
class SRNRaymarchFunction(Configurable, torch.nn.Module):
    """
    The raymarching MLP of a Scene Representation Network (SRN).

    Ray points are converted to world coordinates, embedded with a harmonic
    embedding (optionally combined with viewpooled features and/or a global
    code), and passed through a fully-connected block which outputs a
    raymarch feature vector per point.
    """

    n_harmonic_functions: int = 3  # 0 means raw 3D coord inputs
    n_hidden_units: int = 256
    n_layers: int = 2
    in_features: int = 3
    out_features: int = 256
    latent_dim: int = 0
    xyz_in_camera_coords: bool = False

    # The internal network can be set as an output of an SRNHyperNet.
    # Note that, in order to avoid Pytorch's automatic registering of the
    # raymarch_function module on construction, we input the network wrapped
    # as a 1-tuple.

    # raymarch_function should ideally be typed as Optional[Tuple[Callable]]
    # but Omegaconf.structured doesn't like that. TODO: revisit after new
    # release of omegaconf including https://github.com/omry/omegaconf/pull/749 .
    raymarch_function: Any = None

    def __post_init__(self):
        self._harmonic_embedding = HarmonicEmbedding(
            self.n_harmonic_functions, append_input=True
        )
        embedding_dim = self.latent_dim + HarmonicEmbedding.get_output_dim_static(
            self.in_features,
            self.n_harmonic_functions,
            True,
        )

        if self.raymarch_function is None:
            self._net = pytorch_prototyping.FCBlock(
                hidden_ch=self.n_hidden_units,
                num_hidden_layers=self.n_layers,
                in_features=embedding_dim,
                out_features=self.out_features,
            )
        else:
            # Unwrap the 1-tuple holding an externally generated network
            # (e.g. the output of SRNRaymarchHyperNet).
            self._net = self.raymarch_function[0]

    def forward(
        self,
        ray_bundle: ImplicitronRayBundle,
        fun_viewpool=None,
        camera: Optional[CamerasBase] = None,
        global_code=None,
        **kwargs,
    ):
        """
        Args:
            ray_bundle: An ImplicitronRayBundle with `origins` of shape
                `(minibatch, ..., 3)` (world coords), `directions` of shape
                `(minibatch, ..., 3)` (world coords) and `lengths` of shape
                `(minibatch, ..., num_points_per_ray)`.
            fun_viewpool: an optional callback with the signature
                fun_viewpool(points) -> pooled_features
                where points is a [N_TGT x N x 3] tensor of world coords,
                and pooled_features is a [N_TGT x ... x N_SRC x latent_dim]
                tensor of the features pooled from the context images.
            camera: optional camera, used when `xyz_in_camera_coords` is set.
            global_code: optional per-batch code appended to the embeddings.

        Returns:
            raymarch_features: tensor of shape
                `(minibatch, ..., num_points_per_ray, n_hidden_units)`.
            None (colors are produced separately by SRNPixelGenerator).
        """
        # Convert the ray parametrization to world-space sample points.
        # pyre-ignore[6]
        points_world = ray_bundle_to_ray_points(ray_bundle)

        embeds = create_embeddings_for_implicit_function(
            xyz_world=points_world,
            xyz_embedding_function=self._harmonic_embedding,
            global_code=global_code,
            fun_viewpool=fun_viewpool,
            xyz_in_camera_coords=self.xyz_in_camera_coords,
            camera=camera,
        )

        # The SRN layers consume huge amounts of memory on high-rank inputs,
        # so run them on a flattened (batch, points, channels) view ...
        flattened = embeds.view(embeds.shape[0], -1, embeds.shape[-1])
        raymarch_features = self._net(flattened)

        # ... and restore the spatial layout of the rays afterwards.
        return raymarch_features.reshape(*points_world.shape[:-1], -1), None
+
+
class SRNPixelGenerator(Configurable, torch.nn.Module):
    """
    The pixel generator of a Scene Representation Network (SRN).

    Maps per-point features produced by an SRN raymarching function to a raw
    density and an RGB color; the color additionally depends on the
    (harmonically embedded, normalized) ray direction.
    """

    n_harmonic_functions: int = 4
    n_hidden_units: int = 256
    n_hidden_units_color: int = 128
    n_layers: int = 2
    in_features: int = 256
    out_features: int = 3
    ray_dir_in_camera_coords: bool = False

    def __post_init__(self):
        self._harmonic_embedding = HarmonicEmbedding(
            self.n_harmonic_functions, append_input=True
        )
        # Trunk MLP refining the incoming raymarch features.
        self._net = pytorch_prototyping.FCBlock(
            hidden_ch=self.n_hidden_units,
            num_hidden_layers=self.n_layers,
            in_features=self.in_features,
            out_features=self.n_hidden_units,
        )
        self._density_layer = torch.nn.Linear(self.n_hidden_units, 1)
        self._density_layer.apply(_kaiming_normal_init)
        embedding_dim_dir = self._harmonic_embedding.get_output_dim(input_dims=3)
        # Color head: consumes trunk features + embedded ray directions.
        self._color_layer = torch.nn.Sequential(
            LinearWithRepeat(
                self.n_hidden_units + embedding_dim_dir,
                self.n_hidden_units_color,
            ),
            torch.nn.LayerNorm([self.n_hidden_units_color]),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(self.n_hidden_units_color, self.out_features),
        )
        self._color_layer.apply(_kaiming_normal_init)

    # TODO: merge with NeuralRadianceFieldBase's _get_colors
    def _get_colors(self, features: torch.Tensor, rays_directions: torch.Tensor):
        """
        This function takes per-point `features` predicted by `self.net`
        and evaluates the color model in order to attach to each
        point a 3D vector of its RGB color.
        """
        # Normalize the ray_directions to unit l2 norm.
        rays_directions_normed = torch.nn.functional.normalize(rays_directions, dim=-1)
        # Obtain the harmonic embedding of the normalized ray directions.
        rays_embedding = self._harmonic_embedding(rays_directions_normed)
        return self._color_layer((features, rays_embedding))

    def forward(
        self,
        raymarch_features: torch.Tensor,
        ray_bundle: ImplicitronRayBundle,
        camera: Optional[CamerasBase] = None,
        **kwargs,
    ):
        """
        Args:
            raymarch_features: Features from the raymarching network of shape
                `(minibatch, ..., self.in_features)`
            ray_bundle: An ImplicitronRayBundle object containing the following variables:
                origins: A tensor of shape `(minibatch, ..., 3)` denoting the
                    origins of the sampling rays in world coords.
                directions: A tensor of shape `(minibatch, ..., 3)`
                    containing the direction vectors of sampling rays in world coords.
                lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`
                    containing the lengths at which the rays are sampled.
            camera: required (to rotate ray directions into camera coords)
                when `ray_dir_in_camera_coords` is set.

        Returns:
            rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`
                denoting the opacity of each ray point.
            rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`
                denoting the color of each ray point.

        Raises:
            ValueError: if `ray_dir_in_camera_coords` is set but no camera is given.
        """
        # raymarch_features.shape = [minibatch x ... x pts_per_ray x 3]
        features = self._net(raymarch_features)
        # features.shape = [minibatch x ... x self.n_hidden_units]

        if self.ray_dir_in_camera_coords:
            if camera is None:
                # Fix: the message previously referred to a nonexistent
                # option name ("xyz_ray_dir_in_camera_coords").
                raise ValueError("Camera must be given if ray_dir_in_camera_coords")

            directions = ray_bundle.directions @ camera.R
        else:
            directions = ray_bundle.directions

        # NNs operate on the flattened rays; reshaping to the correct spatial size
        features = features.reshape(*raymarch_features.shape[:-1], -1)

        raw_densities = self._density_layer(features)

        rays_colors = self._get_colors(features, directions)

        return raw_densities, rays_colors
+
+
class SRNRaymarchHyperNet(Configurable, torch.nn.Module):
    """
    This is a raymarching function which has a forward like SRNRaymarchFunction
    but instead of the weights being parameters of the module, they
    are the output of another network, the hypernet, which takes the global_code
    as input. All the dataclass members of SRNRaymarchFunction are here with the
    same meaning. In addition, there are members with names ending `_hypernet`
    which affect the hypernet.

    Because this class may be called repeatedly for the same global_code, the
    output of the hypernet is cached in self.cached_srn_raymarch_function.
    This member must be manually set to None whenever the global_code changes.
    """

    n_harmonic_functions: int = 3  # 0 means raw 3D coord inputs
    n_hidden_units: int = 256
    n_layers: int = 2
    # Width/depth of the hypernetwork that generates the raymarch MLP weights.
    n_hidden_units_hypernet: int = 256
    n_layers_hypernet: int = 1
    in_features: int = 3
    out_features: int = 256
    # Dimensionality of the global code consumed by the hypernetwork.
    latent_dim_hypernet: int = 0
    latent_dim: int = 0
    xyz_in_camera_coords: bool = False

    def __post_init__(self):
        # Input width of the generated raymarch MLP; mirrors the embedding
        # dimension computed in SRNRaymarchFunction.__post_init__.
        raymarch_input_embedding_dim = (
            HarmonicEmbedding.get_output_dim_static(
                self.in_features,
                self.n_harmonic_functions,
                True,
            )
            + self.latent_dim
        )

        self._hypernet = hyperlayers.HyperFC(
            hyper_in_ch=self.latent_dim_hypernet,
            hyper_num_hidden_layers=self.n_layers_hypernet,
            hyper_hidden_ch=self.n_hidden_units_hypernet,
            hidden_ch=self.n_hidden_units,
            num_hidden_layers=self.n_layers,
            in_ch=raymarch_input_embedding_dim,
            out_ch=self.n_hidden_units,
        )

        # Cache of the last hypernet output (see the class docstring);
        # must be reset to None externally whenever the global_code changes.
        self.cached_srn_raymarch_function: Optional[Tuple[SRNRaymarchFunction]] = None

    def _run_hypernet(self, global_code: torch.Tensor) -> Tuple[SRNRaymarchFunction]:
        """
        Runs the hypernet and returns a 1-tuple containing the generated
        srn_raymarch_function.
        """

        net = self._hypernet(global_code)

        # use the hyper-net generated network to instantiate the raymarch module
        srn_raymarch_function = SRNRaymarchFunction(
            n_harmonic_functions=self.n_harmonic_functions,
            n_hidden_units=self.n_hidden_units,
            n_layers=self.n_layers,
            in_features=self.in_features,
            out_features=self.out_features,
            latent_dim=self.latent_dim,
            xyz_in_camera_coords=self.xyz_in_camera_coords,
            # wrapped in a 1-tuple to avoid PyTorch submodule auto-registration
            raymarch_function=(net,),
        )

        # move the generated raymarch function to the correct device
        srn_raymarch_function.to(global_code.device)

        return (srn_raymarch_function,)

    def forward(
        self,
        ray_bundle: ImplicitronRayBundle,
        fun_viewpool=None,
        camera: Optional[CamerasBase] = None,
        global_code=None,
        **kwargs,
    ):
        """
        Generates (or reuses the cached) raymarch function from `global_code`
        via the hypernet and evaluates it on `ray_bundle`. Returns whatever
        SRNRaymarchFunction.forward returns, i.e. (raymarch_features, None).

        Raises:
            ValueError: if `global_code` is None.
        """

        if global_code is None:
            raise ValueError("SRN Hypernetwork requires a non-trivial global code.")

        # The raymarching network is cached in case the function is called repeatedly
        # across LSTM iterations for the same global_code.
        if self.cached_srn_raymarch_function is None:
            # generate the raymarching network from the hypernet
            self.cached_srn_raymarch_function = self._run_hypernet(global_code)
        (srn_raymarch_function,) = cast(
            Tuple[SRNRaymarchFunction], self.cached_srn_raymarch_function
        )

        return srn_raymarch_function(
            ray_bundle=ray_bundle,
            fun_viewpool=fun_viewpool,
            camera=camera,
            global_code=None,  # the hypernetwork takes the global code
        )
+
+
@registry.register
class SRNImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
    """
    Scene Representation Network implicit function: a raymarching MLP that
    produces per-point features, followed by a pixel generator that decodes
    the features into densities and colors.
    """

    latent_dim: int = 0
    # pyre-fixme[13]: Attribute `raymarch_function` is never initialized.
    raymarch_function: SRNRaymarchFunction
    # pyre-fixme[13]: Attribute `pixel_generator` is never initialized.
    pixel_generator: SRNPixelGenerator

    def __post_init__(self):
        run_auto_creation(self)

    def create_raymarch_function(self) -> None:
        # latent_dim is owned by this class and injected here; it is removed
        # from the auto-generated args (see raymarch_function_tweak_args).
        self.raymarch_function = SRNRaymarchFunction(
            latent_dim=self.latent_dim,
            **self.raymarch_function_args,
        )

    @classmethod
    def raymarch_function_tweak_args(cls, type, args: DictConfig) -> None:
        # Drop the member supplied explicitly by create_raymarch_function.
        args.pop("latent_dim", None)

    def forward(
        self,
        *,
        ray_bundle: ImplicitronRayBundle,
        fun_viewpool=None,
        camera: Optional[CamerasBase] = None,
        global_code=None,
        raymarch_features: Optional[torch.Tensor] = None,
        **kwargs,
    ):
        """
        Two-phase evaluation: if `raymarch_features` is None, run the
        raymarching function; otherwise decode the given features into
        densities and colors with the pixel generator.
        """
        if raymarch_features is None:
            return self.raymarch_function(
                ray_bundle=ray_bundle,
                fun_viewpool=fun_viewpool,
                camera=camera,
                global_code=global_code,
                **kwargs,
            )
        return self.pixel_generator(
            raymarch_features=raymarch_features,
            ray_bundle=ray_bundle,
            camera=camera,
            **kwargs,
        )
+
+
@registry.register
class SRNHyperNetImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
    """
    This implicit function uses a hypernetwork to generate the
    SRNRaymarchingFunction, and this is cached. Whenever the
    global_code changes, `on_bind_args` must be called to clear
    the cache.
    """

    latent_dim_hypernet: int = 0
    latent_dim: int = 0
    # pyre-fixme[13]: Attribute `hypernet` is never initialized.
    hypernet: SRNRaymarchHyperNet
    # pyre-fixme[13]: Attribute `pixel_generator` is never initialized.
    pixel_generator: SRNPixelGenerator

    def __post_init__(self):
        run_auto_creation(self)

    def create_hypernet(self) -> None:
        # The latent dims are owned by this class and injected here; they are
        # removed from the auto-generated args (see hypernet_tweak_args).
        self.hypernet = SRNRaymarchHyperNet(
            latent_dim=self.latent_dim,
            latent_dim_hypernet=self.latent_dim_hypernet,
            **self.hypernet_args,
        )

    @classmethod
    def hypernet_tweak_args(cls, type, args: DictConfig) -> None:
        # Drop the members supplied explicitly by create_hypernet.
        args.pop("latent_dim", None)
        args.pop("latent_dim_hypernet", None)

    def forward(
        self,
        *,
        ray_bundle: ImplicitronRayBundle,
        fun_viewpool=None,
        camera: Optional[CamerasBase] = None,
        global_code=None,
        raymarch_features: Optional[torch.Tensor] = None,
        **kwargs,
    ):
        """
        Two-phase evaluation: if `raymarch_features` is None, run the
        hypernet-generated raymarching function; otherwise decode the given
        features into densities and colors with the pixel generator.
        """
        if raymarch_features is None:
            return self.hypernet(
                ray_bundle=ray_bundle,
                fun_viewpool=fun_viewpool,
                camera=camera,
                global_code=global_code,
                **kwargs,
            )
        return self.pixel_generator(
            raymarch_features=raymarch_features,
            ray_bundle=ray_bundle,
            camera=camera,
            **kwargs,
        )

    def on_bind_args(self):
        """
        The global_code may have changed, so we reset the hypernet's cache.
        """
        self.hypernet.cached_srn_raymarch_function = None
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec453f43e454c71df3cc5448d2c0ca961b59b7df
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/utils.py
@@ -0,0 +1,221 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from typing import Callable, Optional
+
+import torch
+
+import torch.nn.functional as F
+from pytorch3d.common.compat import prod
+from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
+from pytorch3d.renderer import ray_bundle_to_ray_points
+from pytorch3d.renderer.cameras import CamerasBase
+
+
def broadcast_global_code(embeds: torch.Tensor, global_code: torch.Tensor):
    """
    Append a per-batch global code to per-point embeddings.

    `global_code` of shape (minibatch, dim) is reshaped and expanded so it
    can be concatenated along the last dimension of `embeds`, which has
    shape (minibatch, ..., dim2). Returns a tensor of shape
    (minibatch, ..., dim2 + dim).
    """
    n_batch = embeds.shape[0]
    # Insert singleton dims for every intermediate axis of `embeds`, then
    # expand (no copy) to match its leading shape.
    code = global_code.view(n_batch, *([1] * (embeds.ndim - 2)), -1)
    code = code.expand(*embeds.shape[:-1], global_code.shape[-1])
    return torch.cat([embeds, code], dim=-1)
+
+
def create_embeddings_for_implicit_function(
    xyz_world: torch.Tensor,
    xyz_in_camera_coords: bool,
    global_code: Optional[torch.Tensor],
    camera: Optional[CamerasBase],
    fun_viewpool: Optional[Callable],
    xyz_embedding_function: Optional[Callable],
    diag_cov: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Create per-point embeddings for an implicit function from world-space
    ray points, optionally combining a positional embedding, viewpooled
    features and a global code.

    Args:
        xyz_world: ray points of shape (minibatch, ..., pts_per_ray, 3).
        xyz_in_camera_coords: if True, transform points to the camera frame
            before applying `xyz_embedding_function`.
        global_code: optional (minibatch, dim) code appended to every point.
        camera: required when `xyz_in_camera_coords` is set.
        fun_viewpool: optional callback mapping (minibatch, N, 3) world
            points to pooled features of shape (minibatch, ..., n_src, dim).
        xyz_embedding_function: optional positional embedding; if None a
            zero-width placeholder is used so pooled/global features can
            still be concatenated.
        diag_cov: optional per-point covariance forwarded to the embedding.

    Returns:
        Tensor of shape (minibatch, n_src, n_rays, pts_per_ray, channels).

    Raises:
        ValueError: if `xyz_in_camera_coords` is set but `camera` is None.
    """

    bs, *spatial_size, pts_per_ray, _ = xyz_world.shape

    if xyz_in_camera_coords:
        if camera is None:
            raise ValueError("Camera must be given if xyz_in_camera_coords")

        ray_points_for_embed = (
            camera.get_world_to_view_transform()
            .transform_points(xyz_world.view(bs, -1, 3))
            .view(xyz_world.shape)
        )
    else:
        ray_points_for_embed = xyz_world

    if xyz_embedding_function is None:
        # Zero-width placeholder so that viewpooled features and/or the
        # global code can still be concatenated below.
        # Fix: allocate with the device/dtype of the input points; the
        # previous default-constructed tensor lived on the CPU with the
        # default dtype, which breaks torch.cat for CUDA or non-float32
        # inputs.
        embeds = torch.empty(
            bs,
            1,
            prod(spatial_size),
            pts_per_ray,
            0,
            dtype=xyz_world.dtype,
            device=xyz_world.device,
        )
    else:

        embeds = xyz_embedding_function(ray_points_for_embed, diag_cov=diag_cov)
        embeds = embeds.reshape(
            bs,
            1,
            prod(spatial_size),
            pts_per_ray,
            -1,
        )  # flatten spatial, add n_src dim

    if fun_viewpool is not None:
        # viewpooling
        embeds_viewpooled = fun_viewpool(xyz_world.reshape(bs, -1, 3))
        embed_shape = (
            bs,
            embeds_viewpooled.shape[1],
            prod(spatial_size),
            pts_per_ray,
            -1,
        )
        embeds_viewpooled = embeds_viewpooled.reshape(*embed_shape)
        # NOTE(review): `embeds` is always a tensor at this point; the check
        # is kept for safety / backward compatibility.
        if embeds is not None:
            embeds = torch.cat([embeds.expand(*embed_shape), embeds_viewpooled], dim=-1)
        else:
            embeds = embeds_viewpooled

    if global_code is not None:
        # append the broadcasted global code to embeds
        embeds = broadcast_global_code(embeds, global_code)

    return embeds
+
+
def interpolate_line(
    points: torch.Tensor,
    source: torch.Tensor,
    **kwargs,
) -> torch.Tensor:
    """
    Linearly interpolate 1D feature grids at the given coordinates.

    Each point holds a single x coordinate, e.g. ([[x0], [x1], ...]); the
    first dimension of `source` is the feature axis, the last the spatial
    axis. Extra kwargs are forwarded to torch.nn.functional.grid_sample.

    Arguments:
        points: shape (n_grids, n_points, 1),
        source: tensor of shape (n_grids, features, width),
    Returns:
        interpolated tensor of shape (n_grids, n_points, features)
    """
    # grid_sample needs 2D sampling locations; pad a zero y-coordinate
    # and add the dummy spatial dimension expected for 4D sampling.
    zero_y = points.new_zeros(points.shape)
    grid = torch.cat((points, zero_y), dim=-1)[:, :, None, :]

    # Treat the 1D source as a (features, 1, width) image.
    sampled = F.grid_sample(
        grid=grid,
        input=source[:, :, None, :],
        **kwargs,
    )
    # Drop the dummy spatial dim and move features last.
    return sampled[:, :, :, 0].permute(0, 2, 1)
+
+
def interpolate_plane(
    points: torch.Tensor,
    source: torch.Tensor,
    **kwargs,
) -> torch.Tensor:
    """
    Bilinearly interpolate 2D feature grids at the given coordinates.

    Each point holds (x, y) coordinates, e.g. ([[x0, y0], [x1, y1], ...]);
    the first dimension of `source` is the feature axis, the remaining two
    the spatial axes. Extra kwargs are forwarded to
    torch.nn.functional.grid_sample.

    Arguments:
        points: shape (n_grids, n_points, 2),
        source: tensor of shape (n_grids, features, width, height),
    Returns:
        interpolated tensor of shape (n_grids, n_points, features)
    """
    # grid_sample expects (features, height, width) whereas the source is
    # stored as (features, width, height) — swap the spatial axes.
    image = source.permute(0, 1, 3, 2)
    grid = points[:, :, None, :]

    sampled = F.grid_sample(
        grid=grid,
        input=image,
        **kwargs,
    )
    # Drop the dummy spatial dim and move features last.
    return sampled[:, :, :, 0].permute(0, 2, 1)
+
+
def interpolate_volume(
    points: torch.Tensor, source: torch.Tensor, **kwargs
) -> torch.Tensor:
    """
    Trilinearly interpolate 3D feature grids at the given coordinates.

    Each point holds (x, y, z) coordinates, e.g.
    [[x0, y0, z0], [x1, y1, z1], ...]; the first dimension of `source` is
    the feature axis, the remaining three the spatial axes. Extra kwargs
    are forwarded to torch.nn.functional.grid_sample.

    Arguments:
        points: shape (n_grids, n_points, 3),
        source: tensor of shape (n_grids, features, width, height, depth),
    Returns:
        interpolated tensor of shape (n_grids, n_points, features)
    """
    opts = kwargs
    if opts.get("mode") == "trilinear":
        # grid_sample spells trilinear interpolation "bilinear" for 5D input.
        opts = dict(opts, mode="bilinear")

    # grid_sample expects (features, depth, height, width) whereas the
    # source is stored as (features, width, height, depth).
    volume = source.permute(0, 1, 4, 3, 2)
    grid = points[:, :, None, None, :]

    sampled = F.grid_sample(
        grid=grid,
        input=volume,
        **opts,
    )
    # Drop the dummy spatial dims and move features last.
    return sampled[:, :, :, 0, 0].permute(0, 2, 1)
+
+
def get_rays_points_world(
    ray_bundle: Optional[ImplicitronRayBundle] = None,
    rays_points_world: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Return world-space ray points, converting `ray_bundle` if needed.

    Exactly one of the two arguments must be given: passing both (or
    neither) raises a ValueError.

    Args:
        ray_bundle: An ImplicitronRayBundle object or None
        rays_points_world: A torch.Tensor representing ray points converted to
            world coordinates
    Returns:
        A torch.Tensor representing ray points converted to world coordinates
        of shape [minibatch x ... x pts_per_ray x 3].
    """
    if ray_bundle is None and rays_points_world is None:
        raise ValueError("ray_bundle and rays_points_world cannot both be None")
    if ray_bundle is not None and rays_points_world is not None:
        raise ValueError(
            "Cannot define both rays_points_world and ray_bundle,"
            + " one has to be None."
        )
    if rays_points_world is not None:
        return rays_points_world
    # pyre-ignore[6]
    return ray_bundle_to_ray_points(ray_bundle)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/voxel_grid.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/voxel_grid.py
new file mode 100644
index 0000000000000000000000000000000000000000..48e22eb2b2d2551748fce2637efd5d9d7d63cb65
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/voxel_grid.py
@@ -0,0 +1,1139 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+"""
+This file contains classes that implement Voxel grids, both in their full resolution
+as in the factorized form. There are two factorized forms implemented, Tensor rank decomposition
+or CANDECOMP/PARAFAC (here CP) and Vector Matrix (here VM) factorization from the
+TensoRF (https://arxiv.org/abs/2203.09517) paper.
+
+In addition, the module VoxelGridModule implements a trainable instance of one of
+these classes.
+
+"""
+
+import logging
+import warnings
+from collections.abc import Mapping
+from dataclasses import dataclass, field
+
+from distutils.version import LooseVersion
+from typing import Any, Callable, ClassVar, Dict, Iterator, List, Optional, Tuple, Type
+
+import torch
+from omegaconf import DictConfig
+from pytorch3d.implicitron.tools.config import (
+ Configurable,
+ registry,
+ ReplaceableBase,
+ run_auto_creation,
+)
+from pytorch3d.structures.volumes import VolumeLocator
+
+from .utils import interpolate_line, interpolate_plane, interpolate_volume
+
+
+logger = logging.getLogger(__name__)
+
+
@dataclass
class VoxelGridValuesBase:
    """
    Base marker class for containers of the tensors which hold the values
    of a voxel grid; concrete grid classes declare their own subclass
    (referenced as `values_type`) with named tensor members.
    """

    pass
+
+
+class VoxelGridBase(ReplaceableBase, torch.nn.Module):
+ """
+ Base class for all the voxel grid variants whith added trilinear interpolation between
+ voxels (for example if voxel (0.333, 1, 3) is queried that would return the result
+ 2/3*voxel[0, 1, 3] + 1/3*voxel[1, 1, 3])
+
+ Internally voxel grids are indexed by (features, x, y, z). If queried the point is not
+ inside the voxel grid the vector that will be returned is determined by padding.
+
+ Members:
+ align_corners: parameter used in torch.functional.grid_sample. For details go to
+ https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html by
+ default is True
+ padding: padding mode for outside grid values 'zeros' | 'border' | 'reflection'.
+ Default is 'zeros'
+ mode: interpolation mode to calculate output values :
+ 'bilinear' | 'nearest' | 'bicubic' | 'trilinear'.
+ Default: 'bilinear' Note: mode='bicubic' supports only FullResolutionVoxelGrid.
+ When mode='bilinear' and the input is 5-D, the interpolation mode used internally
+ will actually be trilinear.
+ n_features: number of dimensions of base feature vector. Determines how many features
+ the grid returns.
+ resolution_changes: a dictionary, where keys are change epochs and values are
+ 3-tuples containing x, y, z grid sizes corresponding to each axis to each epoch
+ """
+
+ align_corners: bool = True
+ padding: str = "zeros"
+ mode: str = "bilinear"
+ n_features: int = 1
+ # return the line below once we drop OmegaConf 2.1 support
+ # resolution_changes: Dict[int, List[int]] = field(
+ resolution_changes: Dict[int, Any] = field(
+ default_factory=lambda: {0: [128, 128, 128]}
+ )
+
+ def __post_init__(self):
+ if 0 not in self.resolution_changes:
+ raise ValueError("There has to be key `0` in `resolution_changes`.")
+
+ def evaluate_world(
+ self,
+ points: torch.Tensor,
+ grid_values: VoxelGridValuesBase,
+ locator: VolumeLocator,
+ ) -> torch.Tensor:
+ """
+ Evaluates the voxel grid at points in the world coordinate frame.
+ The interpolation type is determined by the `mode` member.
+
+ Arguments:
+ points (torch.Tensor): tensor of points that you want to query
+ of a form (n_grids, ..., 3)
+ grid_values: an object of type Class.values_type which has tensors as
+ members which have shapes derived from the get_shapes() method
+ locator: a VolumeLocator object
+ Returns:
+ torch.Tensor: shape (n_grids, ..., n_features)
+ """
+ points_local = locator.world_to_local_coords(points)
+ return self.evaluate_local(points_local, grid_values)
+
    def evaluate_local(
        self, points: torch.Tensor, grid_values: VoxelGridValuesBase
    ) -> torch.Tensor:
        """
        Evaluates the voxel grid at points in the local coordinate frame,
        The interpolation type is determined by the `mode` member.

        Abstract: concrete grid subclasses must implement this.

        Arguments:
            points (torch.Tensor): tensor of points that you want to query
                of a form (n_grids, ..., 3), in a normalized form (coordinates are in [-1, 1])
            grid_values: an object of type self.values_type which has tensors
                as members which have shapes derived from the get_shapes() method
        Returns:
            torch.Tensor: shape (n_grids, ..., n_features)
        """
        raise NotImplementedError()
+
    def get_shapes(self, epoch: int) -> Dict[str, Tuple]:
        """
        Using parameters from the __init__ method, this method returns the
        shapes of individual tensors needed to run the evaluate method.

        Abstract: concrete grid subclasses must implement this.

        Args:
            epoch: If the shape varies during training, which training epoch's shape to return.
        Returns:
            a dictionary of needed shapes. To use the evaluate_local and evaluate_world methods
                replace the shapes in the dictionary with tensors of those shapes and add the
                first 'batch' dimension. If the required shape is (a, b) and you want to
                have g grids then the tensor that replaces the shape should have the
                shape (g, a, b).
        """
        raise NotImplementedError()
+
+ def get_resolution(self, epoch: int) -> List[int]:
+ """
+ Returns the resolution which the grid should have at specific epoch
+
+ Args:
+ epoch which to use in the resolution calculation
+ Returns:
+ resolution at specific epoch
+ """
+ last_change = 0
+ for change_epoch in self.resolution_changes:
+ if change_epoch <= epoch:
+ last_change = max(last_change, change_epoch)
+ return self.resolution_changes[last_change]
+
+ @staticmethod
+ def get_output_dim(args: DictConfig) -> int:
+ """
+ Given all the arguments of the grid's __init__, returns output's last dimension length.
+
+ In particular, if self.evaluate_world or self.evaluate_local
+ are called with `points` of shape (n_grids, n_points, 3),
+ their output will be of shape
+ (n_grids, n_points, grid.get_output_dim()).
+
+ Args:
+ args: DictConfig which would be used to initialize the object
+ Returns:
+ output's last dimension length
+ """
+ return args["n_features"]
+
+ def change_resolution(
+ self,
+ grid_values: VoxelGridValuesBase,
+ *,
+ epoch: Optional[int] = None,
+ grid_values_with_wanted_resolution: Optional[VoxelGridValuesBase] = None,
+ mode: str = "linear",
+ align_corners: bool = True,
+ antialias: bool = False,
+ ) -> Tuple[VoxelGridValuesBase, bool]:
+ """
+ Changes resolution of tensors in `grid_values` to match the
+ `grid_values_with_wanted_resolution` or resolution on wanted epoch.
+
+ Args:
+ epoch: current training epoch, used to see if the grid needs regridding
+ grid_values: instance of self.values_type which contains
+ the voxel grid which will be interpolated to create the new grid
+ epoch: epoch which is used to get the resolution of the new
+ `grid_values` using `self.resolution_changes`.
+ grid_values_with_wanted_resolution: `VoxelGridValuesBase` to whose resolution
+ to interpolate grid_values
+ align_corners: as for torch.nn.functional.interpolate
+ mode: as for torch.nn.functional.interpolate
+ 'nearest' | 'bicubic' | 'linear' | 'area' | 'nearest-exact'.
+ Default: 'linear'
+ antialias: as for torch.nn.functional.interpolate.
+ Using anti-alias option
+ together with align_corners=False and mode='bicubic', interpolation
+ result would match Pillow result for downsampling operation.
+ Supported mode: 'bicubic'
+ Returns:
+ tuple of
+ - new voxel grid_values of desired resolution, of type self.values_type
+ - True if regridding has happened.
+ """
+
+ if (epoch is None) == (grid_values_with_wanted_resolution is None):
+ raise ValueError(
+ "Exactly one of `epoch` or "
+ "`grid_values_with_wanted_resolution` has to be defined."
+ )
+
+ if mode not in ("nearest", "bicubic", "linear", "area", "nearest-exact"):
+ raise ValueError(
+ "`mode` should be one of the following 'nearest'"
+ + "| 'bicubic' | 'linear' | 'area' | 'nearest-exact'"
+ )
+
+ interpolate_has_antialias = LooseVersion(torch.__version__) >= "1.11"
+
+ if antialias and not interpolate_has_antialias:
+ warnings.warn("Antialiased interpolation requires PyTorch 1.11+; ignoring")
+
+ interp_kwargs = {"antialias": antialias} if interpolate_has_antialias else {}
+
+ def change_individual_resolution(tensor, wanted_resolution):
+ if mode == "linear":
+ n_dim = len(wanted_resolution)
+ new_mode = ("linear", "bilinear", "trilinear")[n_dim - 1]
+ else:
+ new_mode = mode
+ return torch.nn.functional.interpolate(
+ input=tensor,
+ size=wanted_resolution,
+ mode=new_mode,
+ align_corners=align_corners,
+ recompute_scale_factor=False,
+ **interp_kwargs,
+ )
+
+ if epoch is not None:
+ if epoch not in self.resolution_changes:
+ return grid_values, False
+
+ wanted_shapes = self.get_shapes(epoch=epoch)
+ params = {
+ name: change_individual_resolution(
+ getattr(grid_values, name), shape[1:]
+ )
+ for name, shape in wanted_shapes.items()
+ }
+ res = self.get_resolution(epoch)
+ logger.info(f"Changed grid resolutiuon at epoch {epoch} to {res}")
+ else:
+ params = {
+ name: (
+ change_individual_resolution(
+ getattr(grid_values, name), tensor.shape[2:]
+ )
+ if tensor is not None
+ else None
+ )
+ for name, tensor in vars(grid_values_with_wanted_resolution).items()
+ }
+
+ return self.values_type(**params), True
+
+ def get_resolution_change_epochs(self) -> Tuple[int, ...]:
+ """
+ Returns epochs at which this grid should change epochs.
+ """
+ return tuple(self.resolution_changes.keys())
+
+ def get_align_corners(self) -> bool:
+ """
+ Returns True if voxel grid uses align_corners=True
+ """
+ return self.align_corners
+
+ def crop_world(
+ self,
+ min_point_world: torch.Tensor,
+ max_point_world: torch.Tensor,
+ grid_values: VoxelGridValuesBase,
+ volume_locator: VolumeLocator,
+ ) -> VoxelGridValuesBase:
+ """
+ Crops the voxel grid based on minimum and maximum occupied point in
+ world coordinates. After cropping all 8 corner points are preserved in
+ the voxel grid. This is achieved by preserving all the voxels needed to
+ calculate the point.
+
+ +--------B
+ / /|
+ / / |
+ +--------+ | <==== Bounding box represented by points A and B:
+ | | | - B has x, y and z coordinates bigger or equal
+ | | + to all other points of the object
+ | | / - A has x, y and z coordinates smaller or equal
+ | |/ to all other points of the object
+ A--------+
+
+ Args:
+ min_point_world: torch.Tensor of shape (3,). Has x, y and z coordinates
+ smaller or equal to all other occupied points. Point A from the
+ picture above.
+ max_point_world: torch.Tensor of shape (3,). Has x, y and z coordinates
+ bigger or equal to all other occupied points. Point B from the
+ picture above.
+ grid_values: instance of self.values_type which contains
+ the voxel grid which will be cropped to create the new grid
+ volume_locator: VolumeLocator object used to convert world to local
+ cordinates
+ Returns:
+ instance of self.values_type which has volume cropped to desired size.
+ """
+ min_point_local = volume_locator.world_to_local_coords(min_point_world[None])[0]
+ max_point_local = volume_locator.world_to_local_coords(max_point_world[None])[0]
+ return self.crop_local(min_point_local, max_point_local, grid_values)
+
+ def crop_local(
+ self,
+ min_point_local: torch.Tensor,
+ max_point_local: torch.Tensor,
+ grid_values: VoxelGridValuesBase,
+ ) -> VoxelGridValuesBase:
+ """
+ Crops the voxel grid based on minimum and maximum occupied point in local
+ coordinates. After cropping both min and max point are preserved in the voxel
+ grid. This is achieved by preserving all the voxels needed to calculate the point.
+
+ +--------B
+ / /|
+ / / |
+ +--------+ | <==== Bounding box represented by points A and B:
+ | | | - B has x, y and z coordinates bigger or equal
+ | | + to all other points of the object
+ | | / - A has x, y and z coordinates smaller or equal
+ | |/ to all other points of the object
+ A--------+
+
+ Args:
+ min_point_local: torch.Tensor of shape (3,). Has x, y and z coordinates
+ smaller or equal to all other occupied points. Point A from the
+ picture above. All elements in [-1, 1].
+ max_point_local: torch.Tensor of shape (3,). Has x, y and z coordinates
+ bigger or equal to all other occupied points. Point B from the
+ picture above. All elements in [-1, 1].
+ grid_values: instance of self.values_type which contains
+ the voxel grid which will be cropped to create the new grid
+ Returns:
+ instance of self.values_type which has volume cropped to desired size.
+ """
+ raise NotImplementedError()
+
+
@dataclass
class FullResolutionVoxelGridValues(VoxelGridValuesBase):
    """
    Holds the tensors needed to evaluate a `FullResolutionVoxelGrid`.
    """

    # Dense feature grid; indexed elsewhere as a 5D tensor
    # (n_grids, n_features, width, height, depth).
    voxel_grid: torch.Tensor
+
+
@registry.register
class FullResolutionVoxelGrid(VoxelGridBase):
    """
    Dense (non-factorized) voxel grid, equivalent to a 4D tensor of shape
    (features, width, height, depth), queried with linear interpolation
    between neighbouring voxels.
    """

    # the type of grid_values argument needed to run evaluate_local()
    values_type: ClassVar[Type[VoxelGridValuesBase]] = FullResolutionVoxelGridValues

    # pyre-fixme[14]: `evaluate_local` overrides method defined in `VoxelGridBase`
    # inconsistently.
    def evaluate_local(
        self, points: torch.Tensor, grid_values: FullResolutionVoxelGridValues
    ) -> torch.Tensor:
        """
        Samples the dense grid at `points` given in the local coordinate frame.
        The interpolation type is determined by the `mode` member.

        Args:
            points: tensor of query points of shape (..., 3), with normalized
                coordinates (each element in [-1, 1]).
            grid_values: object of type values_type whose tensor members have
                shapes derived from the get_shapes() method.
        Returns:
            torch.Tensor of shape (n_grids, ..., n_features).
        """
        # Flatten the middle dimensions so the interpolation helper sees
        # (n_grids, n_points_total, 3); restore them afterwards.
        original_shape = points.shape
        flat_points = points.view(original_shape[0], -1, original_shape[-1])
        sampled = interpolate_volume(
            flat_points,
            grid_values.voxel_grid,
            align_corners=self.align_corners,
            padding_mode=self.padding,
            mode=self.mode,
        )
        return sampled.view(*original_shape[:-1], -1)

    def get_shapes(self, epoch: int) -> Dict[str, Tuple]:
        """
        Shapes (without the leading n_grids dim) of the tensors needed at `epoch`.
        """
        width, height, depth = self.get_resolution(epoch)
        return {"voxel_grid": (self.n_features, width, height, depth)}

    # pyre-ignore[14]
    def crop_local(
        self,
        min_point_local: torch.Tensor,
        max_point_local: torch.Tensor,
        grid_values: FullResolutionVoxelGridValues,
    ) -> FullResolutionVoxelGridValues:
        """
        Crops the dense grid to the bounding box spanned by the two corner
        points (in [-1, 1] local coordinates), keeping every voxel needed to
        interpolate both corners.
        """
        assert torch.all(min_point_local < max_point_local)
        low = torch.clamp(min_point_local, -1, 1)
        high = torch.clamp(max_point_local, -1, 1)
        grid = grid_values.voxel_grid
        resolution = grid.new_tensor(list(grid.shape[2:]))
        # Map [-1, 1] to [0, 1] before converting to voxel indices.
        low01 = (low + 1) / 2
        high01 = (high + 1) / 2

        if self.align_corners:
            minx, miny, minz = torch.floor(low01 * (resolution - 1)).long()
            maxx, maxy, maxz = torch.ceil(high01 * (resolution - 1)).long()
        else:
            minx, miny, minz = torch.floor(low01 * resolution - 0.5).long()
            maxx, maxy, maxz = torch.ceil(high01 * resolution - 0.5).long()

        return FullResolutionVoxelGridValues(
            voxel_grid=grid[:, :, minx : maxx + 1, miny : maxy + 1, minz : maxz + 1]
        )
+
+
@dataclass
class CPFactorizedVoxelGridValues(VoxelGridValuesBase):
    """
    Holds the tensors needed to evaluate a `CPFactorizedVoxelGrid`.
    """

    # Per-axis component vectors; each is indexed elsewhere as a 3D tensor
    # (n_grids, n_components, resolution_of_that_axis).
    vector_components_x: torch.Tensor
    vector_components_y: torch.Tensor
    vector_components_z: torch.Tensor
    # Optional projection from components to features, used as the right-hand
    # side of a batch matmul (n_grids, n_components, n_features);
    # None when the grid is configured with basis_matrix=False.
    basis_matrix: Optional[torch.Tensor] = None
+
+
@registry.register
class CPFactorizedVoxelGrid(VoxelGridBase):
    """
    Voxel grid stored as a Canonical Polyadic (CP/CANDECOMP/PARAFAC)
    factorization. The 3d grid is represented by three sets of vectors
    (x, y, z); for n_components=n it is the sum of outer products (call it ⊗)
    of one vector of each type:

        3d_grid = x0 ⊗ y0 ⊗ z0 + x1 ⊗ y1 ⊗ z1 + ... + xn ⊗ yn ⊗ zn

    The vectors are passed in a CPFactorizedVoxelGridValues object (here obj)
    as obj.vector_components_x, obj.vector_components_y and
    obj.vector_components_z, with shapes `(n_components, r)` where `r` is the
    resolution of the corresponding axis.

    Each summand carries an extra component dimension, which is reduced to
    "n_features" by a batch matrix multiplication with a "basis matrix" of
    shape (n_grids, n_components, n_features). If basis_matrix=False the
    components are instead summed together, producing a single feature.

    Members:
        n_components: number of vector triplets, higher number gives better
            approximation.
        basis_matrix: how to transform components. If matrix_reduction=True the
            result matrix of shape (n_grids, n_points_total, n_components) is
            batch matrix multiplied by the basis_matrix of shape
            (n_grids, n_components, n_features). If matrix_reduction=False, the
            result tensor of (n_grids, n_points_total, n_components) is summed
            along the rows to get (n_grids, n_points_total, 1), which is then
            viewed to return to starting shape (n_grids, ..., 1).
    """

    # the type of grid_values argument needed to run evaluate_local()
    values_type: ClassVar[Type[VoxelGridValuesBase]] = CPFactorizedVoxelGridValues

    n_components: int = 24
    basis_matrix: bool = True

    # pyre-fixme[14]: `evaluate_local` overrides method defined in `VoxelGridBase`
    # inconsistently.
    def evaluate_local(
        self, points: torch.Tensor, grid_values: CPFactorizedVoxelGridValues
    ) -> torch.Tensor:
        """
        Samples the CP-factorized grid at `points` in the local ([-1, 1])
        coordinate frame and reduces components to features.
        """
        axis_to_index = {"x": 0, "y": 1, "z": 2}

        def sample_axis(axis: str) -> torch.Tensor:
            # Interpolate this axis' component vectors at the matching
            # coordinate of every query point.
            coord = flat_points[..., axis_to_index[axis], None]
            components = getattr(grid_values, "vector_components_" + axis)
            return interpolate_line(
                coord,
                components,
                align_corners=self.align_corners,
                padding_mode=self.padding,
                mode=self.mode,
            )

        # (n_grids, n_points_total, 3) from (n_grids, ..., 3)
        recorded_shape = points.shape
        flat_points = points.view(recorded_shape[0], -1, recorded_shape[-1])

        # Outer-product structure: multiply the per-axis samples elementwise.
        product = sample_axis("x") * sample_axis("y") * sample_axis("z")

        if grid_values.basis_matrix is not None:
            # (n_grids, n_points_total, total_n_components) @
            # (n_grids, total_n_components, n_features)
            reduced = torch.bmm(product, grid_values.basis_matrix)
        else:
            # Collapse the component dimension to a single feature.
            reduced = product.sum(axis=-1, keepdim=True)
        # back to (n_grids, ..., n_features)
        return reduced.view(*recorded_shape[:-1], -1)

    def get_shapes(self, epoch: int) -> Dict[str, Tuple[int, int]]:
        """
        Shapes (without the leading n_grids dim) of the tensors needed at `epoch`.
        """
        if self.basis_matrix is False and self.n_features != 1:
            raise ValueError("Cannot set basis_matrix=False and n_features to != 1")

        width, height, depth = self.get_resolution(epoch=epoch)
        shapes = {
            "vector_components_x": (self.n_components, width),
            "vector_components_y": (self.n_components, height),
            "vector_components_z": (self.n_components, depth),
        }
        if self.basis_matrix:
            shapes["basis_matrix"] = (self.n_components, self.n_features)
        return shapes

    # pyre-ignore[14]
    def crop_local(
        self,
        min_point_local: torch.Tensor,
        max_point_local: torch.Tensor,
        grid_values: CPFactorizedVoxelGridValues,
    ) -> CPFactorizedVoxelGridValues:
        """
        Crops each component vector to the index range covering the bounding
        box spanned by the two corner points (in [-1, 1] local coordinates).
        """
        assert torch.all(min_point_local < max_point_local)
        low = torch.clamp(min_point_local, -1, 1)
        high = torch.clamp(max_point_local, -1, 1)
        width = grid_values.vector_components_x.shape[-1]
        height = grid_values.vector_components_y.shape[-1]
        depth = grid_values.vector_components_z.shape[-1]
        resolution = grid_values.vector_components_x.new_tensor(
            [width, height, depth]
        )
        # Map [-1, 1] to [0, 1] before converting to voxel indices.
        low01 = (low + 1) / 2
        high01 = (high + 1) / 2

        if self.align_corners:
            minx, miny, minz = torch.floor(low01 * (resolution - 1)).long()
            maxx, maxy, maxz = torch.ceil(high01 * (resolution - 1)).long()
        else:
            minx, miny, minz = torch.floor(low01 * resolution - 0.5).long()
            maxx, maxy, maxz = torch.ceil(high01 * resolution - 0.5).long()

        return CPFactorizedVoxelGridValues(
            vector_components_x=grid_values.vector_components_x[:, :, minx : maxx + 1],
            vector_components_y=grid_values.vector_components_y[:, :, miny : maxy + 1],
            vector_components_z=grid_values.vector_components_z[:, :, minz : maxz + 1],
            basis_matrix=grid_values.basis_matrix,
        )
+
+
@dataclass
class VMFactorizedVoxelGridValues(VoxelGridValuesBase):
    """
    Holds the tensors needed to evaluate a `VMFactorizedVoxelGrid`.
    """

    # Per-axis component vectors; each is indexed elsewhere as a 3D tensor
    # (n_grids, n_components, resolution_of_that_axis).
    vector_components_x: torch.Tensor
    vector_components_y: torch.Tensor
    vector_components_z: torch.Tensor
    # Plane components; each is indexed elsewhere as a 4D tensor
    # (n_grids, n_components, r0, r1). Each plane is paired with the vector
    # of the remaining axis (xy with z, yz with x, xz with y).
    matrix_components_xy: torch.Tensor
    matrix_components_yz: torch.Tensor
    matrix_components_xz: torch.Tensor
    # Optional projection from components to features, used as the right-hand
    # side of a batch matmul (n_grids, total_n_components, n_features);
    # None when the grid is configured with basis_matrix=False.
    basis_matrix: Optional[torch.Tensor] = None
+
+
@registry.register
class VMFactorizedVoxelGrid(VoxelGridBase):
    """
    Implementation of Vector-Matrix Factorization of a tensor from
    https://arxiv.org/abs/2203.09517.

    Vector-Matrix Factorization factorizes the 3d grid into three matrices
    (xy, xz, yz) and three vectors (x, y, z). For n_components=1, the 3d grid
    is a sum of the outer products (call it ⊗) of each matrix with its
    complementary vector:

    3d_grid = xy ⊗ z + xz ⊗ y + yz ⊗ x.

    These tensors are passed in a VMFactorizedVoxelGridValues object (here obj)
    as obj.matrix_components_xy, obj.matrix_components_xz, obj.vector_components_y, etc.

    Their shapes are `(n_grids, n_components, r0, r1)` for matrix_components and
    `(n_grids, n_components, r2)` for vector_components. Each of `r0, r1 and r2`
    corresponds to one resolution in (width, height and depth).

    Each element of this sum has an extra dimension, which gets matrix-multiplied by an
    appropriate "basis matrix" of shape (n_grids, n_components, n_features). This multiplication
    brings us to the desired "n_features" dimensionality. If basis_matrix=False the elements
    of different components are summed together to create (n_grids, n_components, 1) tensor.
    With some notation abuse, ignoring the interpolation operation, simplifying and denoting
    n_features as F, n_components as C (which can differ for each dimension) and n_grids as G:

    3d_grid = concat((xy ⊗ z), (xz ⊗ y).permute(0, 2, 1),
                     (yz ⊗ x).permute(2, 0, 1)) @ basis_matrix  # GWHDC x GCF -> GWHDF

    Members:
        n_components: total number of matrix vector pairs, this must be divisible by 3. Set
            this if you want to have equal representational power in all 3 directions. You
            must specify either n_components or distribution_of_components, you cannot
            specify both.
        distribution_of_components: if you do not want equal representational power in
            all 3 directions specify a tuple of numbers of matrix_vector pairs for each
            coordinate of a form (n_xy_planes, n_yz_planes, n_xz_planes). You must specify
            either n_components or distribution_of_components, you cannot specify both.
        basis_matrix: how to transform components. If matrix_reduction=True result
            matrix of shape (n_grids, n_points_total, n_components) is batch matrix multiplied
            by the basis_matrix of shape (n_grids, n_components, n_features). If
            matrix_reduction=False, the result tensor of (n_grids, n_points_total, n_components)
            is summed along the rows to get (n_grids, n_points_total, 1), which is then viewed
            to return to starting shape (n_grids, ..., 1).
    """

    # the type of grid_values argument needed to run evaluate_local()
    values_type: ClassVar[Type[VoxelGridValuesBase]] = VMFactorizedVoxelGridValues

    n_components: Optional[int] = None
    distribution_of_components: Optional[Tuple[int, int, int]] = None
    basis_matrix: bool = True

    # pyre-fixme[14]: `evaluate_local` overrides method defined in `VoxelGridBase`
    # inconsistently.
    def evaluate_local(
        self, points: torch.Tensor, grid_values: VMFactorizedVoxelGridValues
    ) -> torch.Tensor:
        """
        Evaluates the VM-factorized grid at points in the local coordinate
        frame (coordinates in [-1, 1]).

        Arguments:
            points: tensor of query points of a form (..., 3).
            grid_values: an object of type values_type which has tensors as
                members which have shapes derived from the get_shapes() method.
        Returns:
            torch.Tensor: shape (n_grids, ..., n_features)
        """
        # (n_grids, n_points_total, n_features) from (n_grids, ..., n_features)
        recorded_shape = points.shape
        points = points.view(points.shape[0], -1, points.shape[-1])

        # collect points from matrices and vectors and multiply them.
        # Each plane is paired with the vector of the remaining axis:
        # a = xy-plane * z-vector, b = xz-plane * y-vector, c = yz-plane * x-vector.
        a = interpolate_plane(
            points[..., :2],
            grid_values.matrix_components_xy,
            align_corners=self.align_corners,
            padding_mode=self.padding,
            mode=self.mode,
        ) * interpolate_line(
            points[..., 2:],
            grid_values.vector_components_z,
            align_corners=self.align_corners,
            padding_mode=self.padding,
            mode=self.mode,
        )
        b = interpolate_plane(
            points[..., [0, 2]],
            grid_values.matrix_components_xz,
            align_corners=self.align_corners,
            padding_mode=self.padding,
            mode=self.mode,
        ) * interpolate_line(
            points[..., 1:2],
            grid_values.vector_components_y,
            align_corners=self.align_corners,
            padding_mode=self.padding,
            mode=self.mode,
        )
        c = interpolate_plane(
            points[..., 1:],
            grid_values.matrix_components_yz,
            align_corners=self.align_corners,
            padding_mode=self.padding,
            mode=self.mode,
        ) * interpolate_line(
            points[..., :1],
            grid_values.vector_components_x,
            align_corners=self.align_corners,
            padding_mode=self.padding,
            mode=self.mode,
        )
        # pyre-ignore[28]
        feats = torch.cat((a, b, c), axis=-1)

        # reduce the result from
        # (n_grids, n_points, n_components) to (n_grids, n_points, n_features)
        if grid_values.basis_matrix is not None:
            # (n_grids, n_points, n_features) =
            # (n_grids, n_points, total_n_components) x
            # (n_grids, total_n_components, n_features)
            result = torch.bmm(feats, grid_values.basis_matrix)
        else:
            # pyre-ignore[28]
            # (n_grids, n_points, 1) from (n_grids, n_points, n_components)
            result = feats.sum(axis=-1, keepdim=True)
        # (n_grids, ..., n_features)
        return result.view(*recorded_shape[:-1], -1)

    def get_shapes(self, epoch: int) -> Dict[str, Tuple]:
        """
        Shapes (without the leading n_grids dim) of the tensors needed at `epoch`.

        Raises:
            ValueError: on inconsistent basis_matrix/n_features settings, or
                when exactly one of n_components / distribution_of_components
                is not supplied, or when n_components is not divisible by 3.
        """
        if self.basis_matrix is False and self.n_features != 1:
            raise ValueError("Cannot set basis_matrix=False and n_features to != 1")
        if self.distribution_of_components is None and self.n_components is None:
            raise ValueError(
                "You need to provide n_components or distribution_of_components"
            )
        if (
            self.distribution_of_components is not None
            and self.n_components is not None
        ):
            raise ValueError(
                "You cannot define n_components and distribution_of_components"
            )
        # pyre-ignore[58]
        if self.distribution_of_components is None and self.n_components % 3 != 0:
            raise ValueError("n_components must be divisible by 3")
        if self.distribution_of_components is None:
            calculated_distribution_of_components = [
                # pyre-fixme[58]: `//` is not supported for operand types
                # `Optional[int]` and `int`.
                self.n_components // 3
                for _ in range(3)
            ]
        else:
            calculated_distribution_of_components = self.distribution_of_components

        width, height, depth = self.get_resolution(epoch=epoch)
        # Component counts are paired plane-with-complementary-vector, matching
        # the products in `evaluate_local`: index 0 = xy-plane + z-vector,
        # index 1 = yz-plane + x-vector, index 2 = xz-plane + y-vector.
        shape_dict = {
            "vector_components_x": (
                calculated_distribution_of_components[1],
                width,
            ),
            "vector_components_y": (
                calculated_distribution_of_components[2],
                height,
            ),
            "vector_components_z": (
                calculated_distribution_of_components[0],
                depth,
            ),
            "matrix_components_xy": (
                calculated_distribution_of_components[0],
                width,
                height,
            ),
            "matrix_components_yz": (
                calculated_distribution_of_components[1],
                height,
                depth,
            ),
            "matrix_components_xz": (
                calculated_distribution_of_components[2],
                width,
                depth,
            ),
        }
        if self.basis_matrix:
            shape_dict["basis_matrix"] = (
                sum(calculated_distribution_of_components),
                self.n_features,
            )

        return shape_dict

    # pyre-ignore[14]
    def crop_local(
        self,
        min_point_local: torch.Tensor,
        max_point_local: torch.Tensor,
        grid_values: VMFactorizedVoxelGridValues,
    ) -> VMFactorizedVoxelGridValues:
        """
        Crops each vector and matrix component to the index ranges covering
        the bounding box spanned by the two corner points (in [-1, 1] local
        coordinates).
        """
        assert torch.all(min_point_local < max_point_local)
        min_point_local = torch.clamp(min_point_local, -1, 1)
        max_point_local = torch.clamp(max_point_local, -1, 1)
        _, _, width = grid_values.vector_components_x.shape
        _, _, height = grid_values.vector_components_y.shape
        _, _, depth = grid_values.vector_components_z.shape
        resolution = grid_values.vector_components_x.new_tensor([width, height, depth])
        # Map [-1, 1] to [0, 1] before converting to voxel indices.
        min_point_local01 = (min_point_local + 1) / 2
        max_point_local01 = (max_point_local + 1) / 2

        if self.align_corners:
            minx, miny, minz = torch.floor(min_point_local01 * (resolution - 1)).long()
            maxx, maxy, maxz = torch.ceil(max_point_local01 * (resolution - 1)).long()
        else:
            minx, miny, minz = torch.floor(min_point_local01 * resolution - 0.5).long()
            maxx, maxy, maxz = torch.ceil(max_point_local01 * resolution - 0.5).long()

        return VMFactorizedVoxelGridValues(
            vector_components_x=grid_values.vector_components_x[:, :, minx : maxx + 1],
            vector_components_y=grid_values.vector_components_y[:, :, miny : maxy + 1],
            vector_components_z=grid_values.vector_components_z[:, :, minz : maxz + 1],
            matrix_components_xy=grid_values.matrix_components_xy[
                :, :, minx : maxx + 1, miny : maxy + 1
            ],
            matrix_components_yz=grid_values.matrix_components_yz[
                :, :, miny : maxy + 1, minz : maxz + 1
            ],
            matrix_components_xz=grid_values.matrix_components_xz[
                :, :, minx : maxx + 1, minz : maxz + 1
            ],
            basis_matrix=grid_values.basis_matrix,
        )
+
+
class VoxelGridModule(Configurable, torch.nn.Module):
    """
    A wrapper torch.nn.Module for the VoxelGrid classes, which
    contains parameters that are needed to train the VoxelGrid classes.
    Can contain the parameters for the voxel grid as pytorch parameters
    or as registered buffers.

    Members:
        voxel_grid_class_type: The name of the class to use for voxel_grid,
            which must be available in the registry. Default FullResolutionVoxelGrid.
        voxel_grid: An instance of `VoxelGridBase`. This is the object which
            this class wraps.
        extents: 3-tuple of a form (width, height, depth), denotes the size of the grid
            in world units.
        translation: 3-tuple of float. The center of the volume in world units as (x, y, z).
        init_std: Parameters are initialized using the gaussian distribution
            with mean=init_mean and std=init_std. Default 0.1
        init_mean: Parameters are initialized using the gaussian distribution
            with mean=init_mean and std=init_std. Default 0.
        hold_voxel_grid_as_parameters: if True components of the underlying voxel grids
            will be saved as parameters and therefore be trainable. Default True.
        param_groups: dictionary where keys are names of individual parameters
            or module members and values are the parameter group where the
            parameter/member will be sorted to. "self" key is used to denote the
            parameter group at the module level. Possible keys, including the "self" key
            do not have to be defined. By default all parameters are put into "default"
            parameter group and have the learning rate defined in the optimizer,
            it can be overridden at the:
                - module level with "self" key, all the parameters and child
                    module's parameters will be put to that parameter group
                - member level, which is the same as if the `param_groups` in that
                    member has key="self" and value equal to that parameter group.
                    This is useful if members do not have `param_groups`, for
                    example torch.nn.Linear.
                - parameter level, parameter with the same name as the key
                    will be put to that parameter group.
    """

    voxel_grid_class_type: str = "FullResolutionVoxelGrid"
    # pyre-fixme[13]: Attribute `voxel_grid` is never initialized.
    voxel_grid: VoxelGridBase

    extents: Tuple[float, float, float] = (2.0, 2.0, 2.0)
    translation: Tuple[float, float, float] = (0.0, 0.0, 0.0)

    init_std: float = 0.1
    init_mean: float = 0

    hold_voxel_grid_as_parameters: bool = True
    param_groups: Dict[str, str] = field(default_factory=lambda: {})

    def __post_init__(self):
        run_auto_creation(self)
        n_grids = 1  # Voxel grid objects are batched. We need only a single grid.
        shapes = self.voxel_grid.get_shapes(epoch=0)
        # Initialize every grid tensor from N(init_mean, init_std).
        params = {
            name: torch.normal(
                mean=torch.zeros((n_grids, *shape)) + self.init_mean,
                std=self.init_std,
            )
            for name, shape in shapes.items()
        }

        self.set_voxel_grid_parameters(self.voxel_grid.values_type(**params))
        # Parameter shapes can change during training (resolution changes), so
        # they must be recreated with the checkpointed sizes before a state
        # dict is loaded.
        self._register_load_state_dict_pre_hook(self._create_parameters_with_new_size)

    def forward(self, points: torch.Tensor) -> torch.Tensor:
        """
        Evaluates points in the world coordinate frame on the voxel_grid.

        Args:
            points (torch.Tensor): tensor of points that you want to query
                of a form (..., 3)
        Returns:
            torch.Tensor of shape (..., n_features)
        """
        locator = self._get_volume_locator()
        grid_values = self.voxel_grid.values_type(**self.params)
        # voxel grids operate with extra n_grids dimension, which we fix to one
        return self.voxel_grid.evaluate_world(points[None], grid_values, locator)[0]

    def set_voxel_grid_parameters(self, params: VoxelGridValuesBase) -> None:
        """
        Sets the parameters of the underlying voxel grid.

        Args:
            params: parameters of type `self.voxel_grid.values_type` which will
                replace current parameters
        """
        if self.hold_voxel_grid_as_parameters:
            self.params = torch.nn.ParameterDict(
                {
                    k: torch.nn.Parameter(val)
                    for k, val in vars(params).items()
                    if val is not None
                }
            )
        else:
            # Torch Module to hold parameters since they can only be registered
            # at object level.
            self.params = _RegistratedBufferDict(vars(params))

    @staticmethod
    def get_output_dim(args: DictConfig) -> int:
        """
        Utility to help predict the shape of the output of `forward`.

        Args:
            args: DictConfig which would be used to initialize the object
        Returns:
            int: the length of the last dimension of the output tensor
        """
        grid = registry.get(VoxelGridBase, args["voxel_grid_class_type"])
        return grid.get_output_dim(
            args["voxel_grid_" + args["voxel_grid_class_type"] + "_args"]
        )

    def subscribe_to_epochs(self) -> Tuple[Tuple[int, ...], Callable[[int], bool]]:
        """
        Method which expresses interest in subscribing to optimization epoch updates.

        Returns:
            tuple of epochs on which to call a callable and callable to be called on
            particular epoch. The callable returns True if parameter change has
            happened else False and it must be supplied with one argument, epoch.
        """
        return self.voxel_grid.get_resolution_change_epochs(), self._apply_epochs

    def _apply_epochs(self, epoch: int) -> bool:
        """
        Asks voxel_grid to change the resolution.
        This method is returned with subscribe_to_epochs and is the method that collects
        updates on training epochs, it is run on the training epochs that are requested.

        Args:
            epoch: current training epoch used for voxel grids to know to which
                resolution to change
        Returns:
            True if parameter change has happened else False.
        """
        grid_values = self.voxel_grid.values_type(**self.params)
        grid_values, change = self.voxel_grid.change_resolution(
            grid_values, epoch=epoch
        )
        if change:
            self.set_voxel_grid_parameters(grid_values)
        # NOTE(review): buffer-mode (hold_voxel_grid_as_parameters=False)
        # changes are deliberately reported as False here — presumably because
        # only trainable parameters require downstream (optimizer) updates;
        # confirm against the caller of subscribe_to_epochs.
        return change and self.hold_voxel_grid_as_parameters

    def _create_parameters_with_new_size(
        self,
        state_dict: dict,
        prefix: str,
        local_metadata: dict,
        strict: bool,
        missing_keys: List[str],
        unexpected_keys: List[str],
        error_msgs: List[str],
    ) -> None:
        """
        Automatically run before loading the parameters with `load_state_dict()`.
        Creates new parameters with the sizes of the ones in the loaded state dict.
        This is necessary because the parameters are changing throughout training and
        at the time of construction `VoxelGridModule` does not know the size of
        parameters which will be loaded.

        Args:
            state_dict (dict): a dict containing parameters and
                persistent buffers.
            prefix (str): the prefix for parameters and buffers used in this
                module
            local_metadata (dict): a dict containing the metadata for this module.
            strict (bool): whether to strictly enforce that the keys in
                :attr:`state_dict` with :attr:`prefix` match the names of
                parameters and buffers in this module
            missing_keys (list of str): if ``strict=True``, add missing keys to
                this list
            unexpected_keys (list of str): if ``strict=True``, add unexpected
                keys to this list
            error_msgs (list of str): error messages should be added to this
                list, and will be reported together in
                :meth:`~torch.nn.Module.load_state_dict`
        Returns:
            nothing
        """
        new_params = {}
        for name in self.params:
            key = prefix + "params." + name
            if key in state_dict:
                # Only the shape matters here; the values are overwritten by
                # the actual load which runs after this hook.
                new_params[name] = torch.zeros_like(state_dict[key])
        self.set_voxel_grid_parameters(self.voxel_grid.values_type(**new_params))

    def get_device(self) -> torch.device:
        """
        Returns torch.device on which module parameters are located
        """
        return next(val for val in self.params.values() if val is not None).device

    def crop_self(self, min_point: torch.Tensor, max_point: torch.Tensor) -> None:
        """
        Crops self to only represent points between min_point and max_point (inclusive).

        Args:
            min_point: torch.Tensor of shape (3,). Has x, y and z coordinates
                smaller or equal to all other occupied points.
            max_point: torch.Tensor of shape (3,). Has x, y and z coordinates
                bigger or equal to all other occupied points.
        Returns:
            nothing
        """
        locator = self._get_volume_locator()
        old_grid_values = self.voxel_grid.values_type(**self.params)
        new_grid_values = self.voxel_grid.crop_world(
            min_point, max_point, old_grid_values, locator
        )
        # Resample the cropped grid back to the pre-crop resolution.
        grid_values, _ = self.voxel_grid.change_resolution(
            new_grid_values, grid_values_with_wanted_resolution=old_grid_values
        )
        # NOTE(review): this always stores a ParameterDict instead of calling
        # `set_voxel_grid_parameters`, so `hold_voxel_grid_as_parameters=False`
        # is ignored by cropping — confirm whether that is intentional.
        self.params = torch.nn.ParameterDict(
            {
                k: torch.nn.Parameter(val)
                for k, val in vars(grid_values).items()
                if val is not None
            }
        )
        # New center of voxel grid is the middle point between max and min points.
        self.translation = tuple((max_point + min_point) / 2)
        # new extents of voxel grid are distances between min and max points
        self.extents = tuple(max_point - min_point)

    def _get_volume_locator(self) -> VolumeLocator:
        """
        Returns VolumeLocator calculated from `extents` and `translation` members.
        """
        return VolumeLocator(
            batch_size=1,
            # The resolution of the voxel grid does not need to be known
            # to the locator object. It is easiest to fix the resolution of the locator.
            # In particular we fix it to (2,2,2) so that there is exactly one voxel of the
            # desired size. The locator object uses (z, y, x) convention for the grid_size,
            # and this module uses (x, y, z) convention so the order has to be reversed
            # (irrelevant in this case since they are all equal).
            # It is (2, 2, 2) because the VolumeLocator object behaves like
            # align_corners=True, which means that the points are in the corners of
            # the volume. So in the grid of (2, 2, 2) there is only one voxel.
            grid_sizes=(2, 2, 2),
            # The locator object uses (x, y, z) convention for the
            # voxel size and translation.
            voxel_size=tuple(self.extents),
            # volume_translation is defined in `VolumeLocator` as a vector from the origin
            # of local coordinate frame to origin of world coordinate frame, that is:
            # x_world = x_local * extents/2 - translation.
            # To get the reverse we need to negate it.
            volume_translation=tuple(-t for t in self.translation),
            device=self.get_device(),
        )

    def get_grid_points(self, epoch: int) -> torch.Tensor:
        """
        Returns a grid of points that represent centers of voxels of the
        underlying voxel grid in world coordinates at specific epoch.

        Args:
            epoch: underlying voxel grids change resolution depending on the
                epoch, this argument is used to determine the resolution
                of the voxel grid at that epoch.
        Returns:
            tensor of shape [xresolution, yresolution, zresolution, 3] where
            xresolution, yresolution, zresolution are resolutions of the
            underlying voxel grid
        """
        xresolution, yresolution, zresolution = self.voxel_grid.get_resolution(epoch)
        width, height, depth = self.extents
        if not self.voxel_grid.get_align_corners():
            # Shrink each span by one voxel (half a voxel at each end) so the
            # linspace below lands on voxel centers. Each axis must be scaled
            # by its own resolution; previously height and depth incorrectly
            # reused xresolution.
            width = (
                width * (xresolution - 1) / xresolution if xresolution > 1 else width
            )
            height = (
                height * (yresolution - 1) / yresolution if yresolution > 1 else height
            )
            depth = (
                depth * (zresolution - 1) / zresolution if zresolution > 1 else depth
            )
        xs = torch.linspace(
            -width / 2, width / 2, xresolution, device=self.get_device()
        )
        ys = torch.linspace(
            -height / 2, height / 2, yresolution, device=self.get_device()
        )
        zs = torch.linspace(
            -depth / 2, depth / 2, zresolution, device=self.get_device()
        )
        xmesh, ymesh, zmesh = torch.meshgrid(xs, ys, zs, indexing="ij")
        return torch.stack((xmesh, ymesh, zmesh), dim=3)
+
+
+class _RegistratedBufferDict(torch.nn.Module, Mapping):
+ """
+ Mapping class and a torch.nn.Module that registeres its values
+ with `self.register_buffer`. Can be indexed like a regular Python
+ dictionary, but torch.Tensors it contains are properly registered, and will be visible
+ by all Module methods. Supports only `torch.Tensor` as value and str as key.
+ """
+
+ def __init__(self, init_dict: Optional[Dict[str, torch.Tensor]] = None) -> None:
+ """
+ Args:
+ init_dict: dictionary which will be used to populate the object
+ """
+ super().__init__()
+ self._keys = set()
+ if init_dict is not None:
+ for k, v in init_dict.items():
+ self[k] = v
+
+ def __iter__(self) -> Iterator[Dict[str, torch.Tensor]]:
+ return iter({k: self[k] for k in self._keys})
+
+ def __len__(self) -> int:
+ return len(self._keys)
+
+ def __getitem__(self, key: str) -> torch.Tensor:
+ return getattr(self, key)
+
+ def __setitem__(self, key, value) -> None:
+ self._keys.add(key)
+ self.register_buffer(key, value)
+
+ def __hash__(self) -> int:
+ return hash(repr(self))
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/voxel_grid_implicit_function.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/voxel_grid_implicit_function.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec963b35e40dfd4a62440835a37e46bd0623e4b9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/implicit_function/voxel_grid_implicit_function.py
@@ -0,0 +1,621 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import logging
+import math
+import warnings
+from dataclasses import fields
+from typing import Callable, Dict, Optional, Tuple
+
+import torch
+
+from omegaconf import DictConfig
+
+from pytorch3d.implicitron.models.implicit_function.base import ImplicitFunctionBase
+from pytorch3d.implicitron.models.implicit_function.decoding_functions import (
+ DecoderFunctionBase,
+)
+from pytorch3d.implicitron.models.implicit_function.voxel_grid import VoxelGridModule
+from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
+from pytorch3d.implicitron.tools.config import (
+ enable_get_default_args,
+ get_default_args_field,
+ registry,
+ run_auto_creation,
+)
+from pytorch3d.renderer import ray_bundle_to_ray_points
+from pytorch3d.renderer.cameras import CamerasBase
+from pytorch3d.renderer.implicit import HarmonicEmbedding
+
+logger = logging.getLogger(__name__)
+
+
+enable_get_default_args(HarmonicEmbedding)
+
+
@registry.register
class VoxelGridImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
    """
    This implicit function consists of two streams, one for the density calculation and one
    for the color calculation. Each of these streams has three main parts:
        1) Voxel grids:
            They take the (x, y, z) position and return the embedding of that point.
            These components are replaceable, you can make your own or choose one of
            several options.
        2) Harmonic embeddings:
            Convert each feature into series of 'harmonic features', feature is passed through
            sine and cosine functions. Input is of shape [minibatch, ..., D] output
            [minibatch, ..., (n_harmonic_functions * 2 + int(append_input)) * D]. Appends
            input by default. If you want it to behave like identity, put n_harmonic_functions=0
            and append_input=True.
        3) Decoding functions:
            The decoder is an instance of the DecoderFunctionBase and converts the embedding
            of a spatial location to density/color. Examples are Identity which returns its
            input and the MLP which uses fully connected neural network to transform the input.
            These components are replaceable, you can make your own or choose from
            several options.

    Calculating density is done in three steps:
        1) Evaluating the voxel grid on points
        2) Embedding the outputs with harmonic embedding
        3) Passing through the Density decoder

    To calculate the color we need the embedding and the viewing direction, it has five steps:
        1) Transforming the viewing direction with camera
        2) Evaluating the voxel grid on points
        3) Embedding the outputs with harmonic embedding
        4) Embedding the normalized direction with harmonic embedding
        5) Passing everything through the Color decoder

    If using the Implicitron configuration system the input_dim to the decoding functions will
    be set to the output_dim of the Harmonic embeddings.

    A speed up comes from using the scaffold, a low resolution voxel grid.
    The scaffold is referenced as "binary occupancy grid mask" in TensoRF paper and "AlphaMask"
    in official TensoRF implementation.
    The scaffold is used in:
        1) filtering points in empty space
            - controlled by `scaffold_filter_points` boolean. If set to True, points for which
                scaffold predicts that are in empty space will return 0 density and
                (0, 0, 0) color.
        2) calculating the bounding box of an object and cropping the voxel grids
            - controlled by `volume_cropping_epochs`.
            - at those epochs the implicit function will find the bounding box of an object
                inside it and crop density and color grids. Cropping of the voxel grids means
                preserving only voxel values that are inside the bounding box and changing the
                resolution to match the original, while preserving the new cropped location in
                world coordinates.

    The scaffold has to exist before attempting filtering and cropping, and is created on
    `scaffold_calculating_epochs`. Each voxel in the scaffold is labeled as having density 1 if
    the point in the center of it evaluates to greater than `scaffold_empty_space_threshold`.
    3D max pooling is performed on the densities of the points in 3D.
    Scaffold features are off by default.

    Members:
        voxel_grid_density (VoxelGridBase): voxel grid to use for density estimation
        voxel_grid_color (VoxelGridBase): voxel grid to use for color estimation

        harmonic_embedder_xyz_density (HarmonicEmbedder): Function to transform the outputs of
            the voxel_grid_density
        harmonic_embedder_xyz_color (HarmonicEmbedder): Function to transform the outputs of
            the voxel_grid_color for density
        harmonic_embedder_dir_color (HarmonicEmbedder): Function to transform the outputs of
            the voxel_grid_color for color

        decoder_density (DecoderFunctionBase): decoder function to use for density estimation
        color_density (DecoderFunctionBase): decoder function to use for color estimation

        use_multiple_streams (bool): if you want the density and color calculations to run on
            different cuda streams set this to True. Default True.
        xyz_ray_dir_in_camera_coords (bool): This is true if the directions are given in
            camera coordinates. Default False.

        voxel_grid_scaffold (VoxelGridModule): which holds the scaffold. Extents and
            translation of it are set to those of voxel_grid_density.
        scaffold_calculating_epochs (Tuple[int, ...]): at which epochs to recalculate the
            scaffold. (The scaffold will be created automatically at the beginning of
            the calculation.)
        scaffold_resolution (Tuple[int, int, int]): (width, height, depth) of the underlying
            voxel grid which stores scaffold
        scaffold_empty_space_threshold (float): if `self._get_density` evaluates to less than
            this it will be considered as empty space and the scaffold at that point would
            evaluate as empty space.
        scaffold_occupancy_chunk_size (str or int): Number of xy scaffold planes to calculate
            at the same time. To calculate the scaffold we need to query `_get_density()` at
            every voxel, this calculation can be split into scaffold depth number of xy plane
            calculations if you want the lowest memory usage, one calculation to calculate the
            whole scaffold, but with higher memory footprint or any other number of planes.
            Setting to a non-positive number calculates all planes at the same time.
            Defaults to '-1' (=calculating all planes).
        scaffold_max_pool_kernel_size (int): Size of the pooling region to use when
            calculating the scaffold. Defaults to 3.
        scaffold_filter_points (bool): If set to True the points will be filtered using
            `self.voxel_grid_scaffold`. Filtered points will be predicted as having 0 density
            and (0, 0, 0) color. The points which were not evaluated as empty space will be
            passed through the steps outlined above.
        volume_cropping_epochs: on which epochs to crop the voxel grids to fit the object's
            bounding box. Scaffold has to be calculated before cropping.
    """

    # ---- voxel grid for density
    # pyre-fixme[13]: Attribute `voxel_grid_density` is never initialized.
    voxel_grid_density: VoxelGridModule

    # ---- voxel grid for color
    # pyre-fixme[13]: Attribute `voxel_grid_color` is never initialized.
    voxel_grid_color: VoxelGridModule

    # ---- harmonic embeddings density
    harmonic_embedder_xyz_density_args: DictConfig = get_default_args_field(
        HarmonicEmbedding
    )
    harmonic_embedder_xyz_color_args: DictConfig = get_default_args_field(
        HarmonicEmbedding
    )
    harmonic_embedder_dir_color_args: DictConfig = get_default_args_field(
        HarmonicEmbedding
    )

    # ---- decoder function for density
    decoder_density_class_type: str = "MLPDecoder"
    # pyre-fixme[13]: Attribute `decoder_density` is never initialized.
    decoder_density: DecoderFunctionBase

    # ---- decoder function for color
    decoder_color_class_type: str = "MLPDecoder"
    # pyre-fixme[13]: Attribute `decoder_color` is never initialized.
    decoder_color: DecoderFunctionBase

    # ---- cuda streams
    use_multiple_streams: bool = True

    # ---- camera
    xyz_ray_dir_in_camera_coords: bool = False

    # --- scaffold
    # voxel_grid_scaffold: VoxelGridModule
    scaffold_calculating_epochs: Tuple[int, ...] = ()
    scaffold_resolution: Tuple[int, int, int] = (128, 128, 128)
    scaffold_empty_space_threshold: float = 0.001
    scaffold_occupancy_chunk_size: int = -1
    scaffold_max_pool_kernel_size: int = 3
    scaffold_filter_points: bool = True

    # --- cropping
    volume_cropping_epochs: Tuple[int, ...] = ()

    def __post_init__(self) -> None:
        run_auto_creation(self)
        self.voxel_grid_scaffold = self._create_voxel_grid_scaffold()
        self.harmonic_embedder_xyz_density = HarmonicEmbedding(
            **self.harmonic_embedder_xyz_density_args
        )
        self.harmonic_embedder_xyz_color = HarmonicEmbedding(
            **self.harmonic_embedder_xyz_color_args
        )
        self.harmonic_embedder_dir_color = HarmonicEmbedding(
            **self.harmonic_embedder_dir_color_args
        )
        # Scaffold-based filtering/cropping is disabled until `_get_scaffold`
        # has been run at least once.
        self._scaffold_ready = False

    def forward(
        self,
        ray_bundle: ImplicitronRayBundle,
        fun_viewpool=None,
        camera: Optional[CamerasBase] = None,
        global_code=None,
        **kwargs,
    ) -> Tuple[torch.Tensor, torch.Tensor, Dict]:
        """
        The forward function accepts the parametrizations of 3D points sampled along
        projection rays. The forward pass is responsible for attaching a 3D vector
        and a 1D scalar representing the point's RGB color and opacity respectively.

        Args:
            ray_bundle: An ImplicitronRayBundle object containing the following variables:
                origins: A tensor of shape `(minibatch, ..., 3)` denoting the
                    origins of the sampling rays in world coords.
                directions: A tensor of shape `(minibatch, ..., 3)`
                    containing the direction vectors of sampling rays in world coords.
                lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`
                    containing the lengths at which the rays are sampled.
            fun_viewpool: an optional callback with the signature
                    fun_viewpool(points) -> pooled_features
                where points is a [N_TGT x N x 3] tensor of world coords,
                and pooled_features is a [N_TGT x ... x N_SRC x latent_dim] tensor
                of the features pooled from the context images.
            camera: A camera model which will be used to transform the viewing
                directions

        Returns:
            rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`
                denoting the opacity of each ray point.
            rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`
                denoting the color of each ray point.
        """
        # ########## convert the ray parametrizations to world coordinates ########## #
        # points.shape = [minibatch x n_rays_width x n_rays_height x pts_per_ray x 3]
        # pyre-ignore[6]
        points = ray_bundle_to_ray_points(ray_bundle)
        directions = ray_bundle.directions.reshape(-1, 3)
        input_shape = points.shape
        num_points_per_ray = input_shape[-2]
        points = points.view(-1, 3)
        non_empty_points = None

        # ########## filter the points using the scaffold ########## #
        if self._scaffold_ready and self.scaffold_filter_points:
            with torch.no_grad():
                non_empty_points = self.voxel_grid_scaffold(points)[..., 0] > 0
            points = points[non_empty_points]
            if len(points) == 0:
                warnings.warn(
                    "The scaffold has filtered all the points. "
                    "The voxel grids and decoding functions will not be run."
                )
                return (
                    points.new_zeros((*input_shape[:-1], 1)),
                    points.new_zeros((*input_shape[:-1], 3)),
                    {},
                )

        # ########## calculate color and density ########## #
        rays_densities, rays_colors = self._calculate_density_and_color(
            points, directions, camera, non_empty_points, num_points_per_ray
        )

        if not (self._scaffold_ready and self.scaffold_filter_points):
            return (
                rays_densities.view((*input_shape[:-1], rays_densities.shape[-1])),
                rays_colors.view((*input_shape[:-1], rays_colors.shape[-1])),
                {},
            )

        # ########## merge scaffold calculated points ########## #
        # Create a zeroed tensor corresponding to a point with density=0 and fill it
        # with calculated density for points which are not in empty space. Do the
        # same for color
        rays_densities_combined = rays_densities.new_zeros(
            (math.prod(input_shape[:-1]), rays_densities.shape[-1])
        )
        rays_colors_combined = rays_colors.new_zeros(
            (math.prod(input_shape[:-1]), rays_colors.shape[-1])
        )
        assert non_empty_points is not None
        rays_densities_combined[non_empty_points] = rays_densities
        rays_colors_combined[non_empty_points] = rays_colors

        return (
            rays_densities_combined.view((*input_shape[:-1], rays_densities.shape[-1])),
            rays_colors_combined.view((*input_shape[:-1], rays_colors.shape[-1])),
            {},
        )

    def _calculate_density_and_color(
        self,
        points: torch.Tensor,
        directions: torch.Tensor,
        camera: Optional[CamerasBase],
        non_empty_points: Optional[torch.Tensor],
        num_points_per_ray: int,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Calculates density and color at `points`.
        If enabled use cuda streams.

        Args:
            points: points at which to calculate density and color.
                Tensor of shape [n_points, 3].
            directions: from which directions are the points viewed.
                One per ray. Tensor of shape [n_rays, 3].
            camera: A camera model which will be used to transform the viewing
                directions
            non_empty_points: indices of points which weren't filtered out;
                used for expanding directions
            num_points_per_ray: number of points per ray, needed to expand directions.
        Returns:
            Tuple of density (tensor of shape [..., 1]) and color
            (tensor of shape [..., 3])
        """
        if self.use_multiple_streams and points.is_cuda:
            current_stream = torch.cuda.current_stream(points.device)
            other_stream = torch.cuda.Stream(points.device)
            # Density runs on `other_stream`, color on the current stream;
            # the wait_stream calls fence the concurrent work on both sides.
            other_stream.wait_stream(current_stream)

            with torch.cuda.stream(other_stream):
                # rays_densities.shape =
                # [minibatch x n_rays_width x n_rays_height x pts_per_ray x density_dim]
                rays_densities = self._get_density(points)

            # rays_colors.shape =
            # [minibatch x n_rays_width x n_rays_height x pts_per_ray x color_dim]
            rays_colors = self._get_color(
                points, camera, directions, non_empty_points, num_points_per_ray
            )

            current_stream.wait_stream(other_stream)
        else:
            # Same calculation as above, just serial.
            rays_densities = self._get_density(points)
            rays_colors = self._get_color(
                points, camera, directions, non_empty_points, num_points_per_ray
            )
        return rays_densities, rays_colors

    def _get_density(self, points: torch.Tensor) -> torch.Tensor:
        """
        Calculates density at points:
            1) Evaluates the voxel grid on points
            2) Embeds the outputs with harmonic embedding
            3) Passes everything through the Density decoder

        Args:
            points: tensor of shape [..., 3]
                where the last dimension is the points in the (x, y, z)
        Returns:
            calculated densities of shape [..., density_dim], `density_dim` is the
            feature dimensionality which `decoder_density` returns
        """
        embeds_density = self.voxel_grid_density(points)
        harmonic_embedding_density = self.harmonic_embedder_xyz_density(embeds_density)
        # shape = [..., density_dim]
        return self.decoder_density(harmonic_embedding_density)

    def _get_color(
        self,
        points: torch.Tensor,
        camera: Optional[CamerasBase],
        directions: torch.Tensor,
        non_empty_points: Optional[torch.Tensor],
        num_points_per_ray: int,
    ) -> torch.Tensor:
        """
        Calculates color at points using the viewing direction:
            1) Transforms the viewing direction with camera
            2) Evaluates the voxel grid on points
            3) Embeds the outputs with harmonic embedding
            4) Embeds the normalized direction with harmonic embedding
            5) Passes everything through the Color decoder
        Args:
            points: tensor of shape (..., 3)
                where the last dimension is the points in the (x, y, z)
            camera: A camera model which will be used to transform the viewing
                directions
            directions: A tensor of shape `(..., 3)`
                containing the direction vectors of sampling rays in world coords.
            non_empty_points: indices of points which weren't filtered out;
                used for expanding directions
            num_points_per_ray: number of points per ray, needed to expand directions.
        """
        # ########## transform direction ########## #
        if self.xyz_ray_dir_in_camera_coords:
            if camera is None:
                raise ValueError("Camera must be given if xyz_ray_dir_in_camera_coords")
            directions = directions @ camera.R

        # ########## get voxel grid output ########## #
        # embeds_color.shape = [..., pts_per_ray, n_features]
        embeds_color = self.voxel_grid_color(points)

        # ########## embed with the harmonic function ########## #
        # Obtain the harmonic embedding of the voxel grid output.
        harmonic_embedding_color = self.harmonic_embedder_xyz_color(embeds_color)

        # Normalize the ray_directions to unit l2 norm.
        rays_directions_normed = torch.nn.functional.normalize(directions, dim=-1)
        # Obtain the harmonic embedding of the normalized ray directions.
        harmonic_embedding_dir = self.harmonic_embedder_dir_color(
            rays_directions_normed
        )

        # Each ray contributes one direction; expand it to all of the ray's
        # sample points, then drop the entries for scaffold-filtered points.
        harmonic_embedding_dir = torch.repeat_interleave(
            harmonic_embedding_dir, num_points_per_ray, dim=0
        )
        if non_empty_points is not None:
            harmonic_embedding_dir = harmonic_embedding_dir[non_empty_points]

        # total color embedding is concatenation of the harmonic embedding of voxel grid
        # output and harmonic embedding of the normalized direction
        total_color_embedding = torch.cat(
            (harmonic_embedding_color, harmonic_embedding_dir), dim=-1
        )

        # ########## evaluate color with the decoding function ########## #
        # rays_colors.shape = [..., pts_per_ray, 3] in [0-1]
        return self.decoder_color(total_color_embedding)

    @staticmethod
    def allows_multiple_passes() -> bool:
        """
        Returns True as this implicit function allows
        multiple passes. Overridden from ImplicitFunctionBase.
        """
        return True

    def subscribe_to_epochs(self) -> Tuple[Tuple[int, ...], Callable[[int], bool]]:
        """
        Method which expresses interest in subscribing to optimization epoch updates.
        This implicit function subscribes to epochs to calculate the scaffold and to
        crop voxel grids, so this method combines wanted epochs and wraps their callbacks.

        Returns:
            list of epochs on which to call a callable and callable to be called on
            particular epoch. The callable returns True if parameter change has
            happened else False and it must be supplied with one argument, epoch.
        """

        def callback(epoch) -> bool:
            change = False
            if epoch in self.scaffold_calculating_epochs:
                change = self._get_scaffold(epoch)
            if epoch in self.volume_cropping_epochs:
                # note: evaluate `_crop` first so that `or` cannot skip it
                change = self._crop(epoch) or change
            return change

        # remove duplicates
        call_epochs = list(
            set(self.scaffold_calculating_epochs) | set(self.volume_cropping_epochs)
        )
        return call_epochs, callback

    def _crop(self, epoch: int) -> bool:
        """
        Finds the bounding box of an object represented in the scaffold and crops
        density and color voxel grids to match that bounding box. If density of the
        scaffold is 0 everywhere (there is no object in it) no change will
        happen.

        Args:
            epoch: ignored
        Returns:
            True (indicating that parameter change has happened) if there is
            an object inside, else False.
        """
        # find bounding box
        points = self.voxel_grid_scaffold.get_grid_points(epoch=epoch)
        assert self._scaffold_ready, "Scaffold has to be calculated before cropping."
        occupancy = self.voxel_grid_scaffold(points)[..., 0] > 0
        non_zero_idxs = torch.nonzero(occupancy)
        if len(non_zero_idxs) == 0:
            return False
        min_indices = tuple(torch.min(non_zero_idxs, dim=0)[0])
        max_indices = tuple(torch.max(non_zero_idxs, dim=0)[0])
        min_point, max_point = points[min_indices], points[max_indices]

        logger.info(
            f"Cropping at epoch {epoch} to bounding box "
            f"[{min_point.tolist()}, {max_point.tolist()}]."
        )

        # crop the voxel grids
        self.voxel_grid_density.crop_self(min_point, max_point)
        self.voxel_grid_color.crop_self(min_point, max_point)
        return True

    @torch.no_grad()
    def _get_scaffold(self, epoch: int) -> bool:
        """
        Creates a low resolution grid which is used to filter points that are in empty
        space.

        Args:
            epoch: epoch on which it is called, ignored inside method
        Returns:
            Always False: Modifies `self.voxel_grid_scaffold` member.
        """

        planes = []
        # points has shape [width, height, depth, 3]; chunking happens over
        # the depth dimension (dim -2) so each chunk is a stack of xy planes.
        # The coordinate dimension (last, size 3) must always stay intact.
        points = self.voxel_grid_scaffold.get_grid_points(epoch=epoch)

        chunk_size = (
            self.scaffold_occupancy_chunk_size
            if self.scaffold_occupancy_chunk_size > 0
            else points.shape[-2]
        )
        for k in range(0, points.shape[-2], chunk_size):
            points_in_planes = points[..., k : k + chunk_size, :]
            planes.append(self._get_density(points_in_planes)[..., 0])

        density_cube = torch.cat(planes, dim=-1)
        # Dilate the occupied region with a max pool so that thin structures
        # near the object's surface are not filtered away.
        density_cube = torch.nn.functional.max_pool3d(
            density_cube[None, None],
            kernel_size=self.scaffold_max_pool_kernel_size,
            padding=self.scaffold_max_pool_kernel_size // 2,
            stride=1,
        )
        occupancy_cube = density_cube > self.scaffold_empty_space_threshold
        self.voxel_grid_scaffold.params["voxel_grid"] = occupancy_cube.float()
        self._scaffold_ready = True

        return False

    @classmethod
    def decoder_density_tweak_args(cls, type_, args: DictConfig) -> None:
        # input_dim is computed in `create_decoder_density_impl`, never taken
        # from the config.
        args.pop("input_dim", None)

    def create_decoder_density_impl(self, type_, args: DictConfig) -> None:
        """
        Decoding functions come after harmonic embedding and voxel grid. In order to not
        calculate the input dimension of the decoder in the config file this function
        calculates the required input dimension and sets the input dimension of the
        decoding function to this value.
        """
        grid_args = self.voxel_grid_density_args
        grid_output_dim = VoxelGridModule.get_output_dim(grid_args)

        embedder_args = self.harmonic_embedder_xyz_density_args
        input_dim = HarmonicEmbedding.get_output_dim_static(
            grid_output_dim,
            embedder_args["n_harmonic_functions"],
            embedder_args["append_input"],
        )

        decoder_class = registry.get(DecoderFunctionBase, type_)
        need_input_dim = any(
            field.name == "input_dim" for field in fields(decoder_class)
        )
        if need_input_dim:
            self.decoder_density = decoder_class(input_dim=input_dim, **args)
        else:
            self.decoder_density = decoder_class(**args)

    @classmethod
    def decoder_color_tweak_args(cls, type_, args: DictConfig) -> None:
        # input_dim is computed in `create_decoder_color_impl`, never taken
        # from the config.
        args.pop("input_dim", None)

    def create_decoder_color_impl(self, type_, args: DictConfig) -> None:
        """
        Decoding functions come after harmonic embedding and voxel grid. In order to not
        calculate the input dimension of the decoder in the config file this function
        calculates the required input dimension and sets the input dimension of the
        decoding function to this value.
        """
        grid_args = self.voxel_grid_color_args
        grid_output_dim = VoxelGridModule.get_output_dim(grid_args)

        embedder_args = self.harmonic_embedder_xyz_color_args
        input_dim0 = HarmonicEmbedding.get_output_dim_static(
            grid_output_dim,
            embedder_args["n_harmonic_functions"],
            embedder_args["append_input"],
        )

        # The color decoder also receives the harmonic embedding of the
        # (3-dimensional) viewing direction, concatenated to the grid output.
        dir_dim = 3
        embedder_args = self.harmonic_embedder_dir_color_args
        input_dim1 = HarmonicEmbedding.get_output_dim_static(
            dir_dim,
            embedder_args["n_harmonic_functions"],
            embedder_args["append_input"],
        )

        input_dim = input_dim0 + input_dim1

        decoder_class = registry.get(DecoderFunctionBase, type_)
        need_input_dim = any(
            field.name == "input_dim" for field in fields(decoder_class)
        )
        if need_input_dim:
            self.decoder_color = decoder_class(input_dim=input_dim, **args)
        else:
            self.decoder_color = decoder_class(**args)

    def _create_voxel_grid_scaffold(self) -> VoxelGridModule:
        """
        Creates object to become self.voxel_grid_scaffold:
            - makes `self.voxel_grid_scaffold` have same world to local mapping as
                `self.voxel_grid_density`
        """
        return VoxelGridModule(
            extents=self.voxel_grid_density_args["extents"],
            translation=self.voxel_grid_density_args["translation"],
            voxel_grid_class_type="FullResolutionVoxelGrid",
            hold_voxel_grid_as_parameters=False,
            voxel_grid_FullResolutionVoxelGrid_args={
                "resolution_changes": {0: self.scaffold_resolution},
                "padding": "zeros",
                "align_corners": True,
                "mode": "trilinear",
            },
        )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/metrics.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e1a3b6220a3e3e3238dcbc9796b3fa1d7e5d230
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/metrics.py
@@ -0,0 +1,402 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+import warnings
+from typing import Any, Dict, Optional
+
+import torch
+from pytorch3d.implicitron.models.renderer.ray_sampler import ImplicitronRayBundle
+from pytorch3d.implicitron.tools import metric_utils as utils
+from pytorch3d.implicitron.tools.config import registry, ReplaceableBase
+from pytorch3d.ops import padded_to_packed
+from pytorch3d.renderer import utils as rend_utils
+
+from .renderer.base import RendererOutput
+
+
class RegularizationMetricsBase(ReplaceableBase, torch.nn.Module):
    """
    Pluggable abstract base for regularization metrics.

    Unlike `ViewMetricsBase`, implementations of `forward()` receive the model
    itself and may therefore compute penalties that depend on the model's
    parameters (e.g. weight norms).
    """

    def forward(
        self, model: Any, keys_prefix: str = "loss_", **kwargs
    ) -> Dict[str, Any]:
        """
        Compute regularization terms for a differentiable rendering pipeline.

        Args:
            model: A model instance; may be inspected, for example, to
                implement weights-based regularization.
            keys_prefix: Prefix prepended to every key of the returned
                dictionary.

        Returns:
            A dictionary `{metric_name_i: metric_value_i}` mapping metric
            names to 0-dimensional float tensors.
        """
        raise NotImplementedError
+
+
class ViewMetricsBase(ReplaceableBase, torch.nn.Module):
    """
    Replaceable abstract base for model metrics.
    `forward()` method produces losses and other metrics.
    """

    def forward(
        self,
        raymarched: RendererOutput,
        ray_bundle: ImplicitronRayBundle,
        image_rgb: Optional[torch.Tensor] = None,
        depth_map: Optional[torch.Tensor] = None,
        fg_probability: Optional[torch.Tensor] = None,
        mask_crop: Optional[torch.Tensor] = None,
        keys_prefix: str = "loss_",
        **kwargs,
    ) -> Dict[str, Any]:
        """
        Calculates various metrics and loss functions useful for supervising
        differentiable rendering pipelines. Any additional parameters can be passed
        in the `raymarched.aux` dictionary.

        Args:
            raymarched: Output of the renderer.
            ray_bundle: ImplicitronRayBundle object which was used to produce the
                raymarched object.
            image_rgb: A tensor of shape `(B, H, W, 3)` containing ground truth rgb
                values.
            depth_map: A tensor of shape `(B, Hd, Wd, 1)` containing ground truth depth
                values.
            fg_probability: A tensor of shape `(B, Hm, Wm, 1)` containing ground truth
                foreground masks.
            mask_crop: A binary tensor denoting the valid (non-padded) regions of
                the inputs.
            keys_prefix: A common prefix for all keys in the output dictionary
                containing all view metrics.

        Returns:
            A dictionary with the resulting view metrics. The items
            will have form `{metric_name_i: metric_value_i}` keyed by the
            names of the output metrics `metric_name_i` with their corresponding
            values `metric_value_i` represented as 0-dimensional float tensors.
        """
        raise NotImplementedError()
+
+
@registry.register
class RegularizationMetrics(RegularizationMetricsBase):
    def forward(
        self, model: Any, keys_prefix: str = "loss_", **kwargs
    ) -> Dict[str, Any]:
        """
        Calculates the AD penalty, or returns an empty dict if the model's
        autodecoder is inactive.

        Args:
            model: A model instance.
            keys_prefix: A common prefix for all keys in the output dictionary
                containing all regularization metrics.

        Returns:
            A dictionary with the resulting regularization metrics. The items
            will have form `{metric_name_i: metric_value_i}` keyed by the
            names of the output metrics `metric_name_i` with their corresponding
            values `metric_value_i` represented as 0-dimensional float tensors.

            The calculated metric is:
                autodecoder_norm: Squared norm of the sequence autodecoder's
                    encodings (note: the emitted key is "autodecoder_norm").
        """
        metrics = {}
        # Only penalize models that actually carry a sequence autodecoder and
        # for which the penalty is defined (non-None).
        if getattr(model, "sequence_autodecoder", None) is not None:
            ad_penalty = model.sequence_autodecoder.calculate_squared_encoding_norm()
            if ad_penalty is not None:
                metrics["autodecoder_norm"] = ad_penalty

        if keys_prefix is not None:
            metrics = {(keys_prefix + k): v for k, v in metrics.items()}

        return metrics
+
+
@registry.register
class ViewMetrics(ViewMetricsBase):
    def forward(
        self,
        raymarched: RendererOutput,
        ray_bundle: ImplicitronRayBundle,
        image_rgb: Optional[torch.Tensor] = None,
        depth_map: Optional[torch.Tensor] = None,
        fg_probability: Optional[torch.Tensor] = None,
        mask_crop: Optional[torch.Tensor] = None,
        keys_prefix: str = "loss_",
        **kwargs,
    ) -> Dict[str, Any]:
        """
        Calculates various differentiable metrics useful for supervising
        differentiable rendering pipelines.

        Args:
            raymarched.features: Predicted rgb or feature values.
            raymarched.depths: A tensor of shape `(B, ..., 1)` containing
                predicted depth values.
            raymarched.masks: A tensor of shape `(B, ..., 1)` containing
                predicted foreground masks.
            raymarched.aux["grad_theta"]: A tensor of shape `(B, ..., 3)` containing an
                evaluation of a gradient of a signed distance function w.r.t.
                input 3D coordinates used to compute the eikonal loss.
            raymarched.aux["density_grid"]: A tensor of shape `(B, Hg, Wg, Dg, 1)`
                containing a `Hg x Wg x Dg` voxel grid of density values.
            ray_bundle: ImplicitronRayBundle object which was used to produce the
                raymarched object.
            image_rgb: A tensor of shape `(B, H, W, 3)` containing ground truth rgb
                values.
            depth_map: A tensor of shape `(B, Hd, Wd, 1)` containing ground truth depth
                values.
            fg_probability: A tensor of shape `(B, Hm, Wm, 1)` containing ground truth
                foreground masks.
            mask_crop: A binary tensor denoting valid (non-padded) regions of
                the inputs.
            keys_prefix: A common prefix for all keys in the output dictionary
                containing all view metrics.

        Returns:
            A dictionary `{metric_name_i: metric_value_i}` keyed by the
            names of the output metrics `metric_name_i` with their corresponding
            values `metric_value_i` represented as 0-dimensional float tensors.

            The calculated metrics are:
                rgb_huber: A robust huber loss between `image_pred` and `image`.
                rgb_mse: Mean squared error between `image_pred` and `image`.
                rgb_psnr: Peak signal-to-noise ratio between `image_pred` and `image`.
                rgb_psnr_fg: Peak signal-to-noise ratio between the foreground
                    region of `image_pred` and `image` as defined by `mask`.
                rgb_mse_fg: Mean squared error between the foreground
                    region of `image_pred` and `image` as defined by `mask`.
                mask_neg_iou: (1 - intersection-over-union) between `mask_pred`
                    and `mask`.
                mask_bce: Binary cross entropy between `mask_pred` and `mask`.
                mask_beta_prior: A loss enforcing strictly binary values
                    of `mask_pred`: `log(mask_pred) + log(1-mask_pred)`
                depth_abs: Mean per-pixel L1 distance between
                    `depth_pred` and `depth`.
                depth_abs_fg: Mean per-pixel L1 distance between the foreground
                    region of `depth_pred` and `depth` as defined by `mask`.
                eikonal: Eikonal regularizer `(||grad_theta|| - 1)**2`.
                density_tv: The Total Variation regularizer of density
                    values in `density_grid` (sum of L1 distances of values
                    of all 4-neighbouring cells).
                depth_neg_penalty: `min(depth_pred, 0)**2` penalizing negative
                    predicted depth values.
        """
        metrics = self._calculate_stage(
            raymarched,
            ray_bundle,
            image_rgb,
            depth_map,
            fg_probability,
            mask_crop,
            keys_prefix,
        )

        # Recurse into earlier (coarser) rendering stages, namespacing their
        # metrics with an extra "prev_stage_" prefix per level.
        if raymarched.prev_stage:
            metrics.update(
                self(
                    raymarched.prev_stage,
                    ray_bundle,
                    image_rgb,
                    depth_map,
                    fg_probability,
                    mask_crop,
                    keys_prefix=(keys_prefix + "prev_stage_"),
                )
            )

        return metrics

    def _calculate_stage(
        self,
        raymarched: RendererOutput,
        ray_bundle: ImplicitronRayBundle,
        image_rgb: Optional[torch.Tensor] = None,
        depth_map: Optional[torch.Tensor] = None,
        fg_probability: Optional[torch.Tensor] = None,
        mask_crop: Optional[torch.Tensor] = None,
        keys_prefix: str = "loss_",
        **kwargs,
    ) -> Dict[str, Any]:
        """
        Calculate metrics for the current stage.
        """
        # TODO: extract functions

        # reshape from B x ... x DIM to B x DIM x -1 x 1
        image_rgb_pred, fg_probability_pred, depth_map_pred = [
            _reshape_nongrid_var(x)
            for x in [raymarched.features, raymarched.masks, raymarched.depths]
        ]
        xys = ray_bundle.xys

        # If ray_bundle is packed then we can sample images in padded state to lower
        # memory requirements. Instead of having one image for every element in
        # ray_bundle we can then have one image per unique sampled camera.
        if ray_bundle.is_packed():
            xys, first_idxs, num_inputs = ray_bundle.get_padded_xys()

        # reshape the sampling grid as well
        # TODO: we can get rid of the singular dimension here and in _reshape_nongrid_var
        # now that we use rend_utils.ndc_grid_sample
        xys = xys.reshape(xys.shape[0], -1, 1, 2)

        # closure with the given xys
        def sample_full(tensor, mode):
            if tensor is None:
                return tensor
            return rend_utils.ndc_grid_sample(tensor, xys, mode=mode)

        def sample_packed(tensor, mode):
            if tensor is None:
                return tensor

            # Select the images that correspond to the sampled cameras. This
            # closure is only bound when the ray bundle is packed (see the
            # dispatch below), so the selection must happen exactly once:
            # the previous code indexed `camera_ids` a second time inside a
            # redundant `is_packed()` check, subsampling an already-subsampled
            # tensor.
            tensor = tensor[ray_bundle.camera_ids]
            result = rend_utils.ndc_grid_sample(tensor, xys, mode=mode)
            return padded_to_packed(result, first_idxs, num_inputs, max_size_dim=2)[
                :, :, None
            ]  # the result is [n_rays_total_training, 3, 1, 1]

        sample = sample_packed if ray_bundle.is_packed() else sample_full

        # eval all results in this size
        image_rgb = sample(image_rgb, mode="bilinear")
        depth_map = sample(depth_map, mode="nearest")
        fg_probability = sample(fg_probability, mode="nearest")
        mask_crop = sample(mask_crop, mode="nearest")
        # Default the crop mask to all-ones, shaped like whichever prediction
        # is available.
        if mask_crop is None and image_rgb_pred is not None:
            mask_crop = torch.ones_like(image_rgb_pred[:, :1])
        if mask_crop is None and depth_map_pred is not None:
            mask_crop = torch.ones_like(depth_map_pred[:, :1])

        metrics = {}
        if image_rgb is not None and image_rgb_pred is not None:
            metrics.update(
                _rgb_metrics(
                    image_rgb,
                    image_rgb_pred,
                    fg_probability,
                    fg_probability_pred,
                    mask_crop,
                )
            )

        if fg_probability_pred is not None:
            metrics["mask_beta_prior"] = utils.beta_prior(fg_probability_pred)
        if fg_probability is not None and fg_probability_pred is not None:
            metrics["mask_neg_iou"] = utils.neg_iou_loss(
                fg_probability_pred, fg_probability, mask=mask_crop
            )
            metrics["mask_bce"] = utils.calc_bce(
                fg_probability_pred, fg_probability, mask=mask_crop
            )

        if depth_map is not None and depth_map_pred is not None:
            assert mask_crop is not None
            _, abs_ = utils.eval_depth(
                depth_map_pred, depth_map, get_best_scale=True, mask=mask_crop, crop=0
            )
            metrics["depth_abs"] = abs_.mean()

            if fg_probability is not None:
                mask = fg_probability * mask_crop
                _, abs_ = utils.eval_depth(
                    depth_map_pred, depth_map, get_best_scale=True, mask=mask, crop=0
                )
                metrics["depth_abs_fg"] = abs_.mean()

        # regularizers
        grad_theta = raymarched.aux.get("grad_theta")
        if grad_theta is not None:
            metrics["eikonal"] = _get_eikonal_loss(grad_theta)

        density_grid = raymarched.aux.get("density_grid")
        if density_grid is not None:
            metrics["density_tv"] = _get_grid_tv_loss(density_grid)

        if depth_map_pred is not None:
            metrics["depth_neg_penalty"] = _get_depth_neg_penalty_loss(depth_map_pred)

        if keys_prefix is not None:
            metrics = {(keys_prefix + k): v for k, v in metrics.items()}

        return metrics
+
+
def _rgb_metrics(images, images_pred, masks, masks_pred, masks_crop):
    """
    Compute RGB reconstruction metrics (huber, mse, psnr) between `images_pred`
    and `images` over the valid `masks_crop` region; foreground variants are
    added when `masks` is provided.
    """
    assert masks_crop is not None
    if images.shape[1] != images_pred.shape[1]:
        raise ValueError(
            f"Network output's RGB images had {images_pred.shape[1]} "
            f"channels. {images.shape[1]} expected."
        )
    # Per-pixel squared error averaged over channels; channel dim kept for
    # broadcasting against the single-channel masks.
    sq_err = ((images_pred - images) ** 2).mean(dim=1, keepdim=True)
    huber_err = utils.huber(sq_err, scaling=0.03)
    # Clamp the normalizer to 1 so empty crops do not divide by zero.
    denom = masks_crop.sum().clamp(1.0)
    metrics = {
        "rgb_huber": (huber_err * masks_crop).sum() / denom,
        "rgb_mse": (sq_err * masks_crop).sum() / denom,
        "rgb_psnr": utils.calc_psnr(images_pred, images, mask=masks_crop),
    }
    if masks is not None:
        fg_mask = masks_crop * masks
        metrics["rgb_psnr_fg"] = utils.calc_psnr(images_pred, images, mask=fg_mask)
        metrics["rgb_mse_fg"] = (sq_err * fg_mask).sum() / fg_mask.sum().clamp(1.0)
    return metrics
+
+
+def _get_eikonal_loss(grad_theta):
+ return ((grad_theta.norm(2, dim=1) - 1) ** 2).mean()
+
+
def _get_grid_tv_loss(grid, log_domain: bool = True, eps: float = 1e-5):
    """
    Isotropic total-variation regularizer over the last three (spatial) axes
    of `grid`, optionally computed in the log domain.

    Note: the last row/column/slice along each axis is ignored so that all
    three difference terms share the same shape.
    """
    if log_domain:
        if (grid <= -eps).any():
            warnings.warn("Grid has negative values; this will produce NaN loss")
        grid = torch.log(grid + eps)

    # Forward differences along each spatial axis, all anchored at the same
    # "lower corner" cells.
    anchor = grid[..., :-1, :-1, :-1]
    diff_last = grid[..., :-1, :-1, 1:] - anchor
    diff_mid = grid[..., :-1, 1:, :-1] - anchor
    diff_first = grid[..., 1:, :-1, :-1] - anchor
    return torch.mean(
        utils.safe_sqrt(diff_last**2 + diff_mid**2 + diff_first**2, eps=1e-5)
    )
+
+
+def _get_depth_neg_penalty_loss(depth):
+ neg_penalty = depth.clamp(min=None, max=0.0) ** 2
+ return torch.mean(neg_penalty)
+
+
+def _reshape_nongrid_var(x):
+ if x is None:
+ return None
+
+ ba, *_, dim = x.shape
+ return x.reshape(ba, -1, 1, dim).permute(0, 3, 1, 2).contiguous()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/model_dbir.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/model_dbir.py
new file mode 100644
index 0000000000000000000000000000000000000000..496f9ed15d10877d34b4489a032487bf28eba6e4
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/model_dbir.py
@@ -0,0 +1,153 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+from typing import Any, Dict, List, Optional, Tuple
+
+import torch
+from pytorch3d.implicitron.dataset.utils import is_known_frame
+from pytorch3d.implicitron.tools.config import registry
+from pytorch3d.implicitron.tools.point_cloud_utils import (
+ get_rgbd_point_cloud,
+ render_point_cloud_pytorch3d,
+)
+from pytorch3d.renderer.cameras import CamerasBase
+from pytorch3d.structures import Pointclouds
+
+from .base_model import ImplicitronModelBase, ImplicitronRender
+from .renderer.base import EvaluationMode
+
+
@registry.register
class ModelDBIR(ImplicitronModelBase):
    """
    A simple depth-based image rendering model.

    Args:
        render_image_width: The width of the rendered rectangular images.
        render_image_height: The height of the rendered rectangular images.
        bg_color: The color of the background.
        max_points: Maximum number of points in the point cloud
            formed by unprojecting all source view depths.
            If more points are present, they are randomly subsampled
            to this number of points without replacement.
    """

    render_image_width: int = 256
    render_image_height: int = 256
    bg_color: Tuple[float, float, float] = (0.0, 0.0, 0.0)
    max_points: int = -1  # non-positive disables subsampling (see forward)

    # pyre-fixme[14]: `forward` overrides method defined in `ImplicitronModelBase`
    # inconsistently.
    def forward(
        self,
        *,  # force keyword-only arguments
        image_rgb: Optional[torch.Tensor],
        camera: CamerasBase,
        fg_probability: Optional[torch.Tensor],
        mask_crop: Optional[torch.Tensor],
        depth_map: Optional[torch.Tensor],
        sequence_name: Optional[List[str]],
        evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
        frame_type: List[str],
        **kwargs,
    ) -> Dict[str, Any]:  # TODO: return a namedtuple or dataclass
        """
        Given a set of input source cameras images and depth maps, unprojects
        all RGBD maps to a colored point cloud and renders into the target views.

        Args:
            camera: A batch of `N` PyTorch3D cameras.
            image_rgb: A batch of `N` images of shape `(N, 3, H, W)`.
            depth_map: A batch of `N` depth maps of shape `(N, 1, H, W)`.
            fg_probability: A batch of `N` foreground probability maps
                of shape `(N, 1, H, W)`.
            frame_type: A list of `N` strings containing frame type indicators
                which specify target and source views.

        Raises:
            ValueError: if `image_rgb`, `fg_probability` or `depth_map` is None.

        Returns:
            preds: A dict with the following fields:
                implicitron_render: The rendered colors, depth and mask
                    of the target views.
                point_cloud: The point cloud of the scene. It's renders are
                    stored in `implicitron_render`.
        """

        if image_rgb is None:
            raise ValueError("ModelDBIR needs image input")

        if fg_probability is None:
            raise ValueError("ModelDBIR needs foreground mask input")

        if depth_map is None:
            raise ValueError("ModelDBIR needs depth map input")

        # Boolean mask over the batch: True for source ("known") frames whose
        # RGBD data is unprojected; the remaining frames are render targets.
        is_known = is_known_frame(frame_type)
        is_known_idx = torch.where(is_known)[0]

        # Binarize the foreground probability at 0.5 to mask the unprojection.
        mask_fg = (fg_probability > 0.5).type_as(image_rgb)

        point_cloud = get_rgbd_point_cloud(
            # pyre-fixme[6]: For 1st param expected `Union[List[int], int,
            #  LongTensor]` but got `Tensor`.
            camera[is_known_idx],
            image_rgb[is_known_idx],
            depth_map[is_known_idx],
            mask_fg[is_known_idx],
        )

        # NOTE(review): `.item()` assumes `get_rgbd_point_cloud` returns a
        # single (batch-size-1) cloud — confirm against its implementation.
        pcl_size = point_cloud.num_points_per_cloud().item()
        if (self.max_points > 0) and (pcl_size > self.max_points):
            # Random subsample without replacement down to max_points.
            # pyre-fixme[6]: For 1st param expected `int` but got `Union[bool,
            #  float, int]`.
            prm = torch.randperm(pcl_size)[: self.max_points]
            point_cloud = Pointclouds(
                point_cloud.points_padded()[:, prm, :],
                # pyre-fixme[16]: Optional type has no attribute `__getitem__`.
                features=point_cloud.features_padded()[:, prm, :],
            )

        is_target_idx = torch.where(~is_known)[0]

        depth_render, image_render, mask_render = [], [], []

        # render into target frames in a for loop to save memory
        for tgt_idx in is_target_idx:
            _image_render, _mask_render, _depth_render = render_point_cloud_pytorch3d(
                camera[int(tgt_idx)],
                point_cloud,
                render_size=(self.render_image_height, self.render_image_width),
                point_radius=1e-2,
                topk=10,
                bg_color=self.bg_color,
            )
            _image_render = _image_render.clamp(0.0, 1.0)
            # the mask is the set of pixels with opacity bigger than eps
            _mask_render = (_mask_render > 1e-4).float()

            depth_render.append(_depth_render)
            image_render.append(_image_render)
            mask_render.append(_mask_render)

        # NOTE(review): if the batch contains no target frames, the lists are
        # empty and `torch.cat` raises — presumably callers guarantee at least
        # one target view; confirm.
        implicitron_render = ImplicitronRender(
            **{
                k: torch.cat(v, dim=0)
                for k, v in zip(
                    ["depth_render", "image_render", "mask_render"],
                    [depth_render, image_render, mask_render],
                )
            }
        )

        preds = {
            "implicitron_render": implicitron_render,
            "point_cloud": point_cloud,
        }

        return preds
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/overfit_model.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/overfit_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..23a45665d58051f646adf2ae8f5ed80afbab6ff8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/overfit_model.py
@@ -0,0 +1,678 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+# Note: The #noqa comments below are for unused imports of pluggable implementations
+# which are part of implicitron. They ensure that the registry is prepopulated.
+
+import functools
+import logging
+from dataclasses import field
+from typing import Any, Callable, Dict, List, Optional, Tuple, TYPE_CHECKING, Union
+
+import torch
+from omegaconf import DictConfig
+
+from pytorch3d.implicitron.models.base_model import (
+ ImplicitronModelBase,
+ ImplicitronRender,
+)
+from pytorch3d.implicitron.models.global_encoder.global_encoder import GlobalEncoderBase
+from pytorch3d.implicitron.models.implicit_function.base import ImplicitFunctionBase
+from pytorch3d.implicitron.models.metrics import (
+ RegularizationMetricsBase,
+ ViewMetricsBase,
+)
+
+from pytorch3d.implicitron.models.renderer.base import (
+ BaseRenderer,
+ EvaluationMode,
+ ImplicitronRayBundle,
+ RendererOutput,
+ RenderSamplingMode,
+)
+from pytorch3d.implicitron.models.renderer.ray_sampler import RaySamplerBase
+from pytorch3d.implicitron.models.utils import (
+ apply_chunked,
+ chunk_generator,
+ log_loss_weights,
+ preprocess_input,
+ weighted_sum_losses,
+)
+from pytorch3d.implicitron.tools import vis_utils
+from pytorch3d.implicitron.tools.config import (
+ expand_args_fields,
+ registry,
+ run_auto_creation,
+)
+
+from pytorch3d.implicitron.tools.rasterize_mc import rasterize_sparse_ray_bundle
+from pytorch3d.renderer import utils as rend_utils
+from pytorch3d.renderer.cameras import CamerasBase
+
+
+if TYPE_CHECKING:
+ from visdom import Visdom
+logger = logging.getLogger(__name__)
+
# Constructor-argument names stripped from implicit-function configs —
# presumably because the model supplies them itself rather than the user;
# the usage site is outside this chunk, confirm at the caller.
IMPLICIT_FUNCTION_ARGS_TO_REMOVE: List[str] = [
    "feature_vector_size",
    "encoding_dim",
    "latent_dim",
    "color_dim",
]
+
+
+@registry.register
+class OverfitModel(ImplicitronModelBase):
+ """
+ OverfitModel is a wrapper for the neural implicit
+ rendering and reconstruction pipeline which consists
+ of the following sequence of 4 steps:
+
+
+ (1) Ray Sampling
+ ------------------
+ Rays are sampled from an image grid based on the target view(s).
+ │
+ ▼
+ (2) Implicit Function Evaluation
+ ------------------
+ Evaluate the implicit function(s) at the sampled ray points
+ (also optionally pass in a global encoding from global_encoder).
+ │
+ ▼
+ (3) Rendering
+ ------------------
+ Render the image into the target cameras by raymarching along
+ the sampled rays and aggregating the colors and densities
+ output by the implicit function in (2).
+ │
+ ▼
+ (4) Loss Computation
+ ------------------
+ Compute losses based on the predicted target image(s).
+
+
+ The `forward` function of OverfitModel executes
+ this sequence of steps. Currently, steps 1, 2, 3
+ can be customized by intializing a subclass of the appropriate
+ base class and adding the newly created module to the registry.
+ Please see https://github.com/facebookresearch/pytorch3d/blob/main/projects/implicitron_trainer/README.md#custom-plugins
+ for more details on how to create and register a custom component.
+
+ In the config .yaml files for experiments, the parameters below are
+ contained in the
+ `model_factory_ImplicitronModelFactory_args.model_OverfitModel_args`
+ node. As OverfitModel derives from ReplaceableBase, the input arguments are
+ parsed by the run_auto_creation function to initialize the
+ necessary member modules. Please see implicitron_trainer/README.md
+ for more details on this process.
+
+ Args:
+ mask_images: Whether or not to mask the RGB image background given the
+ foreground mask (the `fg_probability` argument of `GenericModel.forward`)
+ mask_depths: Whether or not to mask the depth image background given the
+ foreground mask (the `fg_probability` argument of `GenericModel.forward`)
+ render_image_width: Width of the output image to render
+ render_image_height: Height of the output image to render
+ mask_threshold: If greater than 0.0, the foreground mask is
+ thresholded by this value before being applied to the RGB/Depth images
+ output_rasterized_mc: If True, visualize the Monte-Carlo pixel renders by
+ splatting onto an image grid. Default: False.
+ bg_color: RGB values for setting the background color of input image
+ if mask_images=True. Defaults to (0.0, 0.0, 0.0). Each renderer has its own
+ way to determine the background color of its output, unrelated to this.
+ chunk_size_grid: The total number of points which can be rendered
+ per chunk. This is used to compute the number of rays used
+ per chunk when the chunked version of the renderer is used (in order
+ to fit rendering on all rays in memory)
+ render_features_dimensions: The number of output features to render.
+ Defaults to 3, corresponding to RGB images.
+ sampling_mode_training: The sampling method to use during training. Must be
+ a value from the RenderSamplingMode Enum.
+ sampling_mode_evaluation: Same as above but for evaluation.
+ global_encoder_class_type: The name of the class to use for global_encoder,
+ which must be available in the registry. Or `None` to disable global encoder.
+ global_encoder: An instance of `GlobalEncoder`. This is used to generate an encoding
+ of the image (referred to as the global_code) that can be used to model aspects of
+ the scene such as multiple objects or morphing objects. It is up to the implicit
+ function definition how to use it, but the most typical way is to broadcast and
+ concatenate to the other inputs for the implicit function.
+ raysampler_class_type: The name of the raysampler class which is available
+ in the global registry.
+ raysampler: An instance of RaySampler which is used to emit
+ rays from the target view(s).
+ renderer_class_type: The name of the renderer class which is available in the global
+ registry.
+ renderer: A renderer class which inherits from BaseRenderer. This is used to
+ generate the images from the target view(s).
+ share_implicit_function_across_passes: If set to True
+ coarse_implicit_function is automatically set as implicit_function
+ (coarse_implicit_function=implicit_funciton). The
+ implicit_functions are then run sequentially during the rendering.
+ implicit_function_class_type: The type of implicit function to use which
+ is available in the global registry.
+ implicit_function: An instance of ImplicitFunctionBase.
+ coarse_implicit_function_class_type: The type of implicit function to use which
+ is available in the global registry.
+ coarse_implicit_function: An instance of ImplicitFunctionBase.
+ If set and `share_implicit_function_across_passes` is set to False,
+ coarse_implicit_function is instantiated on itself. It
+ is then used as the second pass during the rendering.
+ If set to None, we only do a single pass with implicit_function.
+ view_metrics: An instance of ViewMetricsBase used to compute loss terms which
+ are independent of the model's parameters.
+ view_metrics_class_type: The type of view metrics to use, must be available in
+ the global registry.
+ regularization_metrics: An instance of RegularizationMetricsBase used to compute
+ regularization terms which can depend on the model's parameters.
+ regularization_metrics_class_type: The type of regularization metrics to use,
+ must be available in the global registry.
+ loss_weights: A dictionary with a {loss_name: weight} mapping; see documentation
+ for `ViewMetrics` class for available loss functions.
+ log_vars: A list of variable names which should be logged.
+ The names should correspond to a subset of the keys of the
+ dict `preds` output by the `forward` function.
+ """ # noqa: B950
+
+ mask_images: bool = True
+ mask_depths: bool = True
+ render_image_width: int = 400
+ render_image_height: int = 400
+ mask_threshold: float = 0.5
+ output_rasterized_mc: bool = False
+ bg_color: Tuple[float, float, float] = (0.0, 0.0, 0.0)
+ chunk_size_grid: int = 4096
+ render_features_dimensions: int = 3
+ tqdm_trigger_threshold: int = 16
+
+ n_train_target_views: int = 1
+ sampling_mode_training: str = "mask_sample"
+ sampling_mode_evaluation: str = "full_grid"
+
+ # ---- global encoder settings
+ global_encoder_class_type: Optional[str] = None
+ # pyre-fixme[13]: Attribute `global_encoder` is never initialized.
+ global_encoder: Optional[GlobalEncoderBase]
+
+ # ---- raysampler
+ raysampler_class_type: str = "AdaptiveRaySampler"
+ # pyre-fixme[13]: Attribute `raysampler` is never initialized.
+ raysampler: RaySamplerBase
+
+ # ---- renderer configs
+ renderer_class_type: str = "MultiPassEmissionAbsorptionRenderer"
+ # pyre-fixme[13]: Attribute `renderer` is never initialized.
+ renderer: BaseRenderer
+
+ # ---- implicit function settings
+ share_implicit_function_across_passes: bool = False
+ implicit_function_class_type: str = "NeuralRadianceFieldImplicitFunction"
+ # pyre-fixme[13]: Attribute `implicit_function` is never initialized.
+ implicit_function: ImplicitFunctionBase
+ coarse_implicit_function_class_type: Optional[str] = None
+ # pyre-fixme[13]: Attribute `coarse_implicit_function` is never initialized.
+ coarse_implicit_function: Optional[ImplicitFunctionBase]
+
+ # ----- metrics
+ # pyre-fixme[13]: Attribute `view_metrics` is never initialized.
+ view_metrics: ViewMetricsBase
+ view_metrics_class_type: str = "ViewMetrics"
+
+ # pyre-fixme[13]: Attribute `regularization_metrics` is never initialized.
+ regularization_metrics: RegularizationMetricsBase
+ regularization_metrics_class_type: str = "RegularizationMetrics"
+
+ # ---- loss weights
+ loss_weights: Dict[str, float] = field(
+ default_factory=lambda: {
+ "loss_rgb_mse": 1.0,
+ "loss_prev_stage_rgb_mse": 1.0,
+ "loss_mask_bce": 0.0,
+ "loss_prev_stage_mask_bce": 0.0,
+ }
+ )
+
+ # ---- variables to be logged (logger automatically ignores if not computed)
+ log_vars: List[str] = field(
+ default_factory=lambda: [
+ "loss_rgb_psnr_fg",
+ "loss_rgb_psnr",
+ "loss_rgb_mse",
+ "loss_rgb_huber",
+ "loss_depth_abs",
+ "loss_depth_abs_fg",
+ "loss_mask_neg_iou",
+ "loss_mask_bce",
+ "loss_mask_beta_prior",
+ "loss_eikonal",
+ "loss_density_tv",
+ "loss_depth_neg_penalty",
+ "loss_autodecoder_norm",
+ # metrics that are only logged in 2+stage renderes
+ "loss_prev_stage_rgb_mse",
+ "loss_prev_stage_rgb_psnr_fg",
+ "loss_prev_stage_rgb_psnr",
+ "loss_prev_stage_mask_bce",
+ # basic metrics
+ "objective",
+ "epoch",
+ "sec/it",
+ ]
+ )
+
    @classmethod
    def pre_expand(cls) -> None:
        """
        Import the pluggable implicit-function and renderer implementations so
        they register themselves with the registry before this class's config
        is expanded. The imported names are intentionally unused (noqa: F401).
        """
        # use try/finally to bypass cinder's lazy imports
        try:
            from pytorch3d.implicitron.models.implicit_function.idr_feature_field import (  # noqa: F401, B950
                IdrFeatureField,
            )
            from pytorch3d.implicitron.models.implicit_function.neural_radiance_field import (  # noqa: F401, B950
                NeuralRadianceFieldImplicitFunction,
            )
            from pytorch3d.implicitron.models.implicit_function.scene_representation_networks import (  # noqa: F401, B950
                SRNImplicitFunction,
            )
            from pytorch3d.implicitron.models.renderer.lstm_renderer import (  # noqa: F401
                LSTMRenderer,
            )
            from pytorch3d.implicitron.models.renderer.multipass_ea import (  # noqa: F401
                MultiPassEmissionAbsorptionRenderer,
            )
            from pytorch3d.implicitron.models.renderer.sdf_renderer import (  # noqa: F401
                SignedDistanceFunctionRenderer,
            )
        finally:
            pass
+
    def __post_init__(self):
        """
        Finalize construction: auto-create the replaceable member modules,
        log the configured loss weights, and optionally alias the coarse
        implicit function to the fine one.
        """
        # The attribute will be filled by run_auto_creation
        run_auto_creation(self)
        log_loss_weights(self.loss_weights, logger)
        # We need to set it here since run_auto_creation
        # will create coarse_implicit_function before implicit_function
        if self.share_implicit_function_across_passes:
            self.coarse_implicit_function = self.implicit_function
+
    def forward(
        self,
        *,  # force keyword-only arguments
        image_rgb: Optional[torch.Tensor],
        camera: CamerasBase,
        fg_probability: Optional[torch.Tensor] = None,
        mask_crop: Optional[torch.Tensor] = None,
        depth_map: Optional[torch.Tensor] = None,
        sequence_name: Optional[List[str]] = None,
        frame_timestamp: Optional[torch.Tensor] = None,
        evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
        **kwargs,
    ) -> Dict[str, Any]:
        """
        Run the full model pass: preprocess the inputs, sample rendering
        rays, evaluate the implicit function(s) and render, then compute
        view/regularization metrics and the optimization objective.

        Args:
            image_rgb: A tensor of shape `(B, 3, H, W)` containing a batch of rgb images;
                the first `min(B, n_train_target_views)` images are considered targets and
                are used to supervise the renders; the rest corresponding to the source
                viewpoints from which features will be extracted.
            camera: An instance of CamerasBase containing a batch of `B` cameras corresponding
                to the viewpoints of target images, from which the rays will be sampled,
                and source images, which will be used for intersecting with target rays.
            fg_probability: A tensor of shape `(B, 1, H, W)` containing a batch of
                foreground masks.
            mask_crop: A binary tensor of shape `(B, 1, H, W)` denoting valid
                regions in the input images (i.e. regions that do not correspond
                to, e.g., zero-padding). When the `RaySampler`'s sampling mode is set to
                "mask_sample", rays will be sampled in the non zero regions.
            depth_map: A tensor of shape `(B, 1, H, W)` containing a batch of depth maps.
            sequence_name: A list of `B` strings corresponding to the sequence names
                from which images `image_rgb` were extracted. They are used to match
                target frames with relevant source frames.
            frame_timestamp: Optionally a tensor of shape `(B,)` containing a batch
                of frame timestamps.
            evaluation_mode: one of EvaluationMode.TRAINING or
                EvaluationMode.EVALUATION which determines the settings used for
                rendering.

        Returns:
            preds: A dictionary containing all outputs of the forward pass including the
                rendered images, depths, masks, losses and other metrics.
        """
        # Apply the configured masking/thresholding to the raw inputs.
        image_rgb, fg_probability, depth_map = preprocess_input(
            image_rgb,
            fg_probability,
            depth_map,
            self.mask_images,
            self.mask_depths,
            self.mask_threshold,
            self.bg_color,
        )

        # Determine the used ray sampling mode.
        sampling_mode = RenderSamplingMode(
            self.sampling_mode_training
            if evaluation_mode == EvaluationMode.TRAINING
            else self.sampling_mode_evaluation
        )

        # (1) Sample rendering rays with the ray sampler.
        # pyre-ignore[29]
        ray_bundle: ImplicitronRayBundle = self.raysampler(
            camera,
            evaluation_mode,
            mask=(
                mask_crop
                if mask_crop is not None
                and sampling_mode == RenderSamplingMode.MASK_SAMPLE
                else None
            ),
        )

        # Extra per-ray inputs that must be chunked together with the rays
        # (e.g. the object mask required by SDF-based renderers).
        inputs_to_be_chunked = {}
        if fg_probability is not None and self.renderer.requires_object_mask():
            sampled_fb_prob = rend_utils.ndc_grid_sample(
                fg_probability, ray_bundle.xys, mode="nearest"
            )
            inputs_to_be_chunked["object_mask"] = sampled_fb_prob > 0.5

        # (2)-(3) Implicit function evaluation and Rendering
        implicit_functions: List[Union[Callable, ImplicitFunctionBase]] = [
            self.implicit_function
        ]
        if self.coarse_implicit_function is not None:
            implicit_functions = [self.coarse_implicit_function, self.implicit_function]

        if self.global_encoder is not None:
            # Bind the global (per-sequence / per-timestamp) code to every
            # implicit function via functools.partial.
            global_code = self.global_encoder(  # pyre-fixme[29]
                sequence_name=sequence_name,
                frame_timestamp=frame_timestamp,
            )
            implicit_functions = [
                (
                    functools.partial(implicit_function, global_code=global_code)
                    if isinstance(implicit_function, Callable)
                    else functools.partial(
                        implicit_function.forward, global_code=global_code
                    )
                )
                for implicit_function in implicit_functions
            ]
        rendered = self._render(
            ray_bundle=ray_bundle,
            sampling_mode=sampling_mode,
            evaluation_mode=evaluation_mode,
            implicit_functions=implicit_functions,
            inputs_to_be_chunked=inputs_to_be_chunked,
        )

        # A dict to store losses as well as rendering results.
        preds: Dict[str, Any] = self.view_metrics(
            results={},
            raymarched=rendered,
            ray_bundle=ray_bundle,
            image_rgb=image_rgb,
            depth_map=depth_map,
            fg_probability=fg_probability,
            mask_crop=mask_crop,
        )

        preds.update(
            self.regularization_metrics(
                results=preds,
                model=self,
            )
        )

        if sampling_mode == RenderSamplingMode.MASK_SAMPLE:
            if self.output_rasterized_mc:
                # Visualize the monte-carlo pixel renders by splatting onto
                # an image grid.
                (
                    preds["images_render"],
                    preds["depths_render"],
                    preds["masks_render"],
                ) = rasterize_sparse_ray_bundle(
                    ray_bundle,
                    rendered.features,
                    (self.render_image_height, self.render_image_width),
                    rendered.depths,
                    masks=rendered.masks,
                )
        elif sampling_mode == RenderSamplingMode.FULL_GRID:
            # permute (0, 3, 1, 2): move the last (feature) dim to the
            # channel position of the standard image layout.
            preds["images_render"] = rendered.features.permute(0, 3, 1, 2)
            preds["depths_render"] = rendered.depths.permute(0, 3, 1, 2)
            preds["masks_render"] = rendered.masks.permute(0, 3, 1, 2)

            preds["implicitron_render"] = ImplicitronRender(
                image_render=preds["images_render"],
                depth_render=preds["depths_render"],
                mask_render=preds["masks_render"],
            )
        else:
            raise AssertionError("Unreachable state")

        # (4) Compute losses
        # finally get the optimization objective using self.loss_weights
        objective = self._get_objective(preds)
        if objective is not None:
            preds["objective"] = objective

        return preds
+
+ def _get_objective(self, preds: Dict[str, torch.Tensor]) -> Optional[torch.Tensor]:
+ """
+ A helper function to compute the overall loss as the dot product
+ of individual loss functions with the corresponding weights.
+ """
+ return weighted_sum_losses(preds, self.loss_weights)
+
+ def visualize(
+ self,
+ viz: Optional["Visdom"],
+ visdom_env_imgs: str,
+ preds: Dict[str, Any],
+ prefix: str,
+ ) -> None:
+ """
+ Helper function to visualize the predictions generated
+ in the forward pass.
+
+ Args:
+ viz: Visdom connection object
+ visdom_env_imgs: name of visdom environment for the images.
+ preds: predictions dict like returned by forward()
+ prefix: prepended to the names of images
+ """
+ if viz is None or not viz.check_connection():
+ logger.info("no visdom server! -> skipping batch vis")
+ return
+
+ idx_image = 0
+ title = f"{prefix}_im{idx_image}"
+
+ vis_utils.visualize_basics(viz, preds, visdom_env_imgs, title=title)
+
    def _render(
        self,
        *,
        ray_bundle: ImplicitronRayBundle,
        inputs_to_be_chunked: Dict[str, torch.Tensor],
        sampling_mode: RenderSamplingMode,
        **kwargs,
    ) -> RendererOutput:
        """
        Invoke `self.renderer` on the sampled rays, optionally splitting a
        full-grid render into chunks of `self.chunk_size_grid` rays.

        Args:
            ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the
                sampled rendering rays.
            inputs_to_be_chunked: A collection of tensor of shape `(B, _, H, W)`. E.g.
                SignedDistanceFunctionRenderer requires "object_mask", shape
                (B, 1, H, W), the silhouette of the object in the image. When
                chunking, they are passed to the renderer as shape
                `(B, _, chunksize)`.
            sampling_mode: The sampling method to use. Must be a value from the
                RenderSamplingMode Enum.

        Returns:
            An instance of RendererOutput
        """
        if sampling_mode == RenderSamplingMode.FULL_GRID and self.chunk_size_grid > 0:
            # Render the grid in chunks; the lambda stitches the per-chunk
            # outputs back to the spatial shape of the ray bundle.
            return apply_chunked(
                self.renderer,
                chunk_generator(
                    self.chunk_size_grid,
                    ray_bundle,
                    inputs_to_be_chunked,
                    self.tqdm_trigger_threshold,
                    **kwargs,
                ),
                lambda batch: torch.cat(batch, dim=1).reshape(
                    *ray_bundle.lengths.shape[:-1], -1
                ),
            )
        else:
            # pyre-fixme[29]: `BaseRenderer` is not a function.
            return self.renderer(
                ray_bundle=ray_bundle,
                **inputs_to_be_chunked,
                **kwargs,
            )
+
+ @classmethod
+ def raysampler_tweak_args(cls, type, args: DictConfig) -> None:
+ """
+ We don't expose certain fields of the raysampler because we want to set
+ them from our own members.
+ """
+ del args["sampling_mode_training"]
+ del args["sampling_mode_evaluation"]
+ del args["image_width"]
+ del args["image_height"]
+
    def create_raysampler(self):
        """
        Instantiate `self.raysampler` from the registered raysampler class
        selected by `raysampler_class_type`, forwarding the sampling modes
        and render image size held on this model (the fields hidden from
        the config by `raysampler_tweak_args`).
        """
        extra_args = {
            "sampling_mode_training": self.sampling_mode_training,
            "sampling_mode_evaluation": self.sampling_mode_evaluation,
            "image_width": self.render_image_width,
            "image_height": self.render_image_height,
        }
        raysampler_args = getattr(
            self, "raysampler_" + self.raysampler_class_type + "_args"
        )
        self.raysampler = registry.get(RaySamplerBase, self.raysampler_class_type)(
            **raysampler_args, **extra_args
        )
+
+ @classmethod
+ def renderer_tweak_args(cls, type, args: DictConfig) -> None:
+ """
+ We don't expose certain fields of the renderer because we want to set
+ them based on other inputs.
+ """
+ args.pop("render_features_dimensions", None)
+ args.pop("object_bounding_sphere", None)
+
    def create_renderer(self):
        """
        Instantiate `self.renderer` from the registered renderer class
        selected by `renderer_class_type`.

        For SignedDistanceFunctionRenderer, additionally forwards the render
        feature dimensionality and the raysampler's `scene_extent` (which the
        raysampler must define) as the object bounding sphere.

        Raises:
            ValueError: if SignedDistanceFunctionRenderer is requested but the
                configured raysampler has no `scene_extent` field.
        """
        extra_args = {}

        if self.renderer_class_type == "SignedDistanceFunctionRenderer":
            extra_args["render_features_dimensions"] = self.render_features_dimensions
            if not hasattr(self.raysampler, "scene_extent"):
                raise ValueError(
                    "SignedDistanceFunctionRenderer requires"
                    + " a raysampler that defines the 'scene_extent' field"
                    + " (this field is supported by, e.g., the adaptive raysampler - "
                    + " self.raysampler_class_type='AdaptiveRaySampler')."
                )
            extra_args["object_bounding_sphere"] = self.raysampler.scene_extent

        renderer_args = getattr(self, "renderer_" + self.renderer_class_type + "_args")
        self.renderer = registry.get(BaseRenderer, self.renderer_class_type)(
            **renderer_args, **extra_args
        )
+
+ @classmethod
+ def implicit_function_tweak_args(cls, type, args: DictConfig) -> None:
+ """
+ We don't expose certain implicit_function fields because we want to set
+ them based on other inputs.
+ """
+ for arg in IMPLICIT_FUNCTION_ARGS_TO_REMOVE:
+ args.pop(arg, None)
+
+ @classmethod
+ def coarse_implicit_function_tweak_args(cls, type, args: DictConfig) -> None:
+ """
+ We don't expose certain implicit_function fields because we want to set
+ them based on other inputs.
+ """
+ for arg in IMPLICIT_FUNCTION_ARGS_TO_REMOVE:
+ args.pop(arg, None)
+
    def _create_extra_args_for_implicit_function(self) -> Dict[str, Any]:
        """
        Build the constructor kwargs of the implicit function that are derived
        from other members (the global encoder's encoding dim and the number
        of render feature channels) rather than exposed in the config
        (see `implicit_function_tweak_args`).
        """
        extra_args = {}
        # Dimensionality of the optional global (sequence/timestamp) code;
        # 0 when no global encoder is configured.
        global_encoder_dim = (
            0 if self.global_encoder is None else self.global_encoder.get_encoding_dim()
        )
        if self.implicit_function_class_type in (
            "NeuralRadianceFieldImplicitFunction",
            "NeRFormerImplicitFunction",
        ):
            extra_args["latent_dim"] = global_encoder_dim
            extra_args["color_dim"] = self.render_features_dimensions

        if self.implicit_function_class_type == "IdrFeatureField":
            # NOTE(review): assumes IdrFeatureField accepts a
            # "feature_work_size" kwarg; upstream pytorch3d passes the encoder
            # dim as "encoding_dim" here -- verify against the IdrFeatureField
            # definition in this tree.
            extra_args["feature_work_size"] = global_encoder_dim
            extra_args["feature_vector_size"] = self.render_features_dimensions

        if self.implicit_function_class_type == "SRNImplicitFunction":
            extra_args["latent_dim"] = global_encoder_dim
        return extra_args
+
    def create_implicit_function(self) -> None:
        """
        Instantiate `self.implicit_function` from the registered class
        selected by `implicit_function_class_type`, merging the user config
        with the internally-derived extra args.

        Raises:
            ValueError: if the `implicit_function_<type>_args` config field
                is not present on this object.
        """
        implicit_function_type = registry.get(
            ImplicitFunctionBase, self.implicit_function_class_type
        )
        expand_args_fields(implicit_function_type)

        config_name = f"implicit_function_{self.implicit_function_class_type}_args"
        config = getattr(self, config_name, None)
        if config is None:
            raise ValueError(f"{config_name} not present")

        extra_args = self._create_extra_args_for_implicit_function()
        self.implicit_function = implicit_function_type(**config, **extra_args)
+
    def create_coarse_implicit_function(self) -> None:
        """
        Instantiate `self.coarse_implicit_function` (used by the first pass
        of multi-pass renderers). Three cases:
          - a class type is configured and the function is not shared:
            build a fresh instance;
          - `share_implicit_function_across_passes` is set: defer to
            `__post_init__`, which aliases it to `self.implicit_function`
            (created after this method runs);
          - otherwise: set it to None.
        """
        # If coarse_implicit_function_class_type has been defined
        # then we init a module based on its arguments
        if (
            self.coarse_implicit_function_class_type is not None
            and not self.share_implicit_function_across_passes
        ):
            config_name = "coarse_implicit_function_{0}_args".format(
                self.coarse_implicit_function_class_type
            )
            config = getattr(self, config_name, {})

            implicit_function_type = registry.get(
                ImplicitFunctionBase,
                # pyre-ignore: config is None allow to check if this is None.
                self.coarse_implicit_function_class_type,
            )
            expand_args_fields(implicit_function_type)

            extra_args = self._create_extra_args_for_implicit_function()
            self.coarse_implicit_function = implicit_function_type(
                **config, **extra_args
            )
        elif self.share_implicit_function_across_passes:
            # Since coarse_implicit_function is initialised before
            # implicit_function we handle this case in the post_init.
            pass
        else:
            self.coarse_implicit_function = None
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/base.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..a92cb8f173a08f3f534407e640ed041b8eb68dcf
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/base.py
@@ -0,0 +1,426 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Dict, List, Optional, Tuple
+
+import torch
+from pytorch3d.implicitron.tools.config import ReplaceableBase
+from pytorch3d.ops import packed_to_padded
+from pytorch3d.renderer.implicit.utils import ray_bundle_variables_to_ray_points
+
+
class EvaluationMode(Enum):
    """
    Denotes whether a model/renderer call happens during training or
    evaluation; callers use it e.g. to pick the ray sampling mode and
    rendering settings.
    """

    TRAINING = "training"
    EVALUATION = "evaluation"
+
+
class RenderSamplingMode(Enum):
    """
    How rendering rays are sampled from an image: only inside a provided
    mask (MASK_SAMPLE) or over the full image grid (FULL_GRID).
    """

    MASK_SAMPLE = "mask_sample"
    FULL_GRID = "full_grid"
+
+
class ImplicitronRayBundle:
    """
    Parametrizes points along projection rays by storing ray `origins`,
    `directions` vectors and `lengths` at which the ray-points are sampled.
    Furthermore, the xy-locations (`xys`) of the ray pixels are stored as well.
    Note that `directions` don't have to be normalized; they define unit vectors
    in the respective 1D coordinate systems; see documentation for
    :func:`ray_bundle_to_ray_points` for the conversion formula.

    Ray bundle may represent rays from multiple cameras. In that case, cameras
    are stored in the packed form (i.e. rays from the same camera are stored in
    the consecutive elements). The following indices will be set:
        camera_ids: A tensor of shape (N, ) which indicates which camera
            was used to sample the rays. `N` is the number of different
            sampled cameras.
        camera_counts: A tensor of shape (N, ) which indicates how many times the
            corresponding camera in `camera_ids` was sampled.
            `sum(camera_counts) == minibatch`, where `minibatch = origins.shape[0]`.

    Attributes:
        origins: A tensor of shape `(..., 3)` denoting the
            origins of the sampling rays in world coords.
        directions: A tensor of shape `(..., 3)` containing the direction
            vectors of sampling rays in world coords. They don't have to be normalized;
            they define unit vectors in the respective 1D coordinate systems; see
            documentation for :func:`ray_bundle_to_ray_points` for the conversion formula.
        lengths: A tensor of shape `(..., num_points_per_ray)`
            containing the lengths at which the rays are sampled.
        xys: A tensor of shape `(..., 2)`, the xy-locations (`xys`) of the ray pixels
        camera_ids: An optional tensor of shape (N, ) which indicates which camera
            was used to sample the rays. `N` is the number of unique sampled cameras.
        camera_counts: An optional tensor of shape (N, ) indicates how many times the
            corresponding camera in `camera_ids` was sampled.
            `sum(camera_counts)==total_number_of_rays`.
        bins: An optional tensor of shape `(..., num_points_per_ray + 1)`
            containing the bins at which the rays are sampled. In this case
            lengths should be equal to the midpoints of bins `(..., num_points_per_ray)`.
        pixel_radii_2d: An optional tensor of shape `(..., 1)`
            base radii of the conical frustums.

    Raises:
        ValueError: If neither bins nor lengths are provided.
        ValueError: If bins is provided and its last dim is 1 or smaller.
    """

    def __init__(
        self,
        origins: torch.Tensor,
        directions: torch.Tensor,
        lengths: Optional[torch.Tensor],
        xys: torch.Tensor,
        camera_ids: Optional[torch.LongTensor] = None,
        camera_counts: Optional[torch.LongTensor] = None,
        bins: Optional[torch.Tensor] = None,
        pixel_radii_2d: Optional[torch.Tensor] = None,
    ):
        if bins is not None and bins.shape[-1] <= 1:
            raise ValueError(
                "The last dim of bins must be at least superior or equal to 2."
            )

        if bins is None and lengths is None:
            raise ValueError(
                "Please set either bins or lengths to initialize an ImplicitronRayBundle."
            )

        self.origins = origins
        self.directions = directions
        # When bins are given, lengths are derived from them on the fly
        # (see the `lengths` property), so the explicit value is dropped.
        self._lengths = lengths if bins is None else None
        self.xys = xys
        self.bins = bins
        self.pixel_radii_2d = pixel_radii_2d
        self.camera_ids = camera_ids
        self.camera_counts = camera_counts

    @property
    def lengths(self) -> torch.Tensor:
        if self.bins is not None:
            # equivalent to: 0.5 * (bins[..., 1:] + bins[..., :-1]) but more efficient
            # pyre-ignore
            return torch.lerp(self.bins[..., :-1], self.bins[..., 1:], 0.5)
        return self._lengths

    @lengths.setter
    def lengths(self, value):
        if self.bins is not None:
            raise ValueError(
                "If the bins attribute is not None you cannot set the lengths attribute."
            )
        else:
            self._lengths = value

    def float_(self) -> None:
        """Moves the tensors to float dtype in place
        (helpful for mixed-precision tensors).
        """
        self.origins = self.origins.float()
        self.directions = self.directions.float()
        self._lengths = self._lengths.float() if self._lengths is not None else None
        self.xys = self.xys.float()
        self.bins = self.bins.float() if self.bins is not None else None
        self.pixel_radii_2d = (
            self.pixel_radii_2d.float() if self.pixel_radii_2d is not None else None
        )

    def is_packed(self) -> bool:
        """
        Returns whether the ImplicitronRayBundle carries data in packed state
        """
        return self.camera_ids is not None and self.camera_counts is not None

    def get_padded_xys(self) -> Tuple[torch.Tensor, torch.LongTensor, int]:
        """
        For a packed ray bundle, returns padded rays. Assumes the input bundle is packed
        (i.e. `camera_ids` and `camera_counts` are set).

        Returns:
            - xys: Tensor of shape (N, max_size, ...) containing the padded
                representation of the pixel coordinates;
                where max_size is max of `camera_counts`. The values for camera id `i`
                will be copied to `xys[i, :]`, with zeros padding out the extra inputs.
            - first_idxs: cumulative sum of `camera_counts` defining the boundaries
                between cameras in the packed representation
            - num_inputs: the number of cameras in the bundle.
        """
        if not self.is_packed():
            raise ValueError("get_padded_xys can be called only on a packed bundle")

        camera_counts = self.camera_counts
        assert camera_counts is not None

        # Starting offset of each camera's rays in the packed representation.
        cumsum = torch.cumsum(camera_counts, dim=0, dtype=torch.long)
        first_idxs = torch.cat(
            (camera_counts.new_zeros((1,), dtype=torch.long), cumsum[:-1])
        )
        num_inputs = camera_counts.sum().item()
        max_size = torch.max(camera_counts).item()
        xys = packed_to_padded(self.xys, first_idxs, max_size)
        # pyre-ignore [7] pytorch typeshed inaccuracy
        return xys, first_idxs, num_inputs
+
+
@dataclass
class RendererOutput:
    """
    A structure for storing the output of a renderer.

    Args:
        features: rendered features (usually RGB colors), (B, ..., C) tensor.
        depths: rendered ray-termination depth map, in NDC coordinates, (B, ..., 1) tensor.
        masks: rendered object mask, values in [0, 1], (B, ..., 1) tensor.
        prev_stage: for multi-pass renderers (e.g. in NeRF),
            a reference to the output of the previous stage.
        normals: surface normals, for renderers that estimate them; (B, ..., 3) tensor.
        points: ray-termination points in the world coordinates, (B, ..., 3) tensor.
        weights: optional per-sample weights; exact semantics are
            renderer-specific (not defined in this module).
        aux: dict for implementation-specific renderer outputs.
    """

    features: torch.Tensor
    depths: torch.Tensor
    masks: torch.Tensor
    prev_stage: Optional[RendererOutput] = None
    normals: Optional[torch.Tensor] = None
    points: Optional[torch.Tensor] = None  # TODO: redundant with depths
    weights: Optional[torch.Tensor] = None
    aux: Dict[str, Any] = field(default_factory=lambda: {})
+
+
class ImplicitFunctionWrapper(torch.nn.Module):
    """
    Thin wrapper around an implicit-function module which lets extra keyword
    arguments be "bound" once and then implicitly forwarded on every call.
    Bound kwargs take precedence over kwargs passed at call time.
    """

    def __init__(self, fn: torch.nn.Module):
        super().__init__()
        self._fn = fn
        self.bound_args = {}

    def bind_args(self, **bound_args):
        """Store `bound_args` for later calls and notify the wrapped module."""
        self.bound_args = bound_args
        self._fn.on_bind_args()

    def unbind_args(self):
        """Drop all previously bound keyword arguments."""
        self.bound_args = {}

    def forward(self, *args, **kwargs):
        merged_kwargs = dict(kwargs)
        merged_kwargs.update(self.bound_args)
        return self._fn(*args, **merged_kwargs)
+
+
class BaseRenderer(ABC, ReplaceableBase):
    """
    Base class for all Renderer implementations.
    """

    def requires_object_mask(self) -> bool:
        """
        Whether `forward` needs the object_mask.
        Subclasses that need it (e.g. SDF-based renderers) override this
        to return True.
        """
        return False

    @abstractmethod
    def forward(
        self,
        ray_bundle: ImplicitronRayBundle,
        implicit_functions: List[ImplicitFunctionWrapper],
        evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
        **kwargs,
    ) -> RendererOutput:
        """
        Each Renderer should implement its own forward function
        that returns an instance of RendererOutput.

        Args:
            ray_bundle: An ImplicitronRayBundle object containing the following variables:
                origins: A tensor of shape (minibatch, ..., 3) denoting
                    the origins of the rendering rays.
                directions: A tensor of shape (minibatch, ..., 3)
                    containing the direction vectors of rendering rays.
                lengths: A tensor of shape
                    (minibatch, ..., num_points_per_ray) containing the
                    lengths at which the ray points are sampled.
                    The coordinates of the points on the rays are thus computed
                    as `origins + lengths * directions`.
                xys: A tensor of shape
                    (minibatch, ..., 2) containing the
                    xy locations of each ray's pixel in the NDC screen space.
                camera_ids: A tensor of shape (N, ) which indicates which camera
                    was used to sample the rays. `N` is the number of different
                    sampled cameras.
                camera_counts: A tensor of shape (N, ) which indicates how many times the
                    corresponding camera in `camera_ids` was sampled.
                    `sum(camera_counts)==minibatch`
            implicit_functions: List of ImplicitFunctionWrappers which define the
                implicit function methods to be used. Most Renderers only allow
                a single implicit function. Currently, only the
                MultiPassEmissionAbsorptionRenderer allows specifying multiple
                values in the list.
            evaluation_mode: one of EvaluationMode.TRAINING or
                EvaluationMode.EVALUATION which determines the settings used for
                rendering.
            **kwargs: In addition to the name args, custom keyword args can be specified.
                For example in the SignedDistanceFunctionRenderer, an object_mask is
                required which needs to be passed via the kwargs.

        Returns:
            instance of RendererOutput
        """
        pass
+
+
def compute_3d_diagonal_covariance_gaussian(
    rays_directions: torch.Tensor,
    rays_dir_variance: torch.Tensor,
    radii_variance: torch.Tensor,
    eps: float = 1e-6,
) -> torch.Tensor:
    """
    Transform the variances (rays_dir_variance, radii_variance) of the
    Gaussians from the coordinate frame of the conical frustum to 3D world
    coordinates, following equation 16 of `MIP-NeRF `_.

    Args:
        rays_directions: A tensor of shape `(..., 3)`
        rays_dir_variance: A tensor of shape `(..., num_intervals)` with the
            variance of the conical frustum along the ray direction.
        radii_variance: A tensor of shape `(..., num_intervals)` with the
            variance of the conical frustum with respect to its radius.
        eps: a small number to prevent division by zero.

    Returns:
        A tensor of shape `(..., num_intervals, 3)` holding the diagonal of
        the covariance matrix.
    """
    # Diagonal of the outer product d d^T of each ray direction with itself.
    dir_sq = rays_directions * rays_directions
    # Squared direction magnitude, clamped to avoid division by zero.
    norm_sq = dir_sq.sum(dim=-1, keepdim=True).clamp(min=eps)

    # Diagonal of (I - d d^T / |d|^2): the component orthogonal to the ray.
    ortho_diag = 1 - dir_sq / norm_sq
    along_ray = rays_dir_variance[..., None] * dir_sq[..., None, :]
    across_ray = radii_variance[..., None] * ortho_diag[..., None, :]
    return along_ray + across_ray
+
+
def approximate_conical_frustum_as_gaussians(
    bins: torch.Tensor, radii: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Approximate each conical frustum along a ray by a Gaussian, characterized
    by a mean along the ray direction (the parameter t of the cone) and two
    variances: one along the ray direction, one with respect to the radius.

    Uses the numerically stable formulation of equation 7 of
    `MIP-NeRF `_; see the paper's appendix for the derivation
    of the mean and variances.

    Args:
        bins: A tensor of shape `(..., num_points_per_ray + 1)` with the
            sampling bins along the rays; `bins[..., t]` and `bins[..., t + 1]`
            are the left and right end points of interval `t`.
        radii: A tensor of shape `(..., 1)` with the base radii of the
            conical frustums.

    Returns:
        rays_dir_mean: `(..., num_intervals)` mean along the ray direction.
        rays_dir_variance: `(..., num_intervals)` variance along the ray
            direction.
        radii_variance: `(..., num_intervals)` variance of the conical
            frustum with respect to its radius.
    """
    # Interval midpoints and half-widths.
    mu = torch.lerp(bins[..., 1:], bins[..., :-1], 0.5)
    half_width = torch.diff(bins, dim=-1) / 2

    mu_sq = torch.pow(mu, 2)
    hw_sq = torch.pow(half_width, 2)
    hw_quad = torch.pow(half_width, 4)

    denom = 3 * mu_sq + hw_sq

    # Mean along the ray direction.
    rays_dir_mean = mu + 2 * mu * hw_sq / denom

    # Variance along the ray direction.
    rays_dir_variance = hw_sq / 3 - (4 / 15) * (
        hw_quad * (12 * mu_sq - hw_sq) / torch.pow(denom, 2)
    )

    # Variance with respect to the frustum radius.
    radii_variance = torch.pow(radii, 2) * (
        mu_sq / 4 + (5 / 12) * hw_sq - 4 / 15 * (hw_quad) / denom
    )
    return rays_dir_mean, rays_dir_variance, radii_variance
+
+
def conical_frustum_to_gaussian(
    ray_bundle: ImplicitronRayBundle,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Approximate the conical frustums traced by a ray bundle as Gaussians.

    Args:
        ray_bundle: bundle with `origins` `(..., 3)`, `directions` `(..., 3)`,
            `lengths` `(..., num_points_per_ray)`,
            `bins` `(..., num_points_per_ray + 1)` and `pixel_radii_2d`
            `(..., 1)` set.

    Returns:
        means: A tensor of shape `(..., num_points_per_ray - 1, 3)` with the
            means of the Gaussians approximating the conical frustums.
        diag_covariances: A tensor of shape `(..., num_points_per_ray - 1, 3)`
            with the diagonal covariance matrices of those Gaussians.

    Raises:
        ValueError: if `bins` or `pixel_radii_2d` is missing from the bundle.
    """
    # Both the bins and the per-pixel radii are required to define the cones.
    if ray_bundle.pixel_radii_2d is None or ray_bundle.bins is None:
        raise ValueError(
            "RayBundle pixel_radii_2d or bins have not been provided."
            " Look at pytorch3d.renderer.implicit.renderer.ray_sampler::"
            "AbstractMaskRaySampler to see how to compute them. Have you forgot to set"
            "`cast_ray_bundle_as_cone` to True?"
        )

    t_mean, t_variance, radius_variance = approximate_conical_frustum_as_gaussians(
        ray_bundle.bins,
        ray_bundle.pixel_radii_2d,
    )
    means = ray_bundle_variables_to_ray_points(
        ray_bundle.origins, ray_bundle.directions, t_mean
    )
    diag_covariances = compute_3d_diagonal_covariance_gaussian(
        ray_bundle.directions, t_variance, radius_variance
    )
    return means, diag_covariances
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/lstm_renderer.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/lstm_renderer.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e95dc630283092e2f89d48821a78bb312a49619
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/lstm_renderer.py
@@ -0,0 +1,189 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import copy
+import logging
+from typing import List, Optional, Tuple
+
+import torch
+from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
+from pytorch3d.implicitron.tools.config import registry
+
+from .base import BaseRenderer, EvaluationMode, ImplicitFunctionWrapper, RendererOutput
+
+
+logger = logging.getLogger(__name__)
+
+
@registry.register
class LSTMRenderer(BaseRenderer, torch.nn.Module):
    """
    Implements the learnable LSTM raymarching function from SRN [1].
    This requires there to be one implicit function, and it is expected to be
    like SRNImplicitFunction or SRNHyperNetImplicitFunction.

    Settings:
        num_raymarch_steps: The number of LSTM raymarching steps.
        init_depth: Initializes the bias of the last raymarching LSTM layer so that
            the farthest point from the camera reaches a far z-plane that
            lies `init_depth` units from the camera plane.
        init_depth_noise_std: The standard deviation of the random normal noise
            added to the initial depth of each marched ray.
        hidden_size: The dimensionality of the LSTM's hidden state.
        n_feature_channels: The number of feature channels returned by the
            implicit_function evaluated at each raymarching step.
        bg_color: If supplied, used as the background color. Otherwise the pixel
            generator is used everywhere. This has to have length either 1
            (for a constant value for all output channels) or equal to the number
            of output channels (which is `out_features` on the pixel generator,
            typically 3.)
        verbose: If `True`, logs raymarching debug info.

    References:
        [1] Sitzmann, V. and Zollhöfer, M. and Wetzstein, G..
            "Scene representation networks: Continuous 3d-structure-aware
            neural scene representations." NeurIPS 2019.
    """

    num_raymarch_steps: int = 10
    init_depth: float = 17.0
    init_depth_noise_std: float = 5e-4
    hidden_size: int = 16
    n_feature_channels: int = 256
    bg_color: Optional[List[float]] = None
    verbose: bool = False

    def __post_init__(self):
        self._lstm = torch.nn.LSTMCell(
            input_size=self.n_feature_channels,
            hidden_size=self.hidden_size,
        )
        # NOTE(review): _init_recurrent_weights only matches the module types
        # torch.nn.GRU/LSTM/RNN, not LSTMCell, so this apply() looks like a
        # no-op here — confirm this is the intended (upstream SRN) behavior.
        self._lstm.apply(_init_recurrent_weights)
        _lstm_forget_gate_init(self._lstm)
        self._out_layer = torch.nn.Linear(self.hidden_size, 1)

        # Bias the predicted step size so that num_raymarch_steps equal steps
        # sum to init_depth; the near-zero weight init keeps early predictions
        # close to that constant step.
        one_step = self.init_depth / self.num_raymarch_steps
        self._out_layer.bias.data.fill_(one_step)
        self._out_layer.weight.data.normal_(mean=0.0, std=1e-3)

    def forward(
        self,
        ray_bundle: ImplicitronRayBundle,
        implicit_functions: List[ImplicitFunctionWrapper],
        evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
        **kwargs,
    ) -> RendererOutput:
        """
        Args:
            ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the
                sampled rendering rays.
            implicit_functions: A single-element list of ImplicitFunctionWrappers which
                defines the implicit function to be used.
            evaluation_mode: one of EvaluationMode.TRAINING or
                EvaluationMode.EVALUATION which determines the settings used for
                rendering, specifically the RayPointRefiner and the density_noise_std.

        Returns:
            instance of RendererOutput
        """
        if len(implicit_functions) != 1:
            raise ValueError("LSTM renderer expects a single implicit function.")

        implicit_function = implicit_functions[0]

        if ray_bundle.lengths.shape[-1] != 1:
            raise ValueError(
                "LSTM renderer requires a ray-bundle with a single point per ray"
                + " which is the initial raymarching point."
            )

        # jitter the initial depths

        ray_bundle_t = copy.copy(ray_bundle)
        # The addition below creates a fresh lengths tensor, so the in-place
        # `+=` in the loop does not mutate the caller's ray_bundle.lengths.
        ray_bundle_t.lengths = (
            ray_bundle.lengths
            + torch.randn_like(ray_bundle.lengths) * self.init_depth_noise_std
        )

        states: List[Optional[Tuple[torch.Tensor, torch.Tensor]]] = [None]
        signed_distance = torch.zeros_like(ray_bundle_t.lengths)
        raymarch_features = None
        for t in range(self.num_raymarch_steps + 1):
            # move signed_distance along each ray
            ray_bundle_t.lengths += signed_distance

            # eval the raymarching function
            raymarch_features, _ = implicit_function(
                ray_bundle=ray_bundle_t,
                raymarch_features=None,
            )
            if self.verbose:
                msg = (
                    f"{t}: mu={float(signed_distance.mean()):1.2e};"
                    + f" std={float(signed_distance.std()):1.2e};"
                    + f" mu_d={float(ray_bundle_t.lengths.mean()):1.2e};"
                    + f" std_d={float(ray_bundle_t.lengths.std()):1.2e};"
                )
                logger.info(msg)
            # The extra final iteration only evaluates features at the marched
            # points; no further LSTM step is taken.
            if t == self.num_raymarch_steps:
                break

            # run the lstm marcher
            state_h, state_c = self._lstm(
                raymarch_features.view(-1, raymarch_features.shape[-1]),
                states[-1],
            )
            if state_h.requires_grad:
                # Clamp gradients flowing back into the hidden state to
                # stabilize backprop through the recurrence.
                state_h.register_hook(lambda x: x.clamp(min=-10, max=10))
            # predict the next step size
            signed_distance = self._out_layer(state_h).view(ray_bundle_t.lengths.shape)
            # log the lstm states
            states.append((state_h, state_c))

        opacity_logits, features = implicit_function(
            raymarch_features=raymarch_features,
            ray_bundle=ray_bundle_t,
        )
        mask = torch.sigmoid(opacity_logits)
        # Convert marched z-lengths to euclidean depth by scaling with the
        # per-ray direction norm.
        depth = ray_bundle_t.lengths * ray_bundle_t.directions.norm(
            dim=-1, keepdim=True
        )

        if self.bg_color is not None:
            # Blend the background color with the rendered features using the
            # predicted opacity as the interpolation weight.
            background = features.new_tensor(self.bg_color)
            features = torch.lerp(background, features, mask)

        return RendererOutput(
            features=features[..., 0, :],
            depths=depth,
            masks=mask[..., 0, :],
        )
+
+
+def _init_recurrent_weights(self) -> None:
+ # copied from SRN codebase
+ for m in self.modules():
+ if type(m) in [torch.nn.GRU, torch.nn.LSTM, torch.nn.RNN]:
+ for name, param in m.named_parameters():
+ if "weight_ih" in name:
+ torch.nn.init.kaiming_normal_(param.data)
+ elif "weight_hh" in name:
+ torch.nn.init.orthogonal_(param.data)
+ elif "bias" in name:
+ param.data.fill_(0)
+
+
+def _lstm_forget_gate_init(lstm_layer) -> None:
+ # copied from SRN codebase
+ for name, parameter in lstm_layer.named_parameters():
+ if "bias" not in name:
+ continue
+ n = parameter.size(0)
+ start, end = n // 4, n // 2
+ parameter.data[start:end].fill_(1.0)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/multipass_ea.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/multipass_ea.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6367fc871a1166a700028753b14d00d5d662bb2
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/multipass_ea.py
@@ -0,0 +1,186 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from typing import List
+
+import torch
+from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
+from pytorch3d.implicitron.tools.config import registry, run_auto_creation
+
+from .base import BaseRenderer, EvaluationMode, ImplicitFunctionWrapper, RendererOutput
+from .ray_point_refiner import RayPointRefiner
+from .raymarcher import RaymarcherBase
+
+
@registry.register
class MultiPassEmissionAbsorptionRenderer(BaseRenderer, torch.nn.Module):
    """
    Implements the multi-pass rendering function, in particular,
    with emission-absorption ray marching used in NeRF [1]. First, it evaluates
    opacity-based ray-point weights and then optionally (in case more implicit
    functions are given) resamples points using importance sampling and evaluates
    new weights.

    During each ray marching pass, features, depth map, and masks
    are integrated: Let o_i be the opacity estimated by the implicit function,
    and d_i be the offset between points `i` and `i+1` along the respective ray.
    Ray marching is performed using the following equations::

        ray_opacity_n = cap_fn(sum_i=1^n cap_fn(d_i * o_i)),
        weight_n = weight_fn(cap_fn(d_i * o_i), 1 - ray_opacity_{n-1}),

    and the final rendered quantities are computed by a dot-product of ray values
    with the weights, e.g. `features = sum_n(weight_n * ray_features_n)`.

    By default, for the EA raymarcher from [1] (
    activated with `self.raymarcher_class_type="EmissionAbsorptionRaymarcher"`
    )::

        cap_fn(x) = 1 - exp(-x),
        weight_fn(x) = w * x.

    Note that the latter can be altered by changing `self.raymarcher_class_type`,
    e.g. to "CumsumRaymarcher" which implements the cumulative-sum raymarcher
    from NeuralVolumes [2].

    Settings:
        n_pts_per_ray_fine_training: The number of points sampled per ray for the
            fine rendering pass during training.
        n_pts_per_ray_fine_evaluation: The number of points sampled per ray for the
            fine rendering pass during evaluation.
        stratified_sampling_coarse_training: Enable/disable stratified sampling in the
            refiner during training. Only matters if there are multiple implicit
            functions (i.e. in GenericModel if num_passes>1).
        stratified_sampling_coarse_evaluation: Enable/disable stratified sampling in
            the refiner during evaluation. Only matters if there are multiple implicit
            functions (i.e. in GenericModel if num_passes>1).
        append_coarse_samples_to_fine: Add the fine ray points to the coarse points
            after sampling.
        density_noise_std_train: Standard deviation of the noise added to the
            opacity field.
        return_weights: Enables returning the rendering weights of the EA raymarcher.
            Setting to `True` can lead to a prohibitively large memory consumption.
        blurpool_weights: Use blurpool defined in [3], on the input weights of
            each implicit_function except the first (implicit_functions[0]).
        sample_pdf_eps: Padding applied to the weights (alpha in equation 18 of [3]).
        raymarcher_class_type: The type of self.raymarcher corresponding to
            a child of `RaymarcherBase` in the registry.
        raymarcher: The raymarcher object used to convert per-point features
            and opacities to a feature render.

    References:
        [1] Mildenhall, Ben, et al. "Nerf: Representing Scenes as Neural Radiance
            Fields for View Synthesis." ECCV 2020.
        [2] Lombardi, Stephen, et al. "Neural Volumes: Learning Dynamic Renderable
            Volumes from Images." SIGGRAPH 2019.
        [3] Jonathan T. Barron, et al. "Mip-NeRF: A Multiscale Representation
            for Anti-Aliasing Neural Radiance Fields." ICCV 2021.

    """

    raymarcher_class_type: str = "EmissionAbsorptionRaymarcher"
    # pyre-fixme[13]: Attribute `raymarcher` is never initialized.
    raymarcher: RaymarcherBase

    n_pts_per_ray_fine_training: int = 64
    n_pts_per_ray_fine_evaluation: int = 64
    stratified_sampling_coarse_training: bool = True
    stratified_sampling_coarse_evaluation: bool = False
    append_coarse_samples_to_fine: bool = True
    density_noise_std_train: float = 0.0
    return_weights: bool = False
    blurpool_weights: bool = False
    sample_pdf_eps: float = 1e-5

    def __post_init__(self):
        # One refiner per evaluation mode: training may use stratified
        # (random) resampling while evaluation is typically deterministic.
        self._refiners = {
            EvaluationMode.TRAINING: RayPointRefiner(
                n_pts_per_ray=self.n_pts_per_ray_fine_training,
                random_sampling=self.stratified_sampling_coarse_training,
                add_input_samples=self.append_coarse_samples_to_fine,
                blurpool_weights=self.blurpool_weights,
                sample_pdf_eps=self.sample_pdf_eps,
            ),
            EvaluationMode.EVALUATION: RayPointRefiner(
                n_pts_per_ray=self.n_pts_per_ray_fine_evaluation,
                random_sampling=self.stratified_sampling_coarse_evaluation,
                add_input_samples=self.append_coarse_samples_to_fine,
                blurpool_weights=self.blurpool_weights,
                sample_pdf_eps=self.sample_pdf_eps,
            ),
        }
        run_auto_creation(self)

    def forward(
        self,
        ray_bundle: ImplicitronRayBundle,
        implicit_functions: List[ImplicitFunctionWrapper],
        evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
        **kwargs,
    ) -> RendererOutput:
        """
        Args:
            ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the
                sampled rendering rays.
            implicit_functions: List of ImplicitFunctionWrappers which
                define the implicit functions to be used sequentially in
                the raymarching step. The output of raymarching with
                implicit_functions[n-1] is refined, and then used as
                input for raymarching with implicit_functions[n].
            evaluation_mode: one of EvaluationMode.TRAINING or
                EvaluationMode.EVALUATION which determines the settings used for
                rendering

        Returns:
            instance of RendererOutput
        """
        if not implicit_functions:
            raise ValueError("EA renderer expects implicit functions")

        return self._run_raymarcher(
            ray_bundle,
            implicit_functions,
            None,
            evaluation_mode,
        )

    def _run_raymarcher(
        self, ray_bundle, implicit_functions, prev_stage, evaluation_mode
    ):
        # One coarse pass with implicit_functions[0]; recurses on the rest
        # with an importance-resampled bundle. Results of earlier passes are
        # chained via `prev_stage`.
        density_noise_std = (
            self.density_noise_std_train
            if evaluation_mode == EvaluationMode.TRAINING
            else 0.0
        )

        # When the bundle carries explicit bins, their successive differences
        # provide the per-interval deltas d_i used by the raymarcher.
        ray_deltas = (
            None if ray_bundle.bins is None else torch.diff(ray_bundle.bins, dim=-1)
        )
        output = self.raymarcher(
            *implicit_functions[0](ray_bundle=ray_bundle),
            ray_lengths=ray_bundle.lengths,
            ray_deltas=ray_deltas,
            density_noise_std=density_noise_std,
        )
        output.prev_stage = prev_stage

        # Keep a local reference for the refiner even when weights are
        # dropped from the returned output to save memory.
        weights = output.weights
        if not self.return_weights:
            output.weights = None

        # we may need to make a recursive call
        if len(implicit_functions) > 1:
            fine_ray_bundle = self._refiners[evaluation_mode](ray_bundle, weights)
            output = self._run_raymarcher(
                fine_ray_bundle,
                implicit_functions[1:],
                output,
                evaluation_mode,
            )

        return output
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/ray_point_refiner.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/ray_point_refiner.py
new file mode 100644
index 0000000000000000000000000000000000000000..74a7f046b155a6b1d6ca2545bad7d8397fd9b959
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/ray_point_refiner.py
@@ -0,0 +1,147 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import copy
+
+import torch
+from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
+from pytorch3d.implicitron.tools.config import Configurable, expand_args_fields
+
+from pytorch3d.renderer.implicit.sample_pdf import sample_pdf
+
+
@expand_args_fields
class RayPointRefiner(Configurable, torch.nn.Module):
    """
    Implements the importance sampling of points along rays.
    The input is a `RayBundle` object with a `ray_weights` tensor
    which specifies the probabilities of sampling a point along each ray.

    This raysampler is used for the fine rendering pass of NeRF.
    As such, the forward pass accepts the RayBundle output by the
    raysampling of the coarse rendering pass. Hence, it does not
    take cameras as input.

    Args:
        n_pts_per_ray: The number of points to sample along each ray.
        random_sampling: If `False`, returns equispaced percentiles of the
            distribution defined by the input weights, otherwise performs
            sampling from that distribution.
        add_input_samples: Concatenates and returns the sampled values
            together with the input samples.
        blurpool_weights: Use blurpool defined in [1], on the input weights.
        sample_pdf_eps: A constant preventing division by zero in case empty bins
            are present.

    References:
        [1] Jonathan T. Barron, et al. "Mip-NeRF: A Multiscale Representation
            for Anti-Aliasing Neural Radiance Fields." ICCV 2021.
    """

    # pyre-fixme[13]: Attribute `n_pts_per_ray` is never initialized.
    n_pts_per_ray: int
    # pyre-fixme[13]: Attribute `random_sampling` is never initialized.
    random_sampling: bool
    add_input_samples: bool = True
    blurpool_weights: bool = False
    sample_pdf_eps: float = 1e-5

    def forward(
        self,
        input_ray_bundle: ImplicitronRayBundle,
        ray_weights: torch.Tensor,
        blurpool_weights: bool = False,
        sample_pdf_padding: float = 1e-5,
        **kwargs,
    ) -> ImplicitronRayBundle:
        """
        Args:
            input_ray_bundle: An instance of `ImplicitronRayBundle` specifying the
                source rays for sampling of the probability distribution.
            ray_weights: A tensor of shape
                `(..., input_ray_bundle.lengths.shape[-1])` with non-negative
                elements defining the probability distribution to sample
                ray points from.
            blurpool_weights: Use blurpool defined in [1], on the input weights.
            sample_pdf_padding: A constant preventing division by zero in case empty bins
                are present.

        Returns:
            ray_bundle: A new `ImplicitronRayBundle` instance containing the input ray
                points together with `n_pts_per_ray` additionally sampled
                points per ray. For each ray, the lengths are sorted.

        References:
            [1] Jonathan T. Barron, et al. "Mip-NeRF: A Multiscale Representation
                for Anti-Aliasing Neural Radiance Fields." ICCV 2021.

        """
        # NOTE(review): the `blurpool_weights` and `sample_pdf_padding`
        # arguments are accepted but never read; the configured
        # `self.blurpool_weights` / `self.sample_pdf_eps` are used instead —
        # confirm whether callers rely on the per-call overrides.

        # Importance-sampling should not be differentiated through.
        with torch.no_grad():
            if self.blurpool_weights:
                ray_weights = apply_blurpool_on_weights(ray_weights)

            n_pts_per_ray = self.n_pts_per_ray
            ray_weights = ray_weights.view(-1, ray_weights.shape[-1])
            if input_ray_bundle.bins is None:
                # No explicit bins: treat midpoints of consecutive lengths as
                # bin edges and drop the two boundary weights accordingly.
                z_vals: torch.Tensor = input_ray_bundle.lengths
                ray_weights = ray_weights[..., 1:-1]
                bins = torch.lerp(z_vals[..., 1:], z_vals[..., :-1], 0.5)
            else:
                # Explicit bins: draw one extra sample so the refined bins
                # again bound `n_pts_per_ray` intervals.
                z_vals = input_ray_bundle.bins
                n_pts_per_ray += 1
                bins = z_vals
            z_samples = sample_pdf(
                bins.view(-1, bins.shape[-1]),
                ray_weights,
                n_pts_per_ray,
                det=not self.random_sampling,
                eps=self.sample_pdf_eps,
            ).view(*z_vals.shape[:-1], n_pts_per_ray)

        if self.add_input_samples:
            z_vals = torch.cat((z_vals, z_samples), dim=-1)
        else:
            z_vals = z_samples
        # Resort by depth.
        z_vals, _ = torch.sort(z_vals, dim=-1)
        # Shallow-copy the bundle and only swap the refined depth field.
        ray_bundle = copy.copy(input_ray_bundle)
        if input_ray_bundle.bins is None:
            ray_bundle.lengths = z_vals
        else:
            ray_bundle.bins = z_vals

        return ray_bundle
+
+
def apply_blurpool_on_weights(weights) -> torch.Tensor:
    """
    Filter weights with a 2-tap max filter followed by a 2-tap blur filter,
    which produces a wide and smooth upper envelope on the weights
    (the "blurpool" of Mip-NeRF, eq. 11).

    Args:
        weights: Tensor of shape `(..., dim)` with any number of leading
            batch dimensions (including none).

    Returns:
        blurred_weights: Tensor of shape `(..., dim)`.
    """
    # Replicate-pad the last dimension by one element on each side.
    padded = torch.cat([weights[..., :1], weights, weights[..., -1:]], dim=-1)
    # 2-tap max filter: elementwise max of each adjacent pair. This replaces
    # the previous flatten/max_pool1d/reshape round-trip, which required a
    # rank >= 2 input; the pairwise form works for any rank.
    maxed = torch.maximum(padded[..., :-1], padded[..., 1:])
    # 2-tap blur: average adjacent maxima back to the input width.
    return torch.lerp(maxed[..., :-1], maxed[..., 1:], 0.5)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/ray_sampler.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/ray_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..11ee27b045e5a047fd029f7bf361b000119b5358
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/ray_sampler.py
@@ -0,0 +1,387 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from typing import Optional, Tuple
+
+import torch
+from pytorch3d.implicitron.tools import camera_utils
+from pytorch3d.implicitron.tools.config import registry, ReplaceableBase
+from pytorch3d.renderer import NDCMultinomialRaysampler
+from pytorch3d.renderer.cameras import CamerasBase
+from pytorch3d.renderer.implicit.utils import HeterogeneousRayBundle
+
+from .base import EvaluationMode, ImplicitronRayBundle, RenderSamplingMode
+
+
class RaySamplerBase(ReplaceableBase):
    """
    Base class for ray samplers.

    Concrete subclasses implement `forward` to turn a batch of cameras
    (and an optional sampling mask) into an `ImplicitronRayBundle`.
    """

    def forward(
        self,
        cameras: CamerasBase,
        evaluation_mode: EvaluationMode,
        mask: Optional[torch.Tensor] = None,
    ) -> ImplicitronRayBundle:
        """
        Args:
            cameras: A batch of `batch_size` cameras from which the rays are emitted.
            evaluation_mode: one of `EvaluationMode.TRAINING` or
                `EvaluationMode.EVALUATION` which determines the sampling mode
                that is used.
            mask: Active for the `RenderSamplingMode.MASK_SAMPLE` sampling mode.
                Defines a non-negative mask of shape
                `(batch_size, image_height, image_width)` where each per-pixel
                value is proportional to the probability of sampling the
                corresponding pixel's ray.

        Returns:
            ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the
                sampled rendering rays.
        """
        # Subclasses must implement the actual sampling strategy.
        raise NotImplementedError()
+
+
class AbstractMaskRaySampler(RaySamplerBase, torch.nn.Module):
    """
    Samples a fixed number of points along rays which are in turn sampled for
    each camera in a batch.

    This class utilizes `NDCMultinomialRaysampler` which allows to either
    randomly sample rays from an input foreground saliency mask
    (`RenderSamplingMode.MASK_SAMPLE`), or on a rectangular image grid
    (`RenderSamplingMode.FULL_GRID`). The sampling mode can be set separately
    for training and evaluation by setting `self.sampling_mode_training`
    and `self.sampling_mode_evaluation` accordingly.

    The class allows to adjust the sampling points along rays by overwriting the
    `AbstractMaskRaySampler._get_min_max_depth_bounds` function which returns
    the near/far planes (`min_depth`/`max_depth`) `NDCMultinomialRaysampler`.

    Settings:
        image_width: The horizontal size of the image grid.
        image_height: The vertical size of the image grid.
        sampling_mode_training: The ray sampling mode for training. This should be a str
            option from the RenderSamplingMode Enum
        sampling_mode_evaluation: Same as above but for evaluation.
        n_pts_per_ray_training: The number of points sampled along each ray during training.
        n_pts_per_ray_evaluation: The number of points sampled along each ray during evaluation.
        n_rays_per_image_sampled_from_mask: The amount of rays to be sampled from the image
            grid. Given a batch of image grids, this many is sampled from each.
            `n_rays_per_image_sampled_from_mask` and `n_rays_total_training` cannot both be
            defined.
        n_rays_total_training: (optional) How many rays in total to sample from the entire
            batch of provided image grid. The result is as if `n_rays_total_training`
            cameras/image grids were sampled with replacement from the cameras / image grids
            provided and for every camera one ray was sampled.
            `n_rays_per_image_sampled_from_mask` and `n_rays_total_training` cannot both be
            defined, to use you have to set `n_rays_per_image` to None.
            Used only for EvaluationMode.TRAINING.
        stratified_point_sampling_training: if set, performs stratified random sampling
            along the ray; otherwise takes ray points at deterministic offsets.
        stratified_point_sampling_evaluation: Same as above but for evaluation.
        cast_ray_bundle_as_cone: If True, the sampling will generate the bins and radii
            attribute of ImplicitronRayBundle. The `bins` contain the z-coordinate
            (=depth) of each ray in world units and are of shape
            `(batch_size, n_rays_per_image, n_pts_per_ray_training/evaluation + 1)`
            while `lengths` is equal to the midpoint of the bins:
            (0.5 * (bins[..., 1:] + bins[..., :-1]).
            If False, `bins` is None, `radii` is None and `lengths` contains
            the z-coordinate (=depth) of each ray in world units and are of shape
            `(batch_size, n_rays_per_image, n_pts_per_ray_training/evaluation)`

    Raises:
        TypeError: if cast_ray_bundle_as_cone is set to True and n_rays_total_training
            is not None will result in an error. HeterogeneousRayBundle is
            not supported for conical frustum computation yet.
    """

    image_width: int = 400
    image_height: int = 400
    sampling_mode_training: str = "mask_sample"
    sampling_mode_evaluation: str = "full_grid"
    n_pts_per_ray_training: int = 64
    n_pts_per_ray_evaluation: int = 64
    n_rays_per_image_sampled_from_mask: Optional[int] = 1024
    n_rays_total_training: Optional[int] = None
    # stratified sampling vs taking points at deterministic offsets
    stratified_point_sampling_training: bool = True
    stratified_point_sampling_evaluation: bool = False
    cast_ray_bundle_as_cone: bool = False

    def __post_init__(self):
        if (self.n_rays_per_image_sampled_from_mask is not None) and (
            self.n_rays_total_training is not None
        ):
            raise ValueError(
                "Cannot both define n_rays_total_training and "
                "n_rays_per_image_sampled_from_mask."
            )

        self._sampling_mode = {
            EvaluationMode.TRAINING: RenderSamplingMode(self.sampling_mode_training),
            EvaluationMode.EVALUATION: RenderSamplingMode(
                self.sampling_mode_evaluation
            ),
        }

        # When casting rays as cones, one extra depth is sampled per ray: the
        # n+1 sampled values serve as bin boundaries, whose midpoints later
        # become the n per-ray lengths.
        n_pts_per_ray_training = (
            self.n_pts_per_ray_training + 1
            if self.cast_ray_bundle_as_cone
            else self.n_pts_per_ray_training
        )
        n_pts_per_ray_evaluation = (
            self.n_pts_per_ray_evaluation + 1
            if self.cast_ray_bundle_as_cone
            else self.n_pts_per_ray_evaluation
        )
        # min_depth/max_depth passed here are placeholders; the actual
        # near/far planes are supplied per-call in forward() via
        # _get_min_max_depth_bounds().
        self._training_raysampler = NDCMultinomialRaysampler(
            image_width=self.image_width,
            image_height=self.image_height,
            n_pts_per_ray=n_pts_per_ray_training,
            min_depth=0.0,
            max_depth=0.0,
            n_rays_per_image=(
                self.n_rays_per_image_sampled_from_mask
                if self._sampling_mode[EvaluationMode.TRAINING]
                == RenderSamplingMode.MASK_SAMPLE
                else None
            ),
            n_rays_total=self.n_rays_total_training,
            unit_directions=True,
            stratified_sampling=self.stratified_point_sampling_training,
        )

        self._evaluation_raysampler = NDCMultinomialRaysampler(
            image_width=self.image_width,
            image_height=self.image_height,
            n_pts_per_ray=n_pts_per_ray_evaluation,
            min_depth=0.0,
            max_depth=0.0,
            n_rays_per_image=(
                self.n_rays_per_image_sampled_from_mask
                if self._sampling_mode[EvaluationMode.EVALUATION]
                == RenderSamplingMode.MASK_SAMPLE
                else None
            ),
            unit_directions=True,
            stratified_sampling=self.stratified_point_sampling_evaluation,
        )

        # Per-pixel extent in NDC units, used for conical frustum radii.
        # NOTE(review): assumes image_height > 1 and image_width > 1 —
        # a 1-pixel dimension would divide by zero; confirm callers.
        max_y, min_y = self._training_raysampler.max_y, self._training_raysampler.min_y
        max_x, min_x = self._training_raysampler.max_x, self._training_raysampler.min_x
        self.pixel_height: float = (max_y - min_y) / (self.image_height - 1)
        self.pixel_width: float = (max_x - min_x) / (self.image_width - 1)

    def _get_min_max_depth_bounds(self, cameras: CamerasBase) -> Tuple[float, float]:
        # Subclasses define how the near/far planes are obtained.
        raise NotImplementedError()

    def forward(
        self,
        cameras: CamerasBase,
        evaluation_mode: EvaluationMode,
        mask: Optional[torch.Tensor] = None,
    ) -> ImplicitronRayBundle:
        """
        Args:
            cameras: A batch of `batch_size` cameras from which the rays are emitted.
            evaluation_mode: one of `EvaluationMode.TRAINING` or
                `EvaluationMode.EVALUATION` which determines the sampling mode
                that is used.
            mask: Active for the `RenderSamplingMode.MASK_SAMPLE` sampling mode.
                Defines a non-negative mask of shape
                `(batch_size, image_height, image_width)` where each per-pixel
                value is proportional to the probability of sampling the
                corresponding pixel's ray.

        Returns:
            ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the
                sampled rendering rays.
        """
        sample_mask = None
        if (
            self._sampling_mode[evaluation_mode] == RenderSamplingMode.MASK_SAMPLE
            and mask is not None
        ):
            # Resize the mask to the raysampler's grid; nearest-neighbor
            # keeps the mask values unblended.
            sample_mask = torch.nn.functional.interpolate(
                mask,
                size=[self.image_height, self.image_width],
                mode="nearest",
            )[:, 0]

        min_depth, max_depth = self._get_min_max_depth_bounds(cameras)

        raysampler = {
            EvaluationMode.TRAINING: self._training_raysampler,
            EvaluationMode.EVALUATION: self._evaluation_raysampler,
        }[evaluation_mode]

        ray_bundle = raysampler(
            cameras=cameras,
            mask=sample_mask,
            min_depth=min_depth,
            max_depth=max_depth,
        )
        if self.cast_ray_bundle_as_cone and isinstance(
            ray_bundle, HeterogeneousRayBundle
        ):
            # If this error rises it means that raysampler has among
            # its arguments `n_ray_totals`. If it is the case
            # then you should update the radii computation and lengths
            # computation to handle padding and unpadding.
            raise TypeError(
                "Heterogeneous ray bundle is not supported for conical frustum computation yet"
            )
        elif self.cast_ray_bundle_as_cone:
            pixel_hw: Tuple[float, float] = (self.pixel_height, self.pixel_width)
            pixel_radii_2d = compute_radii(cameras, ray_bundle.xys[..., :2], pixel_hw)
            # The sampled depths become bin boundaries; `lengths` is left
            # None here — presumably recomputed from the bins downstream
            # (TODO confirm against ImplicitronRayBundle).
            return ImplicitronRayBundle(
                directions=ray_bundle.directions,
                origins=ray_bundle.origins,
                lengths=None,
                xys=ray_bundle.xys,
                bins=ray_bundle.lengths,
                pixel_radii_2d=pixel_radii_2d,
            )

        return ImplicitronRayBundle(
            directions=ray_bundle.directions,
            origins=ray_bundle.origins,
            lengths=ray_bundle.lengths,
            xys=ray_bundle.xys,
            camera_counts=getattr(ray_bundle, "camera_counts", None),
            camera_ids=getattr(ray_bundle, "camera_ids", None),
        )
+
+
@registry.register
class AdaptiveRaySampler(AbstractMaskRaySampler):
    """
    Adaptively samples points on each ray between near and far planes whose
    depths are determined based on the distance from the camera center
    to a predefined scene center.

    More specifically,
    `min_depth = max(
        (self.scene_center-camera_center).norm() - self.scene_extent, eps
    )` and
    `max_depth = (self.scene_center-camera_center).norm() + self.scene_extent`.

    This sampling is ideal for object-centric scenes whose contents are
    centered around a known `self.scene_center` and fit into a bounding sphere
    with a radius of `self.scene_extent`.

    Args:
        scene_center: The xyz coordinates of the center of the scene used
            along with `scene_extent` to compute the min and max depth planes
            for sampling ray-points.
        scene_extent: The radius of the scene bounding box centered at `scene_center`.
    """

    scene_extent: float = 8.0
    scene_center: Tuple[float, float, float] = (0.0, 0.0, 0.0)

    def __post_init__(self):
        super().__post_init__()
        if self.scene_extent <= 0.0:
            raise ValueError("Adaptive raysampler requires self.scene_extent > 0.")
        # Stored as a plain float tensor, not a buffer/parameter.
        self._scene_center = torch.FloatTensor(self.scene_center)

    def _get_min_max_depth_bounds(self, cameras: CamerasBase) -> Tuple[float, float]:
        """
        Returns the adaptively calculated near/far planes.
        """
        min_depth, max_depth = camera_utils.get_min_max_depth_bounds(
            cameras, self._scene_center, self.scene_extent
        )
        # NOTE(review): only the bounds of the first camera in the batch are
        # used for all rays — confirm this is intended for batched cameras.
        return float(min_depth[0]), float(max_depth[0])
+
+
@registry.register
class NearFarRaySampler(AbstractMaskRaySampler):
    """
    Samples a fixed number of points between fixed near and far z-planes.
    Specifically, samples points along each ray with approximately uniform spacing
    of z-coordinates between the minimum depth `self.min_depth` and the maximum depth
    `self.max_depth`. This sampling is useful for rendering scenes where the camera is
    in a constant distance from the focal point of the scene.

    Args:
        min_depth: The minimum depth of a ray-point.
        max_depth: The maximum depth of a ray-point.
    """

    min_depth: float = 0.1
    max_depth: float = 8.0

    def _get_min_max_depth_bounds(self, cameras: CamerasBase) -> Tuple[float, float]:
        """
        Returns the stored near/far planes.

        The `cameras` argument is ignored: the bounds are configuration
        constants, independent of the camera batch.
        """
        return self.min_depth, self.max_depth
+
+
def compute_radii(
    cameras: "CamerasBase",
    xy_grid: torch.Tensor,
    pixel_hw_ndc: Tuple[float, float],
) -> torch.Tensor:
    """
    Compute radii of conical frustums in world coordinates.

    Args:
        cameras: cameras object representing a batch of cameras.
        xy_grid: torch.tensor grid of image xy coords.
        pixel_hw_ndc: pixel height and width in NDC

    Returns:
        radii: A tensor of shape `(..., 1)` radii of a cone.
    """
    batch_size = xy_grid.shape[0]
    spatial_size = xy_grid.shape[1:-1]
    n_rays_per_image = spatial_size.numel()

    rays_xy = xy_grid.view(batch_size, n_rays_per_image, 2)
    pixel_height, pixel_width = pixel_hw_ndc

    # Stack the original pixel centers together with their one-pixel shifts
    # along x and y: [batch_size, 3 * n_rays_per_image, 2].
    shift_x = torch.tensor([pixel_width, 0], device=rays_xy.device)
    shift_y = torch.tensor([0, pixel_height], device=rays_xy.device)
    stacked_xy = torch.cat([rays_xy, rays_xy + shift_x, rays_xy + shift_y], dim=1)

    # Lift to homogeneous NDC points: [batch_size, 3 * n_rays_per_image, 3].
    ones = stacked_xy.new_ones(batch_size, 3 * n_rays_per_image, 1)
    stacked_xyz = torch.cat((stacked_xy, ones), dim=-1)

    # Unproject all three copies to world coordinates in a single call, then
    # split them back apart.
    unprojected = cameras.unproject_points(stacked_xyz, from_ndc=True)
    plane_world, plane_world_dx, plane_world_dy = torch.split(
        unprojected, n_rays_per_image, dim=1
    )

    # World-space distances to the one-pixel neighbors.
    dx_norm = torch.linalg.norm(plane_world_dx - plane_world, dim=-1, keepdims=True)
    dy_norm = torch.linalg.norm(plane_world_dy - plane_world, dim=-1, keepdims=True)
    # Half of (dx_norm + dy_norm) is the base radius; scaling that by
    # 2/12**0.5 (to match the variance of the pixel's footprint) collapses
    # to an overall division by 12**0.5.
    radii = (dx_norm + dy_norm) / 12**0.5

    return radii.view(batch_size, *spatial_size, 1)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/ray_tracing.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/ray_tracing.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ad46a83e9eee904e61dea6e00306083c0fd4327
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/ray_tracing.py
@@ -0,0 +1,592 @@
+# @lint-ignore-every LICENSELINT
+# Adapted from https://github.com/lioryariv/idr
+# Copyright (c) 2020 Lior Yariv
+
+# pyre-unsafe
+
+from typing import Any, Callable, Tuple
+
+import torch
+import torch.nn as nn
+from pytorch3d.implicitron.tools.config import Configurable
+
+
+class RayTracing(Configurable, nn.Module):
+    """
+    Finds the intersection points of rays with the implicit surface defined
+    by a signed distance function (SDF). The algorithm follows the pipeline:
+    1. Initialise start and end points on rays by the intersections with
+        the circumscribing sphere.
+    2. Run sphere tracing from both ends.
+    3. Divide the untraced segments of non-convergent rays into uniform
+        intervals and find the one with the sign transition.
+    4. Run the secant method to estimate the point of the sign transition.
+
+    Args:
+        object_bounding_sphere: The radius of the initial sphere circumscribing
+            the object.
+        sdf_threshold: Absolute SDF value small enough for the sphere tracer
+            to consider it a surface.
+        line_search_step: Length of the backward correction on sphere tracing
+            iterations.
+        line_step_iters: Number of backward correction iterations.
+        sphere_tracing_iters: Maximum number of sphere tracing iterations
+            (the actual number of iterations may be smaller if all ray
+            intersections are found).
+        n_steps: Number of intervals sampled for unconvergent rays.
+        n_secant_steps: Number of iterations in the secant algorithm.
+    """
+
+    # Configurable fields; defaults are consumed by the implicitron config
+    # system (Configurable).
+    object_bounding_sphere: float = 1.0
+    sdf_threshold: float = 5.0e-5
+    line_search_step: float = 0.5
+    line_step_iters: int = 1
+    sphere_tracing_iters: int = 10
+    n_steps: int = 100
+    n_secant_steps: int = 8
+
+    def forward(
+        self,
+        sdf: Callable[[torch.Tensor], torch.Tensor],
+        cam_loc: torch.Tensor,
+        object_mask: torch.BoolTensor,
+        ray_directions: torch.Tensor,
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """
+        Args:
+            sdf: A callable that takes a (N, 3) tensor of points and returns
+                a tensor of (N,) SDF values.
+            cam_loc: A tensor of (B, N, 3) ray origins.
+            object_mask: A (N, 3) tensor of indicators whether a sampled pixel
+                corresponds to the rendered object or background.
+            ray_directions: A tensor of (B, N, 3) ray directions.
+
+        Returns:
+            curr_start_points: A tensor of (B*N, 3) found intersection points
+                with the implicit surface.
+            network_object_mask: A tensor of (B*N,) indicators denoting whether
+                intersections were found.
+            acc_start_dis: A tensor of (B*N,) distances from the ray origins
+                to intersection points.
+        """
+        batch_size, num_pixels, _ = ray_directions.shape
+        device = cam_loc.device
+
+        # Clip each ray against the bounding sphere; only rays hitting the
+        # sphere can intersect the object inside it.
+        sphere_intersections, mask_intersect = _get_sphere_intersection(
+            cam_loc, ray_directions, r=self.object_bounding_sphere
+        )
+
+        (
+            curr_start_points,
+            unfinished_mask_start,
+            acc_start_dis,
+            acc_end_dis,
+            min_dis,
+            max_dis,
+        ) = self.sphere_tracing(
+            batch_size,
+            num_pixels,
+            sdf,
+            cam_loc,
+            ray_directions,
+            mask_intersect,
+            sphere_intersections,
+        )
+
+        # While the forward (start) and backward (end) traces have not crossed,
+        # a surface may still lie between them; crossed traces mean no hit.
+        network_object_mask = acc_start_dis < acc_end_dis
+
+        # The non convergent rays should be handled by the sampler
+        sampler_mask = unfinished_mask_start
+        sampler_net_obj_mask = torch.zeros_like(
+            sampler_mask, dtype=torch.bool, device=device
+        )
+        if sampler_mask.sum() > 0:
+            # Per-ray search interval: the segment left untraced between the
+            # two sphere-tracing fronts.
+            sampler_min_max = torch.zeros((batch_size, num_pixels, 2), device=device)
+            sampler_min_max.reshape(-1, 2)[sampler_mask, 0] = acc_start_dis[
+                sampler_mask
+            ]
+            sampler_min_max.reshape(-1, 2)[sampler_mask, 1] = acc_end_dis[sampler_mask]
+
+            sampler_pts, sampler_net_obj_mask, sampler_dists = self.ray_sampler(
+                sdf, cam_loc, object_mask, ray_directions, sampler_min_max, sampler_mask
+            )
+
+            curr_start_points[sampler_mask] = sampler_pts[sampler_mask]
+            acc_start_dis[sampler_mask] = sampler_dists[sampler_mask]
+            network_object_mask[sampler_mask] = sampler_net_obj_mask[sampler_mask]
+
+        if not self.training:
+            return curr_start_points, network_object_mask, acc_start_dis
+
+        # In training, additionally fill curr_start_points / acc_start_dis for
+        # rays where the prediction disagrees with the supervision mask
+        # (object pixels with no found intersection, and background pixels).
+        ray_directions = ray_directions.reshape(-1, 3)
+        mask_intersect = mask_intersect.reshape(-1)
+        # pyre-fixme[9]: object_mask has type `BoolTensor`; used as `Tensor`.
+        object_mask = object_mask.reshape(-1)
+
+        # in_mask: labelled object but no predicted intersection;
+        # out_mask: labelled background (both excluding sampler rays).
+        in_mask = ~network_object_mask & object_mask & ~sampler_mask
+        out_mask = ~object_mask & ~sampler_mask
+
+        mask_left_out = (in_mask | out_mask) & ~mask_intersect
+        if (
+            mask_left_out.sum() > 0
+        ):  # project the origin to the not intersect points on the sphere
+            cam_left_out = cam_loc.reshape(-1, 3)[mask_left_out]
+            rays_left_out = ray_directions[mask_left_out]
+            # Ray length of the point closest to the world origin:
+            # t = -<d, o> for a ray o + t * d.
+            acc_start_dis[mask_left_out] = -torch.bmm(
+                rays_left_out.view(-1, 1, 3), cam_left_out.view(-1, 3, 1)
+            ).squeeze()
+            curr_start_points[mask_left_out] = (
+                cam_left_out + acc_start_dis[mask_left_out].unsqueeze(1) * rays_left_out
+            )
+
+        mask = (in_mask | out_mask) & mask_intersect
+
+        if mask.sum() > 0:
+            min_dis[network_object_mask & out_mask] = acc_start_dis[
+                network_object_mask & out_mask
+            ]
+
+            # For the remaining rays use the point of minimal SDF value along
+            # the ray as a proxy intersection.
+            min_mask_points, min_mask_dist = self.minimal_sdf_points(
+                sdf, cam_loc, ray_directions, mask, min_dis, max_dis
+            )
+
+            curr_start_points[mask] = min_mask_points
+            acc_start_dis[mask] = min_mask_dist
+
+        return curr_start_points, network_object_mask, acc_start_dis
+
+    def sphere_tracing(
+        self,
+        batch_size: int,
+        num_pixels: int,
+        sdf: Callable[[torch.Tensor], torch.Tensor],
+        cam_loc: torch.Tensor,
+        ray_directions: torch.Tensor,
+        mask_intersect: torch.Tensor,
+        sphere_intersections: torch.Tensor,
+    ) -> Tuple[Any, Any, Any, Any, Any, Any]:
+        """
+        Run sphere tracing algorithm for max iterations
+        from both sides of unit sphere intersection
+
+        Args:
+            batch_size: Number of images in the batch.
+            num_pixels: Number of rays per image.
+            sdf: A callable mapping (N, 3) points to (N,) SDF values.
+            cam_loc: Ray origins.
+            ray_directions: (batch_size, num_pixels, 3) ray directions.
+            mask_intersect: Boolean mask of rays hitting the bounding sphere.
+            sphere_intersections: Ray lengths of the two bounding-sphere
+                intersections, shape (batch_size, num_pixels, 2).
+
+        Returns:
+            curr_start_points: (batch_size*num_pixels, 3) traced points on the
+                start (camera) side.
+            unfinished_mask_start: Boolean mask of rays whose start-side trace
+                did not converge within the iteration budget.
+            acc_start_dis: Accumulated ray length of the start-side trace.
+            acc_end_dis: Accumulated ray length of the end-side trace.
+            min_dis: Sphere entry distances (initial lower bound).
+            max_dis: Sphere exit distances (initial upper bound).
+        """
+
+        device = cam_loc.device
+        # Points where each ray enters/exits the bounding sphere.
+        sphere_intersections_points = (
+            cam_loc[..., None, :]
+            + sphere_intersections[..., None] * ray_directions[..., None, :]
+        )
+        unfinished_mask_start = mask_intersect.reshape(-1).clone()
+        unfinished_mask_end = mask_intersect.reshape(-1).clone()
+
+        # Initialize start current points
+        curr_start_points = torch.zeros(batch_size * num_pixels, 3, device=device)
+        curr_start_points[unfinished_mask_start] = sphere_intersections_points[
+            :, :, 0, :
+        ].reshape(-1, 3)[unfinished_mask_start]
+        acc_start_dis = torch.zeros(batch_size * num_pixels, device=device)
+        acc_start_dis[unfinished_mask_start] = sphere_intersections.reshape(-1, 2)[
+            unfinished_mask_start, 0
+        ]
+
+        # Initialize end current points
+        curr_end_points = torch.zeros(batch_size * num_pixels, 3, device=device)
+        curr_end_points[unfinished_mask_end] = sphere_intersections_points[
+            :, :, 1, :
+        ].reshape(-1, 3)[unfinished_mask_end]
+        acc_end_dis = torch.zeros(batch_size * num_pixels, device=device)
+        acc_end_dis[unfinished_mask_end] = sphere_intersections.reshape(-1, 2)[
+            unfinished_mask_end, 1
+        ]
+
+        # Initialise min and max depth
+        min_dis = acc_start_dis.clone()
+        max_dis = acc_end_dis.clone()
+
+        # Iterate on the rays (from both sides) till finding a surface
+        iters = 0
+
+        # TODO: sdf should also pass info about batches
+
+        next_sdf_start = torch.zeros_like(acc_start_dis)
+        next_sdf_start[unfinished_mask_start] = sdf(
+            curr_start_points[unfinished_mask_start]
+        )
+
+        next_sdf_end = torch.zeros_like(acc_end_dis)
+        next_sdf_end[unfinished_mask_end] = sdf(curr_end_points[unfinished_mask_end])
+
+        while True:
+            # Update sdf
+            curr_sdf_start = torch.zeros_like(acc_start_dis)
+            curr_sdf_start[unfinished_mask_start] = next_sdf_start[
+                unfinished_mask_start
+            ]
+            # Zero out converged rays so they take no further step below.
+            curr_sdf_start[curr_sdf_start <= self.sdf_threshold] = 0
+
+            curr_sdf_end = torch.zeros_like(acc_end_dis)
+            curr_sdf_end[unfinished_mask_end] = next_sdf_end[unfinished_mask_end]
+            curr_sdf_end[curr_sdf_end <= self.sdf_threshold] = 0
+
+            # Update masks
+            unfinished_mask_start = unfinished_mask_start & (
+                curr_sdf_start > self.sdf_threshold
+            )
+            unfinished_mask_end = unfinished_mask_end & (
+                curr_sdf_end > self.sdf_threshold
+            )
+
+            if (
+                unfinished_mask_start.sum() == 0 and unfinished_mask_end.sum() == 0
+            ) or iters == self.sphere_tracing_iters:
+                break
+            iters += 1
+
+            # Make step
+            # Update distance: each trace advances by the local SDF value
+            # (a step that cannot overshoot for a valid SDF); the start trace
+            # marches forward, the end trace backward.
+            acc_start_dis = acc_start_dis + curr_sdf_start
+            acc_end_dis = acc_end_dis - curr_sdf_end
+
+            # Update points
+            curr_start_points = (
+                cam_loc
+                + acc_start_dis.reshape(batch_size, num_pixels, 1) * ray_directions
+            ).reshape(-1, 3)
+            curr_end_points = (
+                cam_loc
+                + acc_end_dis.reshape(batch_size, num_pixels, 1) * ray_directions
+            ).reshape(-1, 3)
+
+            # Fix points which wrongly crossed the surface
+            next_sdf_start = torch.zeros_like(acc_start_dis)
+            next_sdf_start[unfinished_mask_start] = sdf(
+                curr_start_points[unfinished_mask_start]
+            )
+
+            next_sdf_end = torch.zeros_like(acc_end_dis)
+            next_sdf_end[unfinished_mask_end] = sdf(
+                curr_end_points[unfinished_mask_end]
+            )
+
+            # A negative SDF after a step means the trace overshot the surface:
+            # back off by a geometrically shrinking fraction of the last step.
+            not_projected_start = next_sdf_start < 0
+            not_projected_end = next_sdf_end < 0
+            not_proj_iters = 0
+            while (
+                not_projected_start.sum() > 0 or not_projected_end.sum() > 0
+            ) and not_proj_iters < self.line_step_iters:
+                # Step backwards
+                acc_start_dis[not_projected_start] -= (
+                    (1 - self.line_search_step) / (2**not_proj_iters)
+                ) * curr_sdf_start[not_projected_start]
+                curr_start_points[not_projected_start] = (
+                    cam_loc
+                    + acc_start_dis.reshape(batch_size, num_pixels, 1) * ray_directions
+                ).reshape(-1, 3)[not_projected_start]
+
+                acc_end_dis[not_projected_end] += (
+                    (1 - self.line_search_step) / (2**not_proj_iters)
+                ) * curr_sdf_end[not_projected_end]
+                curr_end_points[not_projected_end] = (
+                    cam_loc
+                    + acc_end_dis.reshape(batch_size, num_pixels, 1) * ray_directions
+                ).reshape(-1, 3)[not_projected_end]
+
+                # Calc sdf
+                next_sdf_start[not_projected_start] = sdf(
+                    curr_start_points[not_projected_start]
+                )
+                next_sdf_end[not_projected_end] = sdf(
+                    curr_end_points[not_projected_end]
+                )
+
+                # Update mask
+                not_projected_start = next_sdf_start < 0
+                not_projected_end = next_sdf_end < 0
+                not_proj_iters += 1
+
+            # Stop tracing rays whose two fronts have crossed: there is no
+            # surface left between them.
+            unfinished_mask_start = unfinished_mask_start & (
+                acc_start_dis < acc_end_dis
+            )
+            unfinished_mask_end = unfinished_mask_end & (acc_start_dis < acc_end_dis)
+
+        return (
+            curr_start_points,
+            unfinished_mask_start,
+            acc_start_dis,
+            acc_end_dis,
+            min_dis,
+            max_dis,
+        )
+
+    def ray_sampler(
+        self,
+        sdf: Callable[[torch.Tensor], torch.Tensor],
+        cam_loc: torch.Tensor,
+        object_mask: torch.Tensor,
+        ray_directions: torch.Tensor,
+        sampler_min_max: torch.Tensor,
+        sampler_mask: torch.Tensor,
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """
+        Sample the ray in a given range and run secant on rays which have sign transition.
+
+        Args:
+            sdf: A callable mapping (N, 3) points to (N,) SDF values.
+            cam_loc: Ray origins.
+            object_mask: Indicators whether a pixel belongs to the object.
+            ray_directions: (batch_size, num_pixels, 3) ray directions.
+            sampler_min_max: Per-ray search interval, shape
+                (batch_size, num_pixels, 2).
+            sampler_mask: Boolean mask of the rays to process.
+
+        Returns:
+            sampler_pts: (batch_size*num_pixels, 3) estimated surface points.
+            sampler_net_obj_mask: Boolean mask of rays on which the sampled
+                SDF exhibits a sign transition (a predicted surface).
+            sampler_dists: Ray lengths corresponding to sampler_pts.
+        """
+
+        batch_size, num_pixels, _ = ray_directions.shape
+        device = cam_loc.device
+        n_total_pxl = batch_size * num_pixels
+        sampler_pts = torch.zeros(n_total_pxl, 3, device=device)
+        sampler_dists = torch.zeros(n_total_pxl, device=device)
+
+        # n_steps uniformly spaced fractions of each search interval.
+        intervals_dist = torch.linspace(0, 1, steps=self.n_steps, device=device).view(
+            1, 1, -1
+        )
+
+        pts_intervals = sampler_min_max[:, :, 0].unsqueeze(-1) + intervals_dist * (
+            sampler_min_max[:, :, 1] - sampler_min_max[:, :, 0]
+        ).unsqueeze(-1)
+        points = (
+            cam_loc[..., None, :]
+            + pts_intervals[..., None] * ray_directions[..., None, :]
+        )
+
+        # Get the non convergent rays
+        mask_intersect_idx = torch.nonzero(sampler_mask).flatten()
+        points = points.reshape((-1, self.n_steps, 3))[sampler_mask, :, :]
+        pts_intervals = pts_intervals.reshape((-1, self.n_steps))[sampler_mask]
+
+        # Evaluate the SDF in chunks to bound peak memory.
+        sdf_val_all = []
+        for pnts in torch.split(points.reshape(-1, 3), 100000, dim=0):
+            sdf_val_all.append(sdf(pnts))
+        sdf_val = torch.cat(sdf_val_all).reshape(-1, self.n_steps)
+
+        # Multiplying the SDF sign by a strictly decreasing positive ramp makes
+        # argmin pick the *first* sample with a negative SDF along each ray.
+        tmp = torch.sign(sdf_val) * torch.arange(
+            self.n_steps, 0, -1, device=device, dtype=torch.float32
+        ).reshape(1, self.n_steps)
+        # Force argmin to return the first min value
+        sampler_pts_ind = torch.argmin(tmp, -1)
+        sampler_pts[mask_intersect_idx] = points[
+            torch.arange(points.shape[0]), sampler_pts_ind, :
+        ]
+        sampler_dists[mask_intersect_idx] = pts_intervals[
+            torch.arange(pts_intervals.shape[0]), sampler_pts_ind
+        ]
+
+        true_surface_pts = object_mask.reshape(-1)[sampler_mask]
+        net_surface_pts = sdf_val[torch.arange(sdf_val.shape[0]), sampler_pts_ind] < 0
+
+        # take points with minimal SDF value for P_out pixels
+        p_out_mask = ~(true_surface_pts & net_surface_pts)
+        n_p_out = p_out_mask.sum()
+        if n_p_out > 0:
+            out_pts_idx = torch.argmin(sdf_val[p_out_mask, :], -1)
+            sampler_pts[mask_intersect_idx[p_out_mask]] = points[p_out_mask, :, :][
+                # pyre-fixme[6]: For 1st param expected `Union[bool, float, int]`
+                # but got `Tensor`.
+                torch.arange(n_p_out),
+                out_pts_idx,
+                :,
+            ]
+            sampler_dists[mask_intersect_idx[p_out_mask]] = pts_intervals[
+                p_out_mask,
+                :,
+                # pyre-fixme[6]: For 1st param expected `Union[bool, float, int]` but
+                # got `Tensor`.
+            ][torch.arange(n_p_out), out_pts_idx]
+
+        # Get Network object mask
+        sampler_net_obj_mask = sampler_mask.clone()
+        sampler_net_obj_mask[mask_intersect_idx[~net_surface_pts]] = False
+
+        # Run Secant method to refine the crossing; in training, only rays
+        # that both the mask and the network agree on are refined.
+        secant_pts = (
+            net_surface_pts & true_surface_pts if self.training else net_surface_pts
+        )
+        n_secant_pts = secant_pts.sum()
+        if n_secant_pts > 0:
+            # Get secant z predictions
+            z_high = pts_intervals[
+                torch.arange(pts_intervals.shape[0]), sampler_pts_ind
+            ][secant_pts]
+            sdf_high = sdf_val[torch.arange(sdf_val.shape[0]), sampler_pts_ind][
+                secant_pts
+            ]
+            # The sample preceding the first negative one brackets the root
+            # from the other side.
+            z_low = pts_intervals[secant_pts][
+                # pyre-fixme[6]: For 1st param expected `Union[bool, float, int]`
+                # but got `Tensor`.
+                torch.arange(n_secant_pts),
+                sampler_pts_ind[secant_pts] - 1,
+            ]
+            sdf_low = sdf_val[secant_pts][
+                # pyre-fixme[6]: For 1st param expected `Union[bool, float, int]`
+                # but got `Tensor`.
+                torch.arange(n_secant_pts),
+                sampler_pts_ind[secant_pts] - 1,
+            ]
+            cam_loc_secant = cam_loc.reshape(-1, 3)[mask_intersect_idx[secant_pts]]
+            ray_directions_secant = ray_directions.reshape((-1, 3))[
+                mask_intersect_idx[secant_pts]
+            ]
+            z_pred_secant = self.secant(
+                sdf_low,
+                sdf_high,
+                z_low,
+                z_high,
+                cam_loc_secant,
+                ray_directions_secant,
+                # pyre-fixme[6]: For 7th param expected `Module` but got `(Tensor)
+                # -> Tensor`.
+                sdf,
+            )
+
+            # Get points
+            sampler_pts[mask_intersect_idx[secant_pts]] = (
+                cam_loc_secant + z_pred_secant.unsqueeze(-1) * ray_directions_secant
+            )
+            sampler_dists[mask_intersect_idx[secant_pts]] = z_pred_secant
+
+        return sampler_pts, sampler_net_obj_mask, sampler_dists
+
+    def secant(
+        self,
+        sdf_low: torch.Tensor,
+        sdf_high: torch.Tensor,
+        z_low: torch.Tensor,
+        z_high: torch.Tensor,
+        cam_loc: torch.Tensor,
+        ray_directions: torch.Tensor,
+        sdf: nn.Module,
+    ) -> torch.Tensor:
+        """
+        Runs the secant method for interval [z_low, z_high] for n_secant_steps
+
+        Args:
+            sdf_low: SDF values at z_low (the sample before the transition).
+            sdf_high: SDF values at z_high (the first negative sample).
+            z_low: Lower ray-length bracket of the root.
+            z_high: Upper ray-length bracket of the root.
+            cam_loc: Ray origins, one per refined ray.
+            ray_directions: Ray directions, one per refined ray.
+            sdf: The signed distance function.
+
+        Returns:
+            z_pred: Estimated ray length of the zero crossing.
+        """
+
+        # Secant estimate: root of the line through (z_low, sdf_low) and
+        # (z_high, sdf_high).
+        z_pred = -sdf_low * (z_high - z_low) / (sdf_high - sdf_low) + z_low
+        for _ in range(self.n_secant_steps):
+            p_mid = cam_loc + z_pred.unsqueeze(-1) * ray_directions
+            sdf_mid = sdf(p_mid)
+            # Shrink the bracket while keeping the sign change inside it.
+            ind_low = sdf_mid > 0
+            if ind_low.sum() > 0:
+                z_low[ind_low] = z_pred[ind_low]
+                sdf_low[ind_low] = sdf_mid[ind_low]
+            ind_high = sdf_mid < 0
+            if ind_high.sum() > 0:
+                z_high[ind_high] = z_pred[ind_high]
+                sdf_high[ind_high] = sdf_mid[ind_high]
+
+            z_pred = -sdf_low * (z_high - z_low) / (sdf_high - sdf_low) + z_low
+
+        return z_pred
+
+    def minimal_sdf_points(
+        self,
+        sdf: Callable[[torch.Tensor], torch.Tensor],
+        cam_loc: torch.Tensor,
+        ray_directions: torch.Tensor,
+        mask: torch.Tensor,
+        min_dis: torch.Tensor,
+        max_dis: torch.Tensor,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """
+        Find points with minimal SDF value on rays for P_out pixels
+
+        Args:
+            sdf: A callable mapping (N, 3) points to (N,) SDF values.
+            cam_loc: Ray origins.
+            ray_directions: Flattened (batch*num_pixels, 3) ray directions.
+            mask: Boolean mask of the rays to process.
+            min_dis: Per-ray lower bound of the search range.
+            max_dis: Per-ray upper bound of the search range.
+
+        Returns:
+            min_mask_points: Point of minimal SDF value on each masked ray.
+            min_mask_dist: The corresponding ray length.
+        """
+
+        n_mask_points = mask.sum()
+
+        n = self.n_steps
+        # Random sample offsets in [0, 1), shared across all masked rays and
+        # rescaled per-ray to [min_dis, max_dis].
+        steps = torch.empty(n, device=cam_loc.device).uniform_(0.0, 1.0)
+        mask_max_dis = max_dis[mask].unsqueeze(-1)
+        mask_min_dis = min_dis[mask].unsqueeze(-1)
+        steps = (
+            # pyre-fixme[6]: For 1st param expected `int` but got `Tensor`.
+            steps.unsqueeze(0).repeat(n_mask_points, 1) * (mask_max_dis - mask_min_dis)
+            + mask_min_dis
+        )
+
+        mask_points = cam_loc.reshape(-1, 3)[mask]
+        mask_rays = ray_directions[mask, :]
+
+        mask_points_all = mask_points.unsqueeze(1).repeat(1, n, 1) + steps.unsqueeze(
+            -1
+        ) * mask_rays.unsqueeze(1).repeat(1, n, 1)
+        points = mask_points_all.reshape(-1, 3)
+
+        # Evaluate the SDF in chunks to bound peak memory.
+        mask_sdf_all = []
+        for pnts in torch.split(points, 100000, dim=0):
+            mask_sdf_all.append(sdf(pnts))
+
+        mask_sdf_all = torch.cat(mask_sdf_all).reshape(-1, n)
+        min_vals, min_idx = mask_sdf_all.min(-1)
+        min_mask_points = mask_points_all.reshape(-1, n, 3)[
+            # pyre-fixme[6]: For 2nd param expected `Union[bool, float, int]` but
+            # got `Tensor`.
+            torch.arange(0, n_mask_points),
+            min_idx,
+        ]
+        # pyre-fixme[6]: For 2nd param expected `Union[bool, float, int]` but got
+        # `Tensor`.
+        min_mask_dist = steps.reshape(-1, n)[torch.arange(0, n_mask_points), min_idx]
+
+        return min_mask_points, min_mask_dist
+
+
+# TODO: support variable origins
+def _get_sphere_intersection(
+    cam_loc: torch.Tensor, ray_directions: torch.Tensor, r: float = 1.0
+) -> Tuple[torch.Tensor, torch.Tensor]:
+    # Input: n_images x 3 ; n_images x n_rays x 3
+    # Output: n_images * n_rays x 2 (close and far) ; n_images * n_rays
+    #
+    # Intersects each ray o + t * d with the sphere of radius r centred at the
+    # world origin by solving t^2 + 2 t <d, o> + (|o|^2 - r^2) = 0.
+    # NOTE(review): the quadratic omits a |d|^2 factor, so directions are
+    # presumably unit-norm — confirm with callers.
+
+    n_imgs, n_pix, _ = ray_directions.shape
+    device = cam_loc.device
+
+    # cam_loc = cam_loc.unsqueeze(-1)
+    # ray_cam_dot = torch.bmm(ray_directions, cam_loc).squeeze()
+    ray_cam_dot = (ray_directions * cam_loc).sum(-1)  # n_images x n_rays
+    # Discriminant of the quadratic; positive iff the ray's line crosses
+    # the sphere.
+    # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
+    under_sqrt = ray_cam_dot**2 - (cam_loc.norm(2, dim=-1) ** 2 - r**2)
+
+    under_sqrt = under_sqrt.reshape(-1)
+    mask_intersect = under_sqrt > 0
+
+    # The two roots -<d, o> -/+ sqrt(discriminant): near and far intersection.
+    sphere_intersections = torch.zeros(n_imgs * n_pix, 2, device=device)
+    sphere_intersections[mask_intersect] = torch.sqrt(
+        under_sqrt[mask_intersect]
+    ).unsqueeze(-1) * torch.tensor([-1.0, 1.0], device=device)
+    sphere_intersections[mask_intersect] -= ray_cam_dot.reshape(-1)[
+        mask_intersect
+    ].unsqueeze(-1)
+
+    sphere_intersections = sphere_intersections.reshape(n_imgs, n_pix, 2)
+    # Intersections behind the ray origin get a non-negative ray length of 0.
+    sphere_intersections = sphere_intersections.clamp_min(0.0)
+    mask_intersect = mask_intersect.reshape(n_imgs, n_pix)
+
+    return sphere_intersections, mask_intersect
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/raymarcher.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/raymarcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..5544590cf807243b7741f9452ed9169d0aed9063
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/raymarcher.py
@@ -0,0 +1,240 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from typing import Any, Callable, Dict, Optional, Tuple
+
+import torch
+from pytorch3d.implicitron.models.renderer.base import RendererOutput
+from pytorch3d.implicitron.tools.config import registry, ReplaceableBase
+from pytorch3d.renderer.implicit.raymarching import _check_raymarcher_inputs
+
+
+_TTensor = torch.Tensor
+
+
+class RaymarcherBase(ReplaceableBase):
+    """
+    Defines a base class for raymarchers. Specifically, a raymarcher is responsible
+    for taking a set of features and density descriptors along rendering rays
+    and marching along them in order to generate a feature render.
+    """
+
+    def forward(
+        self,
+        rays_densities: torch.Tensor,
+        rays_features: torch.Tensor,
+        aux: Dict[str, Any],
+    ) -> RendererOutput:
+        """
+        Args:
+            rays_densities: Per-ray density values represented with a tensor
+                of shape `(..., n_points_per_ray, 1)`.
+            rays_features: Per-ray feature values represented with a tensor
+                of shape `(..., n_points_per_ray, feature_dim)`.
+            aux: a dictionary with extra information.
+
+        Returns:
+            An instance of RendererOutput containing the rendered features.
+        """
+        raise NotImplementedError()
+
+
+class AccumulativeRaymarcherBase(RaymarcherBase, torch.nn.Module):
+    """
+    This generalizes the `pytorch3d.renderer.EmissionAbsorptionRaymarcher`
+    and NeuralVolumes' cumsum ray marcher. It additionally returns
+    the rendering weights that can be used in the NVS pipeline to carry out
+    the importance ray-sampling in the refining pass.
+    Different from `pytorch3d.renderer.EmissionAbsorptionRaymarcher`, it takes raw
+    (non-exponentiated) densities.
+
+    Args:
+        surface_thickness: The thickness of the raymarched surface.
+        bg_color: The background color. A tuple of either 1 element or of D elements,
+            where D matches the feature dimensionality; it is broadcast when necessary.
+        replicate_last_interval: If True, the ray length assigned to the last interval
+            for the opacity delta calculation is copied from the penultimate interval.
+        background_opacity: The length over which the last raw opacity value
+            (i.e. before exponentiation) is considered to apply, for the delta
+            calculation. Ignored if replicate_last_interval=True.
+        density_relu: If `True`, passes the input density through ReLU before
+            raymarching.
+        blend_output: If `True`, alpha-blends the output renders with the
+            background color using the rendered opacity mask.
+
+        capping_function: The capping function of the raymarcher.
+            Options:
+                - "exponential" (`cap_fn(x) = 1 - exp(-x)`)
+                - "cap1" (`cap_fn(x) = min(x, 1)`)
+            Set to "exponential" for the standard Emission Absorption raymarching.
+        weight_function: The weighting function of the raymarcher.
+            Options:
+                - "product" (`weight_fn(w, x) = w * x`)
+                - "minimum" (`weight_fn(w, x) = min(w, x)`)
+            Set to "product" for the standard Emission Absorption raymarching.
+    """
+
+    surface_thickness: int = 1
+    bg_color: Tuple[float, ...] = (0.0,)
+    replicate_last_interval: bool = False
+    background_opacity: float = 0.0
+    density_relu: bool = True
+    blend_output: bool = False
+
+    @property
+    def capping_function_type(self) -> str:
+        # Subclasses select "exponential" or "cap1".
+        raise NotImplementedError()
+
+    @property
+    def weight_function_type(self) -> str:
+        # Subclasses select "product" or "minimum".
+        raise NotImplementedError()
+
+    def __post_init__(self):
+        """
+        Validates `bg_color`, registers it as a non-persistent buffer, and
+        resolves the capping/weighting callables from the type names supplied
+        by the concrete subclass.
+        """
+        bg_color = torch.tensor(self.bg_color)
+        if bg_color.ndim != 1:
+            raise ValueError(f"bg_color (shape {bg_color.shape}) should be a 1D tensor")
+
+        self.register_buffer("_bg_color", bg_color, persistent=False)
+
+        self._capping_function: Callable[[_TTensor], _TTensor] = {
+            "exponential": lambda x: 1.0 - torch.exp(-x),
+            "cap1": lambda x: x.clamp(max=1.0),
+        }[self.capping_function_type]
+
+        self._weight_function: Callable[[_TTensor, _TTensor], _TTensor] = {
+            "product": lambda curr, acc: curr * acc,
+            "minimum": lambda curr, acc: torch.minimum(curr, acc),
+        }[self.weight_function_type]
+
+    # pyre-fixme[14]: `forward` overrides method defined in `RaymarcherBase`
+    # inconsistently.
+    def forward(
+        self,
+        rays_densities: torch.Tensor,
+        rays_features: torch.Tensor,
+        aux: Dict[str, Any],
+        ray_lengths: torch.Tensor,
+        ray_deltas: Optional[torch.Tensor] = None,
+        density_noise_std: float = 0.0,
+        **kwargs,
+    ) -> RendererOutput:
+        """
+        Args:
+            rays_densities: Per-ray density values represented with a tensor
+                of shape `(..., n_points_per_ray, 1)`.
+            rays_features: Per-ray feature values represented with a tensor
+                of shape `(..., n_points_per_ray, feature_dim)`.
+            aux: a dictionary with extra information.
+            ray_lengths: Per-ray depth values represented with a tensor
+                of shape `(..., n_points_per_ray)`.
+            ray_deltas: Optional differences between consecutive elements along the ray bundle
+                represented with a tensor of shape `(..., n_points_per_ray)`. If None,
+                these differences are computed from ray_lengths.
+            density_noise_std: the magnitude of the noise added to densities.
+
+        Returns:
+            features: A tensor of shape `(..., feature_dim)` containing
+                the rendered features for each ray.
+            depth: A tensor of shape `(..., 1)` containing estimated depth.
+            opacities: A tensor of shape `(..., 1)` containing rendered opacities.
+            weights: A tensor of shape `(..., n_points_per_ray)` containing
+                the ray-specific non-negative opacity weights. In general, they
+                don't sum to 1 but do not overcome it, i.e.
+                `(weights.sum(dim=-1) <= 1.0).all()` holds.
+        """
+        _check_raymarcher_inputs(
+            rays_densities,
+            rays_features,
+            ray_lengths,
+            z_can_be_none=True,
+            features_can_be_none=False,
+            density_1d=True,
+        )
+
+        if ray_deltas is None:
+            # Consecutive sample spacings; the last sample has no successor,
+            # so its interval is either copied from the previous one or set
+            # to the fixed `background_opacity` length.
+            ray_lengths_diffs = torch.diff(ray_lengths, dim=-1)
+            if self.replicate_last_interval:
+                last_interval = ray_lengths_diffs[..., -1:]
+            else:
+                last_interval = torch.full_like(
+                    ray_lengths[..., :1], self.background_opacity
+                )
+            deltas = torch.cat((ray_lengths_diffs, last_interval), dim=-1)
+        else:
+            deltas = ray_deltas
+
+        # Drop the trailing singleton channel: (..., n_points_per_ray).
+        rays_densities = rays_densities[..., 0]
+
+        if density_noise_std > 0.0:
+            noise: _TTensor = torch.randn_like(rays_densities).mul(density_noise_std)
+            rays_densities = rays_densities + noise
+        if self.density_relu:
+            rays_densities = torch.relu(rays_densities)
+
+        # Per-interval opacity: raw density scaled by the interval length,
+        # then capped.
+        weighted_densities = deltas * rays_densities
+        capped_densities = self._capping_function(weighted_densities)
+
+        # Accumulated opacity along each ray; the last entry is the total
+        # opacity of the ray.
+        rays_opacities = self._capping_function(
+            torch.cumsum(weighted_densities, dim=-1)
+        )
+        opacities = rays_opacities[..., -1:]
+        # Transmittance in front of each sample: complement of the accumulated
+        # opacity, shifted along the ray by `surface_thickness` samples.
+        absorption_shifted = (-rays_opacities + 1.0).roll(
+            self.surface_thickness, dims=-1
+        )
+        # The first `surface_thickness` samples see full transmittance.
+        absorption_shifted[..., : self.surface_thickness] = 1.0
+
+        weights = self._weight_function(capped_densities, absorption_shifted)
+        features = (weights[..., None] * rays_features).sum(dim=-2)
+        depth = (weights * ray_lengths)[..., None].sum(dim=-2)
+
+        # Composite over the background color; the foreground is additionally
+        # scaled by the opacity only when blend_output is enabled.
+        alpha = opacities if self.blend_output else 1
+        if self._bg_color.shape[-1] not in [1, features.shape[-1]]:
+            raise ValueError("Wrong number of background color channels.")
+        features = alpha * features + (1 - opacities) * self._bg_color
+
+        return RendererOutput(
+            features=features,
+            depths=depth,
+            masks=opacities,
+            weights=weights,
+            aux=aux,
+        )
+
+
+@registry.register
+class EmissionAbsorptionRaymarcher(AccumulativeRaymarcherBase):
+    """
+    Implements the EmissionAbsorption raymarcher.
+    """
+
+    # With the exponential capping, a huge last-interval length saturates the
+    # cap for any positive density at the final sample (opaque background).
+    background_opacity: float = 1e10
+
+    @property
+    def capping_function_type(self) -> str:
+        # cap_fn(x) = 1 - exp(-x): standard Emission-Absorption.
+        return "exponential"
+
+    @property
+    def weight_function_type(self) -> str:
+        # weight_fn(w, x) = w * x.
+        return "product"
+
+
+@registry.register
+class CumsumRaymarcher(AccumulativeRaymarcherBase):
+    """
+    Implements the NeuralVolumes' cumulative-sum raymarcher.
+    """
+
+    @property
+    def capping_function_type(self) -> str:
+        # cap_fn(x) = min(x, 1).
+        return "cap1"
+
+    @property
+    def weight_function_type(self) -> str:
+        # weight_fn(w, x) = min(w, x).
+        return "minimum"
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/rgb_net.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/rgb_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..de25389680878ac00d9970d6f6dfb6eed1874ca8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/rgb_net.py
@@ -0,0 +1,140 @@
+# @lint-ignore-every LICENSELINT
+# Adapted from RenderingNetwork from IDR
+# https://github.com/lioryariv/idr/
+# Copyright (c) 2020 Lior Yariv
+
+# pyre-unsafe
+
+import logging
+from typing import List, Tuple
+
+import torch
+from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
+from pytorch3d.implicitron.tools.config import enable_get_default_args
+from pytorch3d.renderer.implicit import HarmonicEmbedding
+
+from torch import nn
+
+
+logger = logging.getLogger(__name__)
+
+
+class RayNormalColoringNetwork(torch.nn.Module):
+    """
+    Members:
+        d_in and feature_vector_size: Sum of these is the input
+            dimension. These must add up to the sum of
+            - 3 [for the points]
+            - 3 unless mode=no_normal [for the normals]
+            - 3 unless mode=no_view_dir [for view directions]
+            - the feature size, [number of channels in feature_vectors]
+
+        d_out: dimension of output.
+        mode: One of "idr", "no_view_dir" or "no_normal" to allow omitting
+            part of the network input.
+        dims: list of hidden layer sizes.
+        weight_norm: whether to apply weight normalization to each layer.
+        n_harmonic_functions_dir:
+            If >0, use a harmonic embedding with this number of
+            harmonic functions for the view direction. Otherwise view directions
+            are fed without embedding, unless mode is `no_view_dir`.
+        pooled_feature_dim: If a pooling function is in use (provided as
+            pooling_fn to forward()) this must be its number of features.
+            Otherwise this must be set to 0. (If used from GenericModel,
+            this will be set automatically.)
+    """
+
+    def __init__(
+        self,
+        feature_vector_size: int = 3,
+        mode: str = "idr",
+        d_in: int = 9,
+        d_out: int = 3,
+        dims: Tuple[int, ...] = (512, 512, 512, 512),
+        weight_norm: bool = True,
+        n_harmonic_functions_dir: int = 0,
+        pooled_feature_dim: int = 0,
+    ) -> None:
+        super().__init__()
+
+        self.mode = mode
+        self.output_dimensions = d_out
+        # Layer widths: input -> hidden dims -> output.
+        dims_full: List[int] = [d_in + feature_vector_size] + list(dims) + [d_out]
+
+        self.embedview_fn = None
+        if n_harmonic_functions_dir > 0:
+            self.embedview_fn = HarmonicEmbedding(
+                n_harmonic_functions_dir, append_input=True
+            )
+            # The 3 raw view-direction channels are replaced by the embedding.
+            dims_full[0] += self.embedview_fn.get_output_dim() - 3
+
+        if pooled_feature_dim > 0:
+            logger.info("Pooled features in rendering network.")
+            dims_full[0] += pooled_feature_dim
+
+        self.num_layers = len(dims_full)
+
+        layers = []
+        for layer_idx in range(self.num_layers - 1):
+            out_dim = dims_full[layer_idx + 1]
+            lin = nn.Linear(dims_full[layer_idx], out_dim)
+
+            if weight_norm:
+                lin = nn.utils.weight_norm(lin)
+
+            layers.append(lin)
+        self.linear_layers = torch.nn.ModuleList(layers)
+
+        self.relu = nn.ReLU()
+        self.tanh = nn.Tanh()
+
+    def forward(
+        self,
+        feature_vectors: torch.Tensor,
+        points,
+        normals,
+        ray_bundle: ImplicitronRayBundle,
+        masks=None,
+        pooling_fn=None,
+    ):
+        """
+        Args:
+            feature_vectors: Per-point feature channels appended to the
+                network input.
+            points: Per-point 3D locations.
+            normals: Per-point surface normals (ignored if mode="no_normal").
+            ray_bundle: Provides the per-pixel view directions.
+            masks: Optional mask selecting the rays to shade.
+            pooling_fn: Optional callable producing extra pooled features for
+                `points`, concatenated to the network input.
+
+        Returns:
+            The MLP output after the final tanh (range [-1, 1]) with
+            `self.output_dimensions` channels, or a zero tensor shaped like
+            `normals` if `masks` is given and selects no ray.
+        """
+        # Nothing to shade: short-circuit with zeros.
+        if masks is not None and not masks.any():
+            return torch.zeros_like(normals)
+
+        view_dirs = ray_bundle.directions
+        if masks is not None:
+            # in case of IDR, other outputs are passed here after applying the mask
+            view_dirs = view_dirs.reshape(view_dirs.shape[0], -1, 3)[
+                :, masks.reshape(-1)
+            ]
+
+        if self.embedview_fn is not None:
+            view_dirs = self.embedview_fn(view_dirs)
+
+        # Assemble the input according to the configured mode.
+        if self.mode == "idr":
+            rendering_input = torch.cat(
+                [points, view_dirs, normals, feature_vectors], dim=-1
+            )
+        elif self.mode == "no_view_dir":
+            rendering_input = torch.cat([points, normals, feature_vectors], dim=-1)
+        elif self.mode == "no_normal":
+            rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1)
+        else:
+            raise ValueError(f"Unsupported rendering mode: {self.mode}")
+
+        if pooling_fn is not None:
+            featspool = pooling_fn(points[None])[0]
+            rendering_input = torch.cat((rendering_input, featspool), dim=-1)
+
+        x = rendering_input
+
+        # MLP: ReLU between hidden layers, tanh on the output layer.
+        for layer_idx in range(self.num_layers - 1):
+            x = self.linear_layers[layer_idx](x)
+
+            if layer_idx < self.num_layers - 2:
+                x = self.relu(x)
+
+        x = self.tanh(x)
+        return x
+
+
+enable_get_default_args(RayNormalColoringNetwork)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/sdf_renderer.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/sdf_renderer.py
new file mode 100644
index 0000000000000000000000000000000000000000..1977ea5577e674622cebd5e5c355b071be12e854
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/renderer/sdf_renderer.py
@@ -0,0 +1,277 @@
+# @lint-ignore-every LICENSELINT
+# Adapted from https://github.com/lioryariv/idr/blob/main/code/model/
+# implicit_differentiable_renderer.py
+# Copyright (c) 2020 Lior Yariv
+
+# pyre-unsafe
+import functools
+from typing import List, Optional, Tuple
+
+import torch
+from omegaconf import DictConfig
+from pytorch3d.common.compat import prod
+from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
+from pytorch3d.implicitron.tools.config import (
+ get_default_args_field,
+ registry,
+ run_auto_creation,
+)
+from pytorch3d.implicitron.tools.utils import evaluating
+
+from .base import BaseRenderer, EvaluationMode, ImplicitFunctionWrapper, RendererOutput
+from .ray_tracing import RayTracing
+from .rgb_net import RayNormalColoringNetwork
+
+
@registry.register
class SignedDistanceFunctionRenderer(BaseRenderer, torch.nn.Module):
    """
    Renderer for surfaces given by a signed distance function, adapted from
    IDR (Yariv et al. 2020). Rays are intersected with the SDF zero level set
    by the `ray_tracer`, and colors at the intersections are predicted by a
    `RayNormalColoringNetwork`.

    Members:
        render_features_dimensions: Number of rendered feature channels
            (e.g. 3 for RGB).
        object_bounding_sphere: Radius of the sphere assumed to bound the
            object; also used as the sampling box half-size for eikonal points.
        ray_tracer: Ray/SDF intersection module (auto-created).
        ray_normal_coloring_network_args: Config for the coloring network.
        bg_color: Background color; either a single channel broadcast to all
            feature dimensions or one value per dimension.
        soft_mask_alpha: Sharpness of the sigmoid used for the differentiable
            silhouette approximation.
    """

    render_features_dimensions: int = 3
    object_bounding_sphere: float = 1.0
    # pyre-fixme[13]: Attribute `ray_tracer` is never initialized.
    ray_tracer: RayTracing
    ray_normal_coloring_network_args: DictConfig = get_default_args_field(
        RayNormalColoringNetwork
    )
    bg_color: Tuple[float, ...] = (0.0,)
    soft_mask_alpha: float = 50.0

    def __post_init__(
        self,
    ):
        render_features_dimensions = self.render_features_dimensions
        if len(self.bg_color) not in [1, render_features_dimensions]:
            raise ValueError(
                f"Background color should have {render_features_dimensions} entries."
            )

        # Instantiate pluggable sub-modules (e.g. `ray_tracer`).
        run_auto_creation(self)

        # The coloring network consumes exactly the renderer's feature channels.
        self.ray_normal_coloring_network_args["feature_vector_size"] = (
            render_features_dimensions
        )
        self._rgb_network = RayNormalColoringNetwork(
            **self.ray_normal_coloring_network_args
        )

        # Non-persistent: derived from the `bg_color` config field, so it does
        # not need to be saved in checkpoints.
        self.register_buffer("_bg_color", torch.tensor(self.bg_color), persistent=False)

    @classmethod
    def ray_tracer_tweak_args(cls, type, args: DictConfig) -> None:
        # `object_bounding_sphere` is owned by this renderer and is passed
        # explicitly in `create_ray_tracer`, so it is removed from the ray
        # tracer's own configurable args.
        del args["object_bounding_sphere"]

    def create_ray_tracer(self) -> None:
        self.ray_tracer = RayTracing(
            **self.ray_tracer_args,
            object_bounding_sphere=self.object_bounding_sphere,
        )

    def requires_object_mask(self) -> bool:
        # `forward` needs the object silhouette (`object_mask` kwarg).
        return True

    def forward(
        self,
        ray_bundle: ImplicitronRayBundle,
        implicit_functions: List[ImplicitFunctionWrapper],
        evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
        object_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> RendererOutput:
        """
        Args:
            ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the
                sampled rendering rays.
            implicit_functions: single element list of ImplicitFunctionWrappers which
                defines the implicit function to be used.
            evaluation_mode: one of EvaluationMode.TRAINING or
                EvaluationMode.EVALUATION which determines the settings used for
                rendering.
            kwargs:
                object_mask: BoolTensor, denoting the silhouette of the object.
                This is a required keyword argument for SignedDistanceFunctionRenderer

        Returns:
            instance of RendererOutput
        """
        if len(implicit_functions) != 1:
            raise ValueError(
                "SignedDistanceFunctionRenderer supports only single pass."
            )

        if object_mask is None:
            raise ValueError("Expected object_mask to be provided in the kwargs")
        object_mask = object_mask.bool()

        implicit_function = implicit_functions[0]
        implicit_function_gradient = functools.partial(_gradient, implicit_function)

        # object_mask: silhouette of the object
        batch_size, *spatial_size, _ = ray_bundle.lengths.shape
        num_pixels = prod(spatial_size)

        cam_loc = ray_bundle.origins.reshape(batch_size, -1, 3)
        ray_dirs = ray_bundle.directions.reshape(batch_size, -1, 3)
        object_mask = object_mask.reshape(batch_size, -1)

        # Intersect rays with the SDF zero level set without gradients and in
        # eval mode; differentiability is re-attached below via _sample_network.
        with torch.no_grad(), evaluating(implicit_function):
            points, network_object_mask, dists = self.ray_tracer(
                sdf=lambda x: implicit_function(rays_points_world=x)[
                    :, 0
                ],  # TODO: get rid of this wrapper
                cam_loc=cam_loc,
                object_mask=object_mask,
                ray_directions=ray_dirs,
            )

        # TODO: below, cam_loc might as well be different
        depth = dists.reshape(batch_size, num_pixels, 1)
        points = (cam_loc + depth * ray_dirs).reshape(-1, 3)

        sdf_output = implicit_function(rays_points_world=points)[:, 0:1]
        # NOTE most of the intermediate variables are flattened for
        # no apparent reason (here and in the ray tracer)
        ray_dirs = ray_dirs.reshape(-1, 3)
        object_mask = object_mask.reshape(-1)

        # TODO: move it to loss computation
        if evaluation_mode == EvaluationMode.TRAINING:
            # Training: keep only rays that both hit the traced surface and
            # lie inside the GT silhouette, sample extra points for the
            # eikonal regularizer, and re-attach gradients to the surface
            # points via the sampling trick from IDR.
            surface_mask = network_object_mask & object_mask
            surface_points = points[surface_mask]
            surface_dists = dists[surface_mask].unsqueeze(-1)
            surface_ray_dirs = ray_dirs[surface_mask]
            surface_cam_loc = cam_loc.reshape(-1, 3)[surface_mask]
            surface_output = sdf_output[surface_mask]
            N = surface_points.shape[0]

            # Sample points for the eikonal loss
            eik_bounding_box: float = self.object_bounding_sphere
            n_eik_points = batch_size * num_pixels // 2
            eikonal_points = torch.empty(
                n_eik_points,
                3,
                # pyre-fixme[6]: For the `device` argument, expected `device`
                # but got `Union[device, Tensor, Module]`.
                device=self._bg_color.device,
            ).uniform_(-eik_bounding_box, eik_bounding_box)
            eikonal_pixel_points = points.clone()
            eikonal_pixel_points = eikonal_pixel_points.detach()
            eikonal_points = torch.cat([eikonal_points, eikonal_pixel_points], 0)

            points_all = torch.cat([surface_points, eikonal_points], dim=0)

            output = implicit_function(rays_points_world=surface_points)
            surface_sdf_values = output[
                :N, 0:1
            ].detach()  # how is it different from sdf_output?

            # Single gradient call for both surface and eikonal points; the
            # first N rows correspond to the surface points.
            g = implicit_function_gradient(points_all)
            surface_points_grad = g[:N, 0, :].clone().detach()
            grad_theta = g[N:, 0, :]

            differentiable_surface_points = _sample_network(
                surface_output,
                surface_sdf_values,
                surface_points_grad,
                surface_dists,
                surface_cam_loc,
                surface_ray_dirs,
            )

        else:
            # Evaluation: use the ray-traced points directly, no gradients.
            surface_mask = network_object_mask
            differentiable_surface_points = points[surface_mask]
            grad_theta = None

        # Fill full-resolution buffers with defaults, then scatter the values
        # computed at the surface points into them via `surface_mask`.
        empty_render = differentiable_surface_points.shape[0] == 0
        features = implicit_function(rays_points_world=differentiable_surface_points)[
            None, :, 1:
        ]
        normals_full = features.new_zeros(
            batch_size, *spatial_size, 3, requires_grad=empty_render
        )
        render_full = (
            features.new_ones(
                batch_size,
                *spatial_size,
                self.render_features_dimensions,
                requires_grad=empty_render,
            )
            * self._bg_color
        )
        mask_full = features.new_ones(
            batch_size, *spatial_size, 1, requires_grad=empty_render
        )
        if not empty_render:
            normals = implicit_function_gradient(differentiable_surface_points)[
                None, :, 0, :
            ]
            normals_full.view(-1, 3)[surface_mask] = normals
            render_full.view(-1, self.render_features_dimensions)[surface_mask] = (
                self._rgb_network(
                    features,
                    differentiable_surface_points[None],
                    normals,
                    ray_bundle,
                    surface_mask[None, :, None],
                    pooling_fn=None,  # TODO
                )
            )
            mask_full.view(-1, 1)[~surface_mask] = torch.sigmoid(
                # pyre-fixme[6]: For 1st param expected `Tensor` but got `float`.
                -self.soft_mask_alpha
                * sdf_output[~surface_mask]
            )

        # scatter points with surface_mask
        points_full = ray_bundle.origins.detach().clone()
        points_full.view(-1, 3)[surface_mask] = differentiable_surface_points

        # TODO: it is sparse here but otherwise dense
        return RendererOutput(
            features=render_full,
            normals=normals_full,
            depths=depth.reshape(batch_size, *spatial_size, 1),
            masks=mask_full,  # this is a differentiable approximation, see (7) in the paper
            points=points_full,
            aux={"grad_theta": grad_theta},  # TODO: will be moved to eikonal loss
            # TODO: do we need sdf_output, grad_theta? Only for loss probably
        )
+
+
+def _sample_network(
+ surface_output,
+ surface_sdf_values,
+ surface_points_grad,
+ surface_dists,
+ surface_cam_loc,
+ surface_ray_dirs,
+ eps: float = 1e-4,
+):
+ # t -> t(theta)
+ surface_ray_dirs_0 = surface_ray_dirs.detach()
+ surface_points_dot = torch.bmm(
+ surface_points_grad.view(-1, 1, 3), surface_ray_dirs_0.view(-1, 3, 1)
+ ).squeeze(-1)
+ dot_sign = (surface_points_dot >= 0).to(surface_points_dot) * 2 - 1
+ surface_dists_theta = surface_dists - (surface_output - surface_sdf_values) / (
+ surface_points_dot.abs().clip(eps) * dot_sign
+ )
+
+ # t(theta) -> x(theta,c,v)
+ surface_points_theta_c_v = surface_cam_loc + surface_dists_theta * surface_ray_dirs
+
+ return surface_points_theta_c_v
+
+
+@torch.enable_grad()
+def _gradient(module, rays_points_world):
+ rays_points_world.requires_grad_(True)
+ y = module.forward(rays_points_world=rays_points_world)[:, :1]
+ d_output = torch.ones_like(y, requires_grad=False, device=y.device)
+ gradients = torch.autograd.grad(
+ outputs=y,
+ inputs=rays_points_world,
+ grad_outputs=d_output,
+ create_graph=True,
+ retain_graph=True,
+ only_inputs=True,
+ )[0]
+ return gradients.unsqueeze(1)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..70b2fc06a690a5be7ea1e08385b39c6a1fbacdbc
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/utils.py
@@ -0,0 +1,212 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+# Note: The #noqa comments below are for unused imports of pluggable implementations
+# which are part of implicitron. They ensure that the registry is prepopulated.
+
+import warnings
+from logging import Logger
+from typing import Any, Dict, Optional, Tuple
+
+import torch
+import tqdm
+from pytorch3d.common.compat import prod
+
+from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
+
+from pytorch3d.implicitron.tools import image_utils
+
+from pytorch3d.implicitron.tools.utils import cat_dataclass
+
+
def preprocess_input(
    image_rgb: Optional[torch.Tensor],
    fg_probability: Optional[torch.Tensor],
    depth_map: Optional[torch.Tensor],
    mask_images: bool,
    mask_depths: bool,
    mask_threshold: float,
    bg_color: Tuple[float, float, float],
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
    """
    Optionally threshold the foreground mask and use it to mask out the
    background of the RGB image and/or the depth map.

    Args:
        image_rgb: `(B, 3, H, W)` batch of RGB images, or None.
        fg_probability: `(B, 1, H, W)` soft foreground masks in [0, 1], or None.
        depth_map: `(B, 1, H, W)` batch of depth maps, or None.
        mask_images: Whether to replace the image background with `bg_color`.
        mask_depths: Whether to zero out the depth background.
        mask_threshold: If > 0, binarize the mask at this value before use.
        bg_color: RGB background fill used when `mask_images` is True. Each
            renderer chooses its own output background color independently.

    Returns:
        Tuple of (possibly masked) image_rgb, fg_mask, depth_map.

    Raises:
        ValueError: If `image_rgb` is unbatched (3-dimensional).
    """
    if image_rgb is not None and image_rgb.ndim == 3:
        # A 3-dim image usually means an uncollated FrameData was passed,
        # e.g. `model(**fd)` instead of `model(**fd.collate([fd]))`.
        raise ValueError(
            "Model received unbatched inputs. "
            + "Perhaps they came from a FrameData which had not been collated."
        )

    mask = fg_probability
    if mask is not None and mask_threshold > 0.0:
        # Binarize the soft foreground probability.
        warnings.warn("Thresholding masks!")
        mask = (mask >= mask_threshold).type_as(mask)

    apply_to_image = mask_images and mask is not None and image_rgb is not None
    if apply_to_image:
        warnings.warn("Masking images!")
        image_rgb = image_utils.mask_background(
            image_rgb, mask, dim_color=1, bg_color=torch.tensor(bg_color)
        )

    apply_to_depth = mask_depths and mask is not None and depth_map is not None
    if apply_to_depth:
        assert (
            mask_threshold > 0.0
        ), "Depths should be masked only with thresholded masks"
        warnings.warn("Masking depths!")
        depth_map = depth_map * mask

    return image_rgb, mask, depth_map
+
+
def log_loss_weights(loss_weights: Dict[str, float], logger: Logger) -> None:
    """
    Log a formatted table of loss names and their weights at INFO level.

    Args:
        loss_weights: Mapping from loss name to its scalar weight.
        logger: Logger that receives the rendered table.
    """
    loss_weights_message = (
        "-------\nloss_weights:\n"
        + "\n".join(f"{k:40s}: {w:1.2e}" for k, w in loss_weights.items())
        # Fix: newline before the closing separator so it does not glue onto
        # the last weight line.
        + "\n-------"
    )
    logger.info(loss_weights_message)
+
+
def weighted_sum_losses(
    preds: Dict[str, torch.Tensor], loss_weights: Dict[str, float]
) -> Optional[torch.Tensor]:
    """
    Combine individual losses into one scalar as a weighted sum.

    Only keys present in `preds` with a non-zero weight contribute. Returns
    None (with a warning) when no term contributes.
    """
    terms = []
    for name, weight in loss_weights.items():
        if name in preds and weight != 0.0:
            terms.append(preds[name] * float(weight))
    if not terms:
        warnings.warn("No main objective found.")
        return None
    total = sum(terms)
    assert torch.is_tensor(total)
    return total
+
+
def apply_chunked(func, chunk_generator, tensor_collator):
    """
    Run `func` on every chunk yielded by `chunk_generator` and collate the
    per-chunk dataclass results into one via `tensor_collator`.
    """
    collected = [func(*a, **kw) for a, kw in chunk_generator]
    return cat_dataclass(collected, tensor_collator)
+
+
def chunk_generator(
    chunk_size: int,
    ray_bundle: ImplicitronRayBundle,
    chunked_inputs: Dict[str, torch.Tensor],
    tqdm_trigger_threshold: int,
    *args,
    **kwargs,
):
    """
    Helper function which yields chunks of rays from the
    input ray_bundle, to be used when the number of rays is
    large and will not fit in memory for rendering.

    Args:
        chunk_size: Maximum number of ray points (rays * points-per-ray)
            per chunk; must be divisible by `n_pts_per_ray` when the latter
            is positive.
        ray_bundle: The full ray bundle, lengths shaped
            `(B, ..., n_pts_per_ray)`.
        chunked_inputs: Tensors of shape `(B, C, ...)` whose trailing spatial
            dims are flattened and sliced in lockstep with the rays.
        tqdm_trigger_threshold: Show a tqdm progress bar once the number of
            chunks reaches this value.
        *args: Extra positional args forwarded with every chunk.
        **kwargs: Extra keyword args forwarded (copied) with every chunk.

    Yields:
        `([ray_bundle_chunk, *args], extra_kwargs)` pairs, one per chunk.
    """
    (
        batch_size,
        *spatial_dim,
        n_pts_per_ray,
    ) = ray_bundle.lengths.shape  # B x ... x n_pts_per_ray
    if n_pts_per_ray > 0 and chunk_size % n_pts_per_ray != 0:
        raise ValueError(
            f"chunk_size_grid ({chunk_size}) should be divisible "
            f"by n_pts_per_ray ({n_pts_per_ray})"
        )

    n_rays = prod(spatial_dim)
    # special handling for raytracing-based methods; `-(-a // b)` is ceil-div.
    n_chunks = -(-n_rays * max(n_pts_per_ray, 1) // chunk_size)
    chunk_size_in_rays = -(-n_rays // n_chunks)

    # Fix: renamed from `iter` to avoid shadowing the builtin.
    chunk_iter = range(0, n_rays, chunk_size_in_rays)
    if len(chunk_iter) >= tqdm_trigger_threshold:
        chunk_iter = tqdm.tqdm(chunk_iter)

    def _safe_slice(
        tensor: Optional[torch.Tensor], start_idx: int, end_idx: int
    ) -> Any:
        # Slice optional per-camera metadata, passing None through.
        return tensor[start_idx:end_idx] if tensor is not None else None

    for start_idx in chunk_iter:
        end_idx = min(start_idx + chunk_size_in_rays, n_rays)
        bins = (
            None
            if ray_bundle.bins is None
            else ray_bundle.bins.reshape(batch_size, n_rays, n_pts_per_ray + 1)[
                :, start_idx:end_idx
            ]
        )
        pixel_radii_2d = (
            None
            if ray_bundle.pixel_radii_2d is None
            else ray_bundle.pixel_radii_2d.reshape(batch_size, -1, 1)[
                :, start_idx:end_idx
            ]
        )
        ray_bundle_chunk = ImplicitronRayBundle(
            origins=ray_bundle.origins.reshape(batch_size, -1, 3)[:, start_idx:end_idx],
            directions=ray_bundle.directions.reshape(batch_size, -1, 3)[
                :, start_idx:end_idx
            ],
            lengths=ray_bundle.lengths.reshape(batch_size, n_rays, n_pts_per_ray)[
                :, start_idx:end_idx
            ],
            xys=ray_bundle.xys.reshape(batch_size, -1, 2)[:, start_idx:end_idx],
            bins=bins,
            pixel_radii_2d=pixel_radii_2d,
            camera_ids=_safe_slice(ray_bundle.camera_ids, start_idx, end_idx),
            camera_counts=_safe_slice(ray_bundle.camera_counts, start_idx, end_idx),
        )
        extra_args = kwargs.copy()
        for k, v in chunked_inputs.items():
            extra_args[k] = v.flatten(2)[:, :, start_idx:end_idx]
        yield [ray_bundle_chunk, *args], extra_args
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/view_pooler/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/view_pooler/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/view_pooler/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/view_pooler/feature_aggregator.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/view_pooler/feature_aggregator.py
new file mode 100644
index 0000000000000000000000000000000000000000..c223be22878ef17f72a04e3766a79970c200bf61
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/view_pooler/feature_aggregator.py
@@ -0,0 +1,689 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from abc import ABC, abstractmethod
+from enum import Enum
+from typing import Dict, Optional, Sequence, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+from pytorch3d.implicitron.models.view_pooler.view_sampler import (
+ cameras_points_cartesian_product,
+)
+from pytorch3d.implicitron.tools.config import registry, ReplaceableBase
+from pytorch3d.ops import wmean
+from pytorch3d.renderer.cameras import CamerasBase
+
+
class ReductionFunction(Enum):
    """Ways of reducing the source-view dimension of sampled features."""

    AVG = "avg"  # simple average
    MAX = "max"  # maximum
    STD = "std"  # standard deviation
    STD_AVG = "std_avg"  # average of per-dimension standard deviations
+
+
class FeatureAggregatorBase(ABC, ReplaceableBase):
    """
    Base class for aggregating features.

    Typically, the aggregated features and their masks are output by `ViewSampler`
    which samples feature tensors extracted from a set of source images.

    Settings:
        exclude_target_view: If `True`/`False`, enables/disables pooling
            from target view to itself.
        exclude_target_view_mask_features: If `True`,
            mask the features from the target view before aggregation
        concatenate_output: If `True`,
            concatenate the aggregated features into a single tensor,
            otherwise return a dictionary mapping feature names to tensors.
    """

    # Defaults for the settings documented in the class docstring above;
    # subclasses inherit them as configurable fields.
    exclude_target_view: bool = True
    exclude_target_view_mask_features: bool = True
    concatenate_output: bool = True

    @abstractmethod
    def forward(
        self,
        feats_sampled: Dict[str, torch.Tensor],
        masks_sampled: torch.Tensor,
        camera: Optional[CamerasBase] = None,
        pts: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Args:
            feats_sampled: A `dict` of sampled feature tensors `{f_i: t_i}`,
                where each `t_i` is a tensor of shape
                `(minibatch, n_source_views, n_samples, dim_i)`.
            masks_sampled: A binary mask represented as a tensor of shape
                `(minibatch, n_source_views, n_samples, 1)` denoting valid
                sampled features.
            camera: A batch of `n_source_views` `CamerasBase` objects corresponding
                to the source view cameras.
            pts: A tensor of shape `(minibatch, n_samples, 3)` denoting the
                3D points whose 2D projections to source views were sampled in
                order to generate `feats_sampled` and `masks_sampled`.

        Returns:
            feats_aggregated: If `concatenate_output==True`, a tensor
                of shape `(minibatch, reduce_dim, n_samples, sum(dim_1, ... dim_N))`
                containing the concatenation of the aggregated features `feats_sampled`.
                `reduce_dim` depends on the specific feature aggregator
                implementation and typically equals 1 or `n_source_views`.
                If `concatenate_output==False`, the aggregator does not concatenate
                the aggregated features and returns a dictionary of per-feature
                aggregations `{f_i: t_i_aggregated}` instead. Each `t_i_aggregated`
                is of shape `(minibatch, reduce_dim, n_samples, aggr_dim_i)`.
        """
        raise NotImplementedError()

    @abstractmethod
    def get_aggregated_feature_dim(
        self, feats_or_feats_dim: Union[Dict[str, torch.Tensor], int]
    ):
        """
        Returns the final dimensionality of the output aggregated features.

        Args:
            feats_or_feats_dim: Either a `dict` of sampled features `{f_i: t_i}` corresponding
                to the `feats_sampled` argument of `forward`,
                or an `int` representing the sum of dimensionalities of each `t_i`.

        Returns:
            aggregated_feature_dim: The final dimensionality of the output
                aggregated features.
        """
        raise NotImplementedError()

    def has_aggregation(self) -> bool:
        """
        Specifies whether the aggregator reduces the output `reduce_dim` dimension to 1.

        Returns:
            has_aggregation: `True` if `reduce_dim==1`, else `False`.
        """
        # Reduction-based subclasses declare a `reduction_functions` field;
        # its presence is used as the marker that the source-view dimension
        # is collapsed to 1.
        return hasattr(self, "reduction_functions")
+
+
@registry.register
class IdentityFeatureAggregator(torch.nn.Module, FeatureAggregatorBase):
    """
    A pass-through aggregator: no reduction over source views is performed.
    Depending on the base-class settings, target-view features may be masked
    out and the per-feature tensors may be concatenated along channels.
    """

    def get_aggregated_feature_dim(
        self, feats_or_feats_dim: Union[Dict[str, torch.Tensor], int]
    ):
        # No reduction functions: output dim equals the input feature dim.
        return _get_reduction_aggregator_feature_dim(feats_or_feats_dim, [])

    def forward(
        self,
        feats_sampled: Dict[str, torch.Tensor],
        masks_sampled: torch.Tensor,
        camera: Optional[CamerasBase] = None,
        pts: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Pass sampled features through unchanged, up to optional target-view
        masking and channel concatenation.

        Args:
            feats_sampled: Dict of tensors, each of shape
                `(minibatch, n_source_views, n_samples, dim_i)`.
            masks_sampled: Validity mask of shape
                `(minibatch, n_source_views, n_samples, 1)`; unused here.
            camera: Unused.
            pts: Unused.

        Returns:
            If `concatenate_output`, one tensor of shape
            `(minibatch, n_source_views, n_samples, sum(dim_i))`;
            otherwise the dict of (optionally masked) per-feature tensors.
        """
        out = (
            _mask_target_view_features(feats_sampled)
            if self.exclude_target_view_mask_features
            else feats_sampled
        )
        if self.concatenate_output:
            out = torch.cat(tuple(out.values()), dim=-1)
        return out
+
+
@registry.register
class ReductionFeatureAggregator(torch.nn.Module, FeatureAggregatorBase):
    """
    Reduces the source-view axis of the sampled features with each of the
    configured `reduction_functions` and concatenates the per-function
    results along the channel dimension, collapsing the source-view stack
    to a single feature.

    Settings:
        reduction_functions: The reductions applied across the
            source-view dimension of the sampled features.
    """

    reduction_functions: Tuple[ReductionFunction, ...] = (
        ReductionFunction.AVG,
        ReductionFunction.STD,
    )

    def get_aggregated_feature_dim(
        self, feats_or_feats_dim: Union[Dict[str, torch.Tensor], int]
    ):
        return _get_reduction_aggregator_feature_dim(
            feats_or_feats_dim, self.reduction_functions
        )

    def forward(
        self,
        feats_sampled: Dict[str, torch.Tensor],
        masks_sampled: torch.Tensor,
        camera: Optional[CamerasBase] = None,
        pts: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Aggregate the sampled features over the source-view dimension.

        Args:
            feats_sampled: Dict of tensors, each of shape
                `(minibatch, n_source_views, n_samples, dim_i)`.
            masks_sampled: Validity mask of shape
                `(minibatch, n_source_views, n_samples, 1)`.
            camera: Source-view cameras; unused by this aggregator.
            pts: Sampled 3D points; unused by this aggregator.

        Returns:
            If `concatenate_output`, one tensor of shape
            `(minibatch, 1, n_samples, sum(aggr_dim_i))`; otherwise a dict of
            per-feature aggregations, each `(minibatch, 1, n_samples, aggr_dim_i)`.
        """
        pts_batch, n_cameras = masks_sampled.shape[:2]
        if self.exclude_target_view_mask_features:
            feats_sampled = _mask_target_view_features(feats_sampled)
        view_mask = _get_view_sampling_mask(
            n_cameras,
            pts_batch,
            masks_sampled.device,
            self.exclude_target_view,
        )
        # Valid-sample weights combined with the optional target-view exclusion.
        weights = masks_sampled[..., 0] * view_mask[..., None]
        reduced = {}
        for name, feats in feats_sampled.items():
            reduced[name] = _avgmaxstd_reduction_function(
                feats,
                weights,
                dim=1,
                reduction_functions=self.reduction_functions,
            )
        if self.concatenate_output:
            return torch.cat(tuple(reduced.values()), dim=-1)
        return reduced
+
+
@registry.register
class AngleWeightedReductionFeatureAggregator(torch.nn.Module, FeatureAggregatorBase):
    """
    Performs a weighted aggregation using a set of predefined `reduction_functions`
    and concatenates the results of each aggregation function along the
    channel dimension. The weights are proportional to the cosine of the
    angle between the target ray and the source ray::

        weight = (
            dot(target_ray, source_ray) * 0.5 + 0.5 + self.min_ray_angle_weight
        )**self.weight_by_ray_angle_gamma

    The reduction functions singularize the second dimension
    of the sampled features which stacks the source views.

    Settings:
        reduction_functions: A list of `ReductionFunction`s that reduce the
            stack of source-view-specific features to a single feature.
        min_ray_angle_weight: The minimum possible aggregation weight
            before raising to the power of `self.weight_by_ray_angle_gamma`.
        weight_by_ray_angle_gamma: The exponent of the cosine of the ray angles
            used when calculating the angle-based aggregation weights.
    """

    reduction_functions: Tuple[ReductionFunction, ...] = (
        ReductionFunction.AVG,
        ReductionFunction.STD,
    )
    weight_by_ray_angle_gamma: float = 1.0
    min_ray_angle_weight: float = 0.1

    def get_aggregated_feature_dim(
        self, feats_or_feats_dim: Union[Dict[str, torch.Tensor], int]
    ):
        return _get_reduction_aggregator_feature_dim(
            feats_or_feats_dim, self.reduction_functions
        )

    def forward(
        self,
        feats_sampled: Dict[str, torch.Tensor],
        masks_sampled: torch.Tensor,
        camera: Optional[CamerasBase] = None,
        pts: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Args:
            feats_sampled: A `dict` of sampled feature tensors `{f_i: t_i}`,
                where each `t_i` is a tensor of shape
                `(minibatch, n_source_views, n_samples, dim_i)`.
            masks_sampled: A binary mask represented as a tensor of shape
                `(minibatch, n_source_views, n_samples, 1)` denoting valid
                sampled features.
            camera: A batch of `n_source_views` `CamerasBase` objects
                corresponding to the source view cameras.
            pts: A tensor of shape `(minibatch, n_samples, 3)` denoting the
                3D points whose 2D projections to source views were sampled in
                order to generate `feats_sampled` and `masks_sampled`.

        Returns:
            feats_aggregated: If `concatenate_output==True`, a tensor
                of shape `(minibatch, 1, n_samples, sum(dim_1, ... dim_N))`.
                If `concatenate_output==False`, a dictionary `{f_i: t_i_aggregated}`
                with each `t_i_aggregated` of shape
                `(minibatch, n_source_views, n_samples, dim_i)`.
        """

        # Both camera geometry and the 3D points are needed to compute the
        # target/source ray angles below.
        if camera is None:
            raise ValueError("camera cannot be None for angle weighted aggregation")

        if pts is None:
            raise ValueError("Points cannot be None for angle weighted aggregation")

        pts_batch, n_cameras = masks_sampled.shape[:2]
        if self.exclude_target_view_mask_features:
            feats_sampled = _mask_target_view_features(feats_sampled)
        view_sampling_mask = _get_view_sampling_mask(
            n_cameras,
            pts_batch,
            masks_sampled.device,
            self.exclude_target_view,
        )
        aggr_weights = _get_angular_reduction_weights(
            view_sampling_mask,
            masks_sampled,
            camera,
            pts,
            self.min_ray_angle_weight,
            self.weight_by_ray_angle_gamma,
        )
        # Guard against NaN/Inf weights before they poison the reduction.
        assert torch.isfinite(aggr_weights).all()
        feats_aggregated = {
            k: _avgmaxstd_reduction_function(
                f,
                aggr_weights,
                dim=1,
                reduction_functions=self.reduction_functions,
            )
            for k, f in feats_sampled.items()
        }
        if self.concatenate_output:
            feats_aggregated = torch.cat(tuple(feats_aggregated.values()), dim=-1)
        return feats_aggregated
+
+
@registry.register
class AngleWeightedIdentityFeatureAggregator(torch.nn.Module, FeatureAggregatorBase):
    """
    This aggregator does not perform any feature aggregation. It only weights
    the features by weights proportional to the cosine of the
    angle between the target ray and the source ray::

        weight = (
            dot(target_ray, source_ray) * 0.5 + 0.5 + self.min_ray_angle_weight
        )**self.weight_by_ray_angle_gamma

    Settings:
        min_ray_angle_weight: The minimum possible aggregation weight
            before raising to the power of `self.weight_by_ray_angle_gamma`.
        weight_by_ray_angle_gamma: The exponent of the cosine of the ray angles
            used when calculating the angle-based aggregation weights.

    Additionally the aggregator allows to mask target view features and to concatenate
    the outputs.
    """

    weight_by_ray_angle_gamma: float = 1.0
    min_ray_angle_weight: float = 0.1

    def get_aggregated_feature_dim(
        self, feats_or_feats_dim: Union[Dict[str, torch.Tensor], int]
    ):
        # No reduction functions: output dim equals the input feature dim.
        return _get_reduction_aggregator_feature_dim(feats_or_feats_dim, [])

    def forward(
        self,
        feats_sampled: Dict[str, torch.Tensor],
        masks_sampled: torch.Tensor,
        camera: Optional[CamerasBase] = None,
        pts: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Args:
            feats_sampled: A `dict` of sampled feature tensors `{f_i: t_i}`,
                where each `t_i` is a tensor of shape
                `(minibatch, n_source_views, n_samples, dim_i)`.
            masks_sampled: A binary mask represented as a tensor of shape
                `(minibatch, n_source_views, n_samples, 1)` denoting valid
                sampled features.
            camera: A batch of `n_source_views` `CamerasBase` objects corresponding
                to the source view cameras.
            pts: A tensor of shape `(minibatch, n_samples, 3)` denoting the
                3D points whose 2D projections to source views were sampled in
                order to generate `feats_sampled` and `masks_sampled`.

        Returns:
            feats_aggregated: If `concatenate_output==True`, a tensor
                of shape `(minibatch, n_source_views, n_samples, sum(dim_1, ... dim_N))`.
                If `concatenate_output==False`, a dictionary `{f_i: t_i_aggregated}`
                with each `t_i_aggregated` of shape
                `(minibatch, n_source_views, n_samples, dim_i)`.
        """

        # Both camera geometry and the 3D points are needed to compute the
        # target/source ray angles below.
        if camera is None:
            raise ValueError("camera cannot be None for angle weighted aggregation")

        if pts is None:
            raise ValueError("Points cannot be None for angle weighted aggregation")

        pts_batch, n_cameras = masks_sampled.shape[:2]
        if self.exclude_target_view_mask_features:
            feats_sampled = _mask_target_view_features(feats_sampled)
        view_sampling_mask = _get_view_sampling_mask(
            n_cameras,
            pts_batch,
            masks_sampled.device,
            self.exclude_target_view,
        )
        aggr_weights = _get_angular_reduction_weights(
            view_sampling_mask,
            masks_sampled,
            camera,
            pts,
            self.min_ray_angle_weight,
            self.weight_by_ray_angle_gamma,
        )
        # Scale each source view's features by its angular weight; the
        # source-view dimension is kept (no reduction).
        feats_aggregated = {
            k: f * aggr_weights[..., None] for k, f in feats_sampled.items()
        }
        if self.concatenate_output:
            feats_aggregated = torch.cat(tuple(feats_aggregated.values()), dim=-1)
        return feats_aggregated
+
+
def _get_reduction_aggregator_feature_dim(
    feats_or_feats_dim: Union[Dict[str, torch.Tensor], int],
    reduction_functions: Sequence[ReductionFunction],
) -> int:
    """
    Computes the dimensionality of features produced by applying
    `reduction_functions` to features of total dimension `feats_or_feats_dim`
    (either given directly as an int, or as a dict of per-image feature
    tensors whose dim-1 sizes are summed).
    """
    if isinstance(feats_or_feats_dim, int):
        feat_dim = feats_or_feats_dim
    else:
        # Per-image features are channel-first, so dim 1 is the feature dim.
        feat_dim = int(sum(f.shape[1] for f in feats_or_feats_dim.values()))
    if not reduction_functions:
        return feat_dim
    per_reduction_dims = [
        _get_reduction_function_output_dim(rf, feat_dim) for rf in reduction_functions
    ]
    return sum(per_reduction_dims)
+
+
def _get_reduction_function_output_dim(
    reduction_function: ReductionFunction,
    feat_dim: int,
) -> int:
    """
    Output dimensionality of a single reduction function: STD_AVG collapses
    the feature dimension to a scalar, every other reduction preserves it.
    """
    return 1 if reduction_function == ReductionFunction.STD_AVG else feat_dim
+
+
+def _get_view_sampling_mask(
+ n_cameras: int,
+ pts_batch: int,
+ device: Union[str, torch.device],
+ exclude_target_view: bool,
+):
+ return (
+ -torch.eye(n_cameras, device=device, dtype=torch.float32)
+ * float(exclude_target_view)
+ + 1.0
+ )[:pts_batch]
+
+
def _mask_target_view_features(
    feats_sampled: Dict[str, torch.Tensor],
):
    """
    Zeroes out the features each point batch sampled from its own (target)
    view so they cannot be used anywhere downstream.
    """
    reference = next(iter(feats_sampled.values()))
    pts_batch, n_cameras = reference.shape[:2]
    mask = _get_view_sampling_mask(
        n_cameras,
        pts_batch,
        reference.device,
        True,
    )
    # Broadcast the (pts_batch, n_cameras) mask over the trailing dims.
    trailing = [1] * (reference.ndim - 2)
    mask = mask.view(pts_batch, n_cameras, *trailing)
    return {name: feat * mask for name, feat in feats_sampled.items()}
+
+
def _get_angular_reduction_weights(
    view_sampling_mask: torch.Tensor,
    masks_sampled: torch.Tensor,
    camera: CamerasBase,
    pts: torch.Tensor,
    min_ray_angle_weight: float,
    weight_by_ray_angle_gamma: float,
):
    """
    Combines the sampling validity masks, the view-sampling mask and the
    ray-angle-based weights into the final per-sample aggregation weights.
    """
    weights = masks_sampled.clone()[..., 0]
    assert camera is not None and pts is not None
    angle_weight = _get_ray_angle_weights(
        camera,
        pts,
        min_ray_angle_weight,
        weight_by_ray_angle_gamma,
    )
    assert torch.isfinite(angle_weight).all()
    # Broadcast the (pts_batch, n_cameras) view mask over the sample dims
    # before modulating the weights with the ray angles.
    broadcast_shape = (*view_sampling_mask.shape[:2], *([1] * (weights.ndim - 2)))
    view_sampling_mask = view_sampling_mask.view(*broadcast_shape)
    return weights * angle_weight.reshape_as(weights) * view_sampling_mask
+
+
def _get_ray_dir_dot_prods(camera: CamerasBase, pts: torch.Tensor):
    """
    For every (point batch, source camera) pair, computes the dot product
    (cosine of the angle) between the unit ray directions from the source
    camera centers to the points and the ray directions from each batch's
    "own" camera to the same points.

    Args:
        camera: A batch of `n_cameras` cameras.
        pts: A tensor of shape `(pts_batch, ..., 3)` of 3D points.

    Returns:
        Tensor of shape `(pts_batch, n_cameras, n_pts)` of ray-direction
        cosines.
    """
    n_cameras = camera.R.shape[0]
    pts_batch = pts.shape[0]

    camera_rep, pts_rep = cameras_points_cartesian_product(camera, pts)

    # Camera centers computed as -T @ R^T.
    # does not produce nans randomly unlike get_camera_center() below
    cam_centers_rep = -torch.bmm(
        camera_rep.T[:, None],
        camera_rep.R.permute(0, 2, 1),
    ).reshape(-1, *([1] * (pts.ndim - 2)), 3)
    # cam_centers_rep = camera_rep.get_camera_center().reshape(
    #     -1, *([1]*(pts.ndim - 2)), 3
    # )

    # Unit directions from each repeated camera center to each point.
    ray_dirs = F.normalize(pts_rep - cam_centers_rep, dim=-1)
    # camera_rep = [      pts_rep = [
    #     camera[0]           pts[0],
    #     camera[0]           pts[1],
    #     camera[0]           ...,
    #     ...                 pts[batch_pts-1],
    #     camera[1]           pts[0],
    #     camera[1]           pts[1],
    #     camera[1]           ...,
    #     ...                 pts[batch_pts-1],
    #     ...                 ...,
    #     camera[n_cameras-1] pts[0],
    #     camera[n_cameras-1] pts[1],
    #     camera[n_cameras-1] ...,
    #     ...                 pts[batch_pts-1],
    # ]                   ]

    ray_dirs_reshape = ray_dirs.view(n_cameras, pts_batch, -1, 3)
    # [
    #     [pts_0 in cam_0, pts_1 in cam_0, ..., pts_m in cam_0],
    #     [pts_0 in cam_1, pts_1 in cam_1, ..., pts_m in cam_1],
    #     ...
    #     [pts_0 in cam_n, pts_1 in cam_n, ..., pts_m in cam_n],
    # ]

    # NOTE(review): indexing [i, i] assumes the i-th point batch corresponds
    # to the i-th camera, i.e. pts_batch <= n_cameras — confirm with callers.
    ray_dirs_pts = torch.stack([ray_dirs_reshape[i, i] for i in range(pts_batch)])
    ray_dir_dot_prods = (ray_dirs_pts[None] * ray_dirs_reshape).sum(
        dim=-1
    )  # n_cameras x pts_batch x n_pts

    # transpose to pts_batch x n_cameras x n_pts
    return ray_dir_dot_prods.transpose(0, 1)
+
+
def _get_ray_angle_weights(
    camera: CamerasBase,
    pts: torch.Tensor,
    min_ray_angle_weight: float,
    weight_by_ray_angle_gamma: float,
):
    """
    Computes angle-based aggregation weights from the cosine of the angle
    between the target and source rays:
        ((cos + 1) / 2 + min_ray_angle_weight) ** weight_by_ray_angle_gamma
    """
    cosines = _get_ray_dir_dot_prods(camera, pts)  # (pts_batch, n_cameras, n_pts)
    shifted = cosines * 0.5 + 0.5  # map cosine range [-1, 1] to [0, 1]
    return (shifted + min_ray_angle_weight) ** weight_by_ray_angle_gamma
+
+
def _avgmaxstd_reduction_function(
    x: torch.Tensor,
    w: torch.Tensor,
    reduction_functions: Sequence[ReductionFunction],
    dim: int = 1,
):
    """
    Applies the requested set of weighted reductions to `x` along `dim` and
    concatenates the results along the feature (last) dimension.

    Args:
        x: Features to aggregate. Tensor of shape `(batch, n_views, ..., dim)`.
        w: Aggregation weights. Tensor of shape `(batch, n_views, ...,)`.
        reduction_functions: The set of reduction functions.
        dim: The dimension along which to aggregate.

    Returns:
        x_aggr: Aggregation of `x` to a tensor of shape
            `(batch, 1, ..., dim_aggregate)`.
    """
    collected = []
    mu = None
    std = None

    # Intermediate mean/std are reused by the later reductions when available.
    if ReductionFunction.AVG in reduction_functions:
        mu = _avg_reduction_function(x, w, dim=dim)
        collected.append(mu)

    if ReductionFunction.STD in reduction_functions:
        std = _std_reduction_function(x, w, dim=dim, mu=mu)
        collected.append(std)

    if ReductionFunction.STD_AVG in reduction_functions:
        collected.append(_std_avg_reduction_function(x, w, dim=dim, mu=mu, std=std))

    if ReductionFunction.MAX in reduction_functions:
        collected.append(_max_reduction_function(x, w, dim=dim))

    # Concatenate all pooled results along the feature dimension.
    x_aggr = torch.cat(collected, dim=-1)

    # Zero out outputs for locations where every view was masked out.
    # pyre-fixme[16]: `bool` has no attribute `type_as`.
    any_active = (w.max(dim=dim, keepdim=True).values > 1e-4).type_as(x_aggr)
    x_aggr = x_aggr * any_active[..., None]

    # sanity checks on the aggregation output
    assert torch.isfinite(x_aggr).all()
    assert x_aggr.shape[1] == 1

    return x_aggr
+
+
def _avg_reduction_function(
    x: torch.Tensor,
    w: torch.Tensor,
    dim: int = 1,
):
    """Weighted mean of `x` along `dim` with weights `w`."""
    return wmean(x, w, dim=dim, eps=1e-2)
+
+
def _std_reduction_function(
    x: torch.Tensor,
    w: torch.Tensor,
    dim: int = 1,
    mu: Optional[torch.Tensor] = None,  # pre-computed mean
):
    """Weighted standard deviation of `x` along `dim`; reuses `mu` if given."""
    if mu is None:
        mu = _avg_reduction_function(x, w, dim=dim)
    # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
    sq_deviation = (x - mu) ** 2
    variance = wmean(sq_deviation, w, dim=dim, eps=1e-2)
    # clamp keeps sqrt away from 0 for numerical stability
    # FIXME: somehow this is extremely heavy in mem?
    return variance.clamp(1e-4).sqrt()
+
+
def _std_avg_reduction_function(
    x: torch.Tensor,
    w: torch.Tensor,
    dim: int = 1,
    mu: Optional[torch.Tensor] = None,  # pre-computed mean
    std: Optional[torch.Tensor] = None,  # pre-computed std
):
    """Mean over the feature dim of the weighted std of `x` along `dim`."""
    if std is None:
        std = _std_reduction_function(x, w, dim=dim, mu=mu)
    return std.mean(dim=-1, keepdim=True)
+
+
+def _max_reduction_function(
+ x: torch.Tensor,
+ w: torch.Tensor,
+ dim: int = 1,
+ big_M_factor: float = 10.0,
+):
+ big_M = x.max(dim=dim, keepdim=True).values.abs() * big_M_factor
+ max_ = (x * w - ((1 - w) * big_M)).max(dim=dim, keepdim=True).values
+ return max_
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/view_pooler/view_pooler.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/view_pooler/view_pooler.py
new file mode 100644
index 0000000000000000000000000000000000000000..47f7258a4181a8696c3db75eb7a01d14674d259a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/view_pooler/view_pooler.py
@@ -0,0 +1,130 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from typing import Dict, List, Optional, Union
+
+import torch
+from pytorch3d.implicitron.tools.config import Configurable, run_auto_creation
+from pytorch3d.renderer.cameras import CamerasBase
+
+from .feature_aggregator import FeatureAggregatorBase
+from .view_sampler import ViewSampler
+
+
class ViewPooler(Configurable, torch.nn.Module):
    """
    Samples image-based features at the 2D projections of a set of 3D points
    and aggregates the resulting per-point feature sets.

    Members:
        view_sampler: An instance of ViewSampler used to sample image-based
            features at the 2D projections of a set of 3D points.
        feature_aggregator_class_type: The name of the feature aggregator
            class registered in the global registry.
        feature_aggregator: A feature aggregator inheriting from
            FeatureAggregatorBase. It aggregates the multi-view features (and
            their masks) sampled by `view_sampler` into per-point descriptors.
    """

    # pyre-fixme[13]: Attribute `view_sampler` is never initialized.
    view_sampler: ViewSampler
    feature_aggregator_class_type: str = "AngleWeightedReductionFeatureAggregator"
    # pyre-fixme[13]: Attribute `feature_aggregator` is never initialized.
    feature_aggregator: FeatureAggregatorBase

    def __post_init__(self):
        # Instantiate `view_sampler` and `feature_aggregator` from the config.
        run_auto_creation(self)

    def get_aggregated_feature_dim(self, feats: Union[Dict[str, torch.Tensor], int]):
        """
        Returns the final dimensionality of the output aggregated features.

        Args:
            feats: Either a `dict` of sampled features `{f_i: t_i}` matching
                the `feats_sampled` argument of `feature_aggregator.forward`,
                or an `int` representing the sum of dimensionalities of each `t_i`.

        Returns:
            aggregated_feature_dim: The final dimensionality of the output
                aggregated features.
        """
        return self.feature_aggregator.get_aggregated_feature_dim(feats)

    def has_aggregation(self):
        """
        Specifies whether the `feature_aggregator` reduces the output
        `reduce_dim` dimension to 1.

        Returns:
            has_aggregation: `True` if `reduce_dim==1`, else `False`.
        """
        return self.feature_aggregator.has_aggregation()

    def forward(
        self,
        *,  # force kw args
        pts: torch.Tensor,
        seq_id_pts: Union[List[int], List[str], torch.LongTensor],
        camera: CamerasBase,
        seq_id_camera: Union[List[int], List[str], torch.LongTensor],
        feats: Dict[str, torch.Tensor],
        masks: Optional[torch.Tensor],
        **kwargs,
    ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Projects each point cloud in the batch to the corresponding input
        cameras, samples features at the 2D projection locations in a batch of
        source images, and aggregates the pointwise sampled features.

        Args:
            pts: A tensor of shape `[pts_batch x n_pts x 3]` in world coords.
            seq_id_pts: LongTensor of shape `[pts_batch]` denoting the ids of
                the scenes from which `pts` were extracted, or a list of
                string names.
            camera: 'n_cameras' cameras, each corresponding to a batch element
                of `feats`.
            seq_id_camera: LongTensor of shape `[n_cameras]` denoting the ids
                of the scenes corresponding to cameras in `camera`, or a list
                of string names.
            feats: a dict of tensors of per-image features `{feat_i: T_i}`.
                Each tensor `T_i` is of shape `[n_cameras x dim_i x H_i x W_i]`.
            masks: `[n_cameras x 1 x H x W]`, define valid image regions
                for sampling `feats`.

        Returns:
            feats_aggregated: If `feature_aggregator.concatenate_output==True`,
                a tensor of shape `(pts_batch, reduce_dim, n_pts, sum(dim_1,
                ... dim_N))` containing the aggregated features; `reduce_dim`
                depends on the aggregator implementation and typically equals
                1 or `n_cameras`. If `concatenate_output==False`, a dictionary
                of per-feature aggregations `{f_i: t_i_aggregated}` instead,
                each of shape `(pts_batch, reduce_dim, n_pts, aggr_dim_i)`.
        """
        # Step 1: sample features and masks at the point projections.
        sampled_feats, sampled_masks = self.view_sampler(
            pts=pts,
            seq_id_pts=seq_id_pts,
            camera=camera,
            seq_id_camera=seq_id_camera,
            feats=feats,
            masks=masks,
        )

        # Step 2: aggregate the features over the source views.
        # pyre-fixme[29]: `Union[torch.Tensor, torch.nn.Module]` is not a function.
        feats_aggregated = self.feature_aggregator(  # noqa: E731
            sampled_feats,
            sampled_masks,
            pts=pts,
            camera=camera,
        )  # TODO: do we need to pass a callback rather than compute here?

        return feats_aggregated
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/view_pooler/view_sampler.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/view_pooler/view_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..dabb8b7fc359f993845381ef40a729d30aeb8ec6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/view_pooler/view_sampler.py
@@ -0,0 +1,295 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+from typing import Dict, List, Optional, Tuple, Union
+
+import torch
+from pytorch3d.implicitron.tools.config import Configurable
+from pytorch3d.renderer.cameras import CamerasBase
+from pytorch3d.renderer.utils import ndc_grid_sample
+
+
class ViewSampler(Configurable, torch.nn.Module):
    """
    Samples image-based features at the 2D projections of a set of 3D points.

    Members:
        masked_sampling: If `True`, the `sampled_masks` output of
            `self.forward` contains the input `masks` sampled at the 2D
            projections; otherwise all entries of `sampled_masks` are set to 1.
        sampling_mode: The `mode` argument of
            `torch.nn.functional.grid_sample` used to interpolate the feature
            tensors at the locations of the 2D projections.
    """

    masked_sampling: bool = False
    sampling_mode: str = "bilinear"

    def forward(
        self,
        *,  # force kw args
        pts: torch.Tensor,
        seq_id_pts: Union[List[int], List[str], torch.LongTensor],
        camera: CamerasBase,
        seq_id_camera: Union[List[int], List[str], torch.LongTensor],
        feats: Dict[str, torch.Tensor],
        masks: Optional[torch.Tensor],
        **kwargs,
    ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
        """
        Projects each point cloud in the batch to all input cameras and
        samples features at the 2D projection locations.

        Args:
            pts: A tensor of shape `[pts_batch x n_pts x 3]` in world coords.
            seq_id_pts: LongTensor of shape `[pts_batch]` denoting the ids of
                the scenes from which `pts` were extracted, or a list of
                string names.
            camera: 'n_cameras' cameras, each corresponding to a batch element
                of `feats`.
            seq_id_camera: LongTensor of shape `[n_cameras]` denoting the ids
                of the scenes corresponding to cameras in `camera`, or a list
                of string names.
            feats: a dict of tensors of per-image features `{feat_i: T_i}`.
                Each tensor `T_i` is of shape `[n_cameras x dim_i x H_i x W_i]`.
            masks: `[n_cameras x 1 x H x W]`, define valid image regions
                for sampling `feats`.

        Returns:
            sampled_feats: Dict of sampled features `{feat_i: sampled_T_i}`.
                Each `sampled_T_i` of shape `[pts_batch, n_cameras, n_pts, dim_i]`.
            sampled_masks: A tensor with mask of the sampled features
                of shape `(pts_batch, n_cameras, n_pts, 1)`.

        Raises:
            ValueError: If `masked_sampling` is on but `masks` is `None`.
        """
        # Normalize both sequence-id inputs to long tensors on pts' device.
        seq_id_pts = handle_seq_id(seq_id_pts, pts.device)
        seq_id_camera = handle_seq_id(seq_id_camera, pts.device)

        if self.masked_sampling and masks is None:
            raise ValueError(
                "Masks have to be provided for `self.masked_sampling==True`"
            )

        # Project the points to every camera and sample the features (and,
        # if requested, the masks) at the projection locations.
        feats_all_cams, masks_all_cams = project_points_and_sample(
            pts,
            feats,
            camera,
            masks if self.masked_sampling else None,
            sampling_mode=self.sampling_mode,
        )

        # Invalidate features sampled from cameras that belong to a different
        # scene than the point cloud they were sampled for.
        same_scene = seq_id_camera[None] == seq_id_pts[:, None]
        camera_pts_mask = same_scene[..., None, None].to(pts)

        sampled_feats = {
            name: feat * camera_pts_mask for name, feat in feats_all_cams.items()
        }
        sampled_masks = masks_all_cams * camera_pts_mask

        return sampled_feats, sampled_masks
+
+
def project_points_and_sample(
    pts: torch.Tensor,
    feats: Dict[str, torch.Tensor],
    camera: CamerasBase,
    masks: Optional[torch.Tensor],
    eps: float = 1e-2,
    sampling_mode: str = "bilinear",
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
    """
    Project each point cloud from a batch of point clouds to all input cameras
    and sample features at the 2D projection locations.

    Args:
        pts: `(pts_batch, n_pts, 3)` tensor containing a batch of 3D point clouds.
        feats: A dict `{feat_i: feat_T_i}` of features to sample,
            where each `feat_T_i` is a tensor of shape
            `(n_cameras, feat_i_dim, feat_i_H, feat_i_W)`
            of `feat_i_dim`-dimensional features extracted from `n_cameras`
            source views.
        camera: A batch of `n_cameras` cameras corresponding to their feature
            tensors `feat_T_i` from `feats`.
        masks: A tensor of shape `(n_cameras, 1, mask_H, mask_W)` denoting
            valid locations for sampling.
        eps: A small constant controlling the minimum depth of projections
            of `pts` to avoid divisions by zero in the projection operation.
        sampling_mode: Sampling mode of the grid sampler.

    Returns:
        sampled_feats: Dict of sampled features `{feat_i: sampled_T_i}`.
            Each `sampled_T_i` is of shape
            `(pts_batch, n_cameras, n_pts, feat_i_dim)`.
        sampled_masks: A tensor with the mask of the sampled features
            of shape `(pts_batch, n_cameras, n_pts, 1)`.
            If `masks` is `None`, the returned `sampled_masks` will be
            filled with 1s.
    """

    n_cameras = camera.R.shape[0]
    pts_batch = pts.shape[0]
    # Trailing "spatial" dims of the points (everything between batch and xyz).
    n_pts = pts.shape[1:-1]

    # Form all (camera, point-batch) pairs so every cloud is seen by every camera.
    camera_rep, pts_rep = cameras_points_cartesian_product(camera, pts)

    # The eps here is super-important to avoid NaNs in backprop!
    proj_rep = camera_rep.transform_points(
        pts_rep.reshape(n_cameras * pts_batch, -1, 3), eps=eps
    )[..., :2]
    # [ pts1 in cam1, pts2 in cam1, pts3 in cam1,
    #   pts1 in cam2, pts2 in cam2, pts3 in cam2,
    #   pts1 in cam3, pts2 in cam3, pts3 in cam3 ]

    # reshape for the grid sampler
    sampling_grid_ndc = proj_rep.view(n_cameras, pts_batch, -1, 2)
    # [ [pts1 in cam1, pts2 in cam1, pts3 in cam1],
    #   [pts1 in cam2, pts2 in cam2, pts3 in cam2],
    #   [pts1 in cam3, pts2 in cam3, pts3 in cam3] ]
    # n_cameras x pts_batch x n_pts x 2

    # sample both feats
    # permute moves the sample axes to (pts_batch, n_cameras, n_pts, dim).
    feats_sampled = {
        k: ndc_grid_sample(
            f,
            sampling_grid_ndc,
            mode=sampling_mode,
            align_corners=False,
        )
        .permute(2, 0, 3, 1)
        .reshape(pts_batch, n_cameras, *n_pts, -1)
        for k, f in feats.items()
    }  # {k: pts_batch x n_cameras x *n_pts x dim} for each feat type "k"

    if masks is not None:
        # sample masks with the same grid and layout as the features
        masks_sampled = (
            ndc_grid_sample(
                masks,
                sampling_grid_ndc,
                mode=sampling_mode,
                align_corners=False,
            )
            .permute(2, 0, 3, 1)
            .reshape(pts_batch, n_cameras, *n_pts, 1)
        )
    else:
        # no masks given: treat every sampled location as valid
        masks_sampled = sampling_grid_ndc.new_ones(pts_batch, n_cameras, *n_pts, 1)

    return feats_sampled, masks_sampled
+
+
def handle_seq_id(
    seq_id: Union[torch.LongTensor, List[str], List[int]],
    device,
) -> torch.LongTensor:
    """
    Converts the input sequence id to a LongTensor on `device`.

    Args:
        seq_id: A sequence of sequence ids (LongTensor, ints, or strings).
        device: The target device of the output.

    Returns:
        long_seq_id: `seq_id` converted to a `LongTensor` and moved to `device`.
    """
    if torch.is_tensor(seq_id):
        # pyre-fixme[16]: Item `List` of `Union[List[int], List[str], LongTensor]`
        # has no attribute `to`.
        return seq_id.to(device)
    if isinstance(seq_id[0], str):
        # NOTE: builtin hash() of str is salted per process (PYTHONHASHSEED),
        # so string-derived ids are only consistent within a single run.
        seq_id = [hash(s) for s in seq_id]
    # pyre-fixme[9]: seq_id has type `Union[List[int], List[str], LongTensor]`;
    # used as `Tensor`.
    return torch.tensor(seq_id, dtype=torch.long, device=device)
+
+
def cameras_points_cartesian_product(
    camera: CamerasBase, pts: torch.Tensor
) -> Tuple[CamerasBase, torch.Tensor]:
    """
    Generates all pairs of elements from 'camera' and 'pts' such that the
    returned `camera_rep` and `pts_rep` line up index-by-index::

        camera_rep = [           pts_rep = [
            camera[0]                pts[0],
            camera[0]                pts[1],
            ...                      ...,
            camera[0]                pts[batch_pts-1],
            camera[1]                pts[0],
            ...                      ...,
            camera[n_cameras-1]      pts[batch_pts-1],
        ]                        ]

    i.e. each camera is repeated `batch_pts` times (block-wise) and the whole
    point batch is tiled once per camera.

    Args:
        camera: A batch of `n_cameras` cameras.
        pts: A batch of `batch_pts` points of shape `(batch_pts, ..., dim)`.

    Returns:
        camera_rep: A batch of `batch_pts*n_cameras` cameras in the block
            order shown above.
        pts_rep: Repeated `pts` of shape `(batch_pts*n_cameras, ..., dim)`.
    """
    n_cameras = camera.R.shape[0]
    batch_pts = pts.shape[0]
    # Tile the full point batch once per camera: [pts; pts; ...; pts].
    pts_rep = pts.repeat(n_cameras, *([1] * (pts.ndim - 1)))
    # Camera indices [0,...,0, 1,...,1, ..., n-1,...,n-1], each id repeated
    # batch_pts times (equivalent to arange[:, None].expand(...).reshape(...)).
    idx_cams = torch.arange(n_cameras).repeat_interleave(batch_pts)
    # pyre-fixme[6]: For 1st param expected `Union[List[int], int, LongTensor]` but
    #  got `Tensor`.
    camera_rep = camera[idx_cams]
    return camera_rep, pts_rep
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/visualization/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/visualization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6267ddbf92b36f9ebed9b194fd0eab924ad35556
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/visualization/__init__.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/visualization/render_flyaround.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/visualization/render_flyaround.py
new file mode 100644
index 0000000000000000000000000000000000000000..44ca3de5bbb610deda34c7e736d4b86e7f27de11
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/models/visualization/render_flyaround.py
@@ -0,0 +1,393 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+import logging
+import math
+import os
+import random
+from typing import (
+ Any,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ TYPE_CHECKING,
+ Union,
+)
+
+import numpy as np
+import torch
+import torch.nn.functional as Fu
+from pytorch3d.implicitron.dataset.dataset_base import DatasetBase, FrameData
+from pytorch3d.implicitron.dataset.utils import is_train_frame
+from pytorch3d.implicitron.models.base_model import EvaluationMode
+from pytorch3d.implicitron.tools.eval_video_trajectory import (
+ generate_eval_video_cameras,
+)
+from pytorch3d.implicitron.tools.video_writer import VideoWriter
+from pytorch3d.implicitron.tools.vis_utils import (
+ get_visdom_connection,
+ make_depth_image,
+)
+from tqdm import tqdm
+
+if TYPE_CHECKING:
+ from visdom import Visdom
+
+logger = logging.getLogger(__name__)
+
+
def render_flyaround(
    dataset: DatasetBase,
    sequence_name: str,
    model: torch.nn.Module,
    output_video_path: str,
    n_flyaround_poses: int = 40,
    fps: int = 20,
    trajectory_type: str = "circular_lsq_fit",
    max_angle: float = 2 * math.pi,
    trajectory_scale: float = 1.1,
    scene_center: Tuple[float, float, float] = (0.0, 0.0, 0.0),
    up: Tuple[float, float, float] = (0.0, -1.0, 0.0),
    traj_offset: float = 0.0,
    n_source_views: int = 9,
    visdom_show_preds: bool = False,
    visdom_environment: str = "render_flyaround",
    visdom_server: str = "http://127.0.0.1",
    visdom_port: int = 8097,
    num_workers: int = 10,
    device: Union[str, torch.device] = "cuda",
    seed: Optional[int] = None,
    video_resize: Optional[Tuple[int, int]] = None,
    output_video_frames_dir: Optional[str] = None,
    visualize_preds_keys: Sequence[str] = (
        "images_render",
        "masks_render",
        "depths_render",
        "_all_source_images",
    ),
) -> None:
    """
    Uses `model` to generate a video consisting of renders of a scene imaged from
    a camera flying around the scene. The scene is specified with the `dataset` object and
    `sequence_name` which denotes the name of the scene whose frames are in `dataset`.

    Args:
        dataset: The dataset object containing frames from a sequence in `sequence_name`.
        sequence_name: Name of a sequence from `dataset`.
        model: The model whose predictions are going to be visualized.
        output_video_path: The path to the video output by this script.
        n_flyaround_poses: The number of camera poses of the flyaround trajectory.
        fps: Framerate of the output video.
        trajectory_type: The type of the camera trajectory. Can be one of:
            circular_lsq_fit: Camera centers follow a trajectory obtained
                by fitting a 3D circle to train_cameras centers.
                All cameras are looking towards scene_center.
            figure_eight: Figure-of-8 trajectory around the center of the
                central camera of the training dataset.
            trefoil_knot: Same as 'figure_eight', but the trajectory has a shape
                of a trefoil knot (https://en.wikipedia.org/wiki/Trefoil_knot).
            figure_eight_knot: Same as 'figure_eight', but the trajectory has a shape
                of a figure-eight knot
                (https://en.wikipedia.org/wiki/Figure-eight_knot_(mathematics)).
        max_angle: Defines the total length of the generated camera trajectory.
            All possible trajectories (set with the `trajectory_type` argument) are
            periodic with the period of `time==2pi`.
            E.g. setting `trajectory_type=circular_lsq_fit` and `time=4pi` will generate
            a trajectory of camera poses rotating the total of 720 deg around the object.
        trajectory_scale: The extent of the trajectory.
        scene_center: The center of the scene in world coordinates which all
            the cameras from the generated trajectory look at.
        up: The "up" vector of the scene (=the normal of the scene floor).
            Active for the `trajectory_type="circular"`.
        traj_offset: 3D offset vector added to each point of the trajectory.
        n_source_views: The number of source views sampled from the known views of the
            training sequence added to each evaluation batch.
        visdom_show_preds: If `True`, exports the visualizations to visdom.
        visdom_environment: The name of the visdom environment.
        visdom_server: The address of the visdom server.
        visdom_port: The visdom port.
        num_workers: The number of workers used to load the training data.
        device: The torch device used for model evaluation and rendering.
        seed: The random seed used for reproducible sampling of the source views.
            If `None`, a seed is derived from `sequence_name`.
        video_resize: Optionally, defines the size of the output video.
        output_video_frames_dir: If specified, the frames of the output video are going
            to be permanently stored in this directory.
        visualize_preds_keys: The names of the model predictions to visualize.
    """

    if seed is None:
        # derive a deterministic-within-run seed from the sequence name
        seed = hash(sequence_name)

    if visdom_show_preds:
        viz = get_visdom_connection(server=visdom_server, port=visdom_port)
    else:
        viz = None

    logger.info(f"Loading all data of sequence '{sequence_name}'.")
    seq_idx = list(dataset.sequence_indices_in_order(sequence_name))
    train_data = _load_whole_dataset(dataset, seq_idx, num_workers=num_workers)
    assert all(train_data.sequence_name[0] == sn for sn in train_data.sequence_name)
    # pyre-ignore[6]
    sequence_set_name = "train" if is_train_frame(train_data.frame_type)[0] else "test"
    logger.info(f"Sequence set = {sequence_set_name}.")
    train_cameras = train_data.camera
    # trajectory "time" parameter: n_flyaround_poses samples over [0, max_angle)
    time = torch.linspace(0, max_angle, n_flyaround_poses + 1)[:n_flyaround_poses]
    test_cameras = generate_eval_video_cameras(
        train_cameras,
        time=time,
        n_eval_cams=n_flyaround_poses,
        trajectory_type=trajectory_type,
        trajectory_scale=trajectory_scale,
        scene_center=scene_center,
        up=up,
        focal_length=None,
        principal_point=torch.zeros(n_flyaround_poses, 2),
        traj_offset_canonical=(0.0, 0.0, traj_offset),
    )

    # sample the source views reproducibly
    with torch.random.fork_rng():
        torch.manual_seed(seed)
        source_views_i = torch.randperm(len(seq_idx))[:n_source_views]

    # add the first dummy view that will get replaced with the target camera
    source_views_i = Fu.pad(source_views_i, [1, 0])
    source_views = [seq_idx[i] for i in source_views_i.tolist()]
    batch = _load_whole_dataset(dataset, source_views, num_workers=num_workers)
    assert all(batch.sequence_name[0] == sn for sn in batch.sequence_name)

    preds_total = []
    for n in tqdm(range(n_flyaround_poses), total=n_flyaround_poses):
        # set the first batch camera to the target camera
        for k in ("R", "T", "focal_length", "principal_point"):
            getattr(batch.camera, k)[0] = getattr(test_cameras[n], k)

        # Move to cuda
        net_input = batch.to(device)
        with torch.no_grad():
            preds = model(**{**net_input, "evaluation_mode": EvaluationMode.EVALUATION})

        # make sure we dont overwrite something
        assert all(k not in preds for k in net_input.keys())
        preds.update(net_input)  # merge everything into one big dict

        # Render the predictions to images
        rendered_pred = _images_from_preds(preds, extract_keys=visualize_preds_keys)
        preds_total.append(rendered_pred)

        # show the preds every 5% of the export iterations
        if visdom_show_preds and (
            n % max(n_flyaround_poses // 20, 1) == 0 or n == n_flyaround_poses - 1
        ):
            assert viz is not None
            _show_predictions(
                preds_total,
                sequence_name=batch.sequence_name[0],
                viz=viz,
                viz_env=visdom_environment,
                predicted_keys=visualize_preds_keys,
            )

    logger.info(f"Exporting videos for sequence {sequence_name} ...")
    _generate_prediction_videos(
        preds_total,
        sequence_name=batch.sequence_name[0],
        viz=viz,
        viz_env=visdom_environment,
        fps=fps,
        video_path=output_video_path,
        resize=video_resize,
        video_frames_dir=output_video_frames_dir,
        predicted_keys=visualize_preds_keys,
    )
+
+
def _load_whole_dataset(
    dataset: torch.utils.data.Dataset, idx: Sequence[int], num_workers: int = 10
) -> FrameData:
    """
    Load the frames of `dataset` selected by `idx` and collate them into a
    single FrameData batch.

    Args:
        dataset: The dataset to read from.
        idx: Indices of the frames to load.
        num_workers: Number of dataloader workers.

    Returns:
        One FrameData containing all `len(idx)` frames.
    """
    subset = torch.utils.data.Subset(dataset, idx)
    loader = torch.utils.data.DataLoader(
        subset,
        batch_size=len(idx),  # the whole subset in a single batch
        shuffle=False,
        num_workers=num_workers,
        collate_fn=FrameData.collate,
    )
    return next(iter(loader))
+
+
def _images_from_preds(
    preds: Dict[str, Any],
    extract_keys: Iterable[str] = (
        "image_rgb",
        "images_render",
        "fg_probability",
        "masks_render",
        "depths_render",
        "depth_map",
        "_all_source_images",
    ),
) -> Dict[str, torch.Tensor]:
    """
    Convert selected entries of a prediction dict into displayable 3-channel
    CPU image tensors.

    Args:
        preds: Prediction dict; the values read here are image-like tensors
            (assumed (B, C, H, W) — not verified in this function).
        extract_keys: Which entries of `preds` to convert. The pseudo-key
            "_all_source_images" is not looked up in `preds`; instead it is a
            square grid assembled from the source views preds["image_rgb"][1:].

    Returns:
        Dict mapping each available key to a detached CPU tensor. Depth-like
        keys (prefixed "depth") are colorized with make_depth_image using the
        rendered mask; single-channel images are repeated to 3 channels.
    """
    imout = {}
    for k in extract_keys:
        if k == "_all_source_images" and "image_rgb" in preds:
            # Tile all source views (everything after the first, target, view)
            # into one grid image with a leading batch dimension of 1.
            src_ims = preds["image_rgb"][1:].cpu().detach().clone()
            v = _stack_images(src_ims, None)[None]
        else:
            if k not in preds or preds[k] is None:
                print(f"cant show {k}")
                continue
            v = preds[k].cpu().detach().clone()
            if k.startswith("depth"):
                # Resize the rendered mask (nearest to keep it binary) to the
                # depth map's resolution before masking the visualization.
                mask_resize = Fu.interpolate(
                    preds["masks_render"],
                    size=preds[k].shape[2:],
                    mode="nearest",
                )
                v = make_depth_image(preds[k], mask_resize)
        if v.shape[1] == 1:
            # Grayscale -> RGB by repeating the single channel.
            v = v.repeat(1, 3, 1, 1)
        imout[k] = v.detach().cpu()

    return imout
+
+
+def _stack_images(ims: torch.Tensor, size: Optional[Tuple[int, int]]) -> torch.Tensor:
+ ba = ims.shape[0]
+ H = int(np.ceil(np.sqrt(ba)))
+ W = H
+ n_add = H * W - ba
+ if n_add > 0:
+ ims = torch.cat((ims, torch.zeros_like(ims[:1]).repeat(n_add, 1, 1, 1)))
+
+ ims = ims.view(H, W, *ims.shape[1:])
+ cated = torch.cat([torch.cat(list(row), dim=2) for row in ims], dim=1)
+ if size is not None:
+ cated = Fu.interpolate(cated[None], size=size, mode="bilinear")[0]
+ return cated.clamp(0.0, 1.0)
+
+
+def _show_predictions(
+ preds: List[Dict[str, Any]],
+ sequence_name: str,
+ viz: "Visdom",
+ viz_env: str = "visualizer",
+ predicted_keys: Sequence[str] = (
+ "images_render",
+ "masks_render",
+ "depths_render",
+ "_all_source_images",
+ ),
+ n_samples=10,
+ one_image_width=200,
+) -> None:
+ """Given a list of predictions visualize them into a single image using visdom."""
+ assert isinstance(preds, list)
+
+ pred_all = []
+ # Randomly choose a subset of the rendered images, sort by ordr in the sequence
+ n_samples = min(n_samples, len(preds))
+ pred_idx = sorted(random.sample(list(range(len(preds))), n_samples))
+ for predi in pred_idx:
+ # Make the concatentation for the same camera vertically
+ pred_all.append(
+ torch.cat(
+ [
+ torch.nn.functional.interpolate(
+ preds[predi][k].cpu(),
+ scale_factor=one_image_width / preds[predi][k].shape[3],
+ mode="bilinear",
+ ).clamp(0.0, 1.0)
+ for k in predicted_keys
+ ],
+ dim=2,
+ )
+ )
+ # Concatenate the images horizontally
+ pred_all_cat = torch.cat(pred_all, dim=3)[0]
+ viz.image(
+ pred_all_cat,
+ win="show_predictions",
+ env=viz_env,
+ opts={"title": f"pred_{sequence_name}"},
+ )
+
+
def _generate_prediction_videos(
    preds: List[Dict[str, Any]],
    sequence_name: str,
    viz: Optional["Visdom"] = None,
    viz_env: str = "visualizer",
    predicted_keys: Sequence[str] = (
        "images_render",
        "masks_render",
        "depths_render",
        "_all_source_images",
    ),
    fps: int = 20,
    video_path: str = "/tmp/video",
    video_frames_dir: Optional[str] = None,
    resize: Optional[Tuple[int, int]] = None,
) -> None:
    """Given a list of predictions create and visualize rotating videos of the
    objects using visdom.

    Args:
        preds: Per-frame dicts of rendered images (values are image tensors in
            [0, 1] with a leading batch dim of 1, as built by _images_from_preds).
        sequence_name: Used in output file names and visdom window titles.
        viz: Optional Visdom connection; if None, videos are only written to disk.
        viz_env: Visdom environment name.
        predicted_keys: Which entries of each prediction dict become a video;
            keys missing from the first frame are skipped with a warning.
        fps: Frame rate of the generated videos.
        video_path: Prefix of the output video file paths.
        video_frames_dir: If set, per-frame images are cached under
            `{video_frames_dir}/{sequence_name}_{key}`.
        resize: Optional (H, W) to resize frames to before writing.
    """

    # make sure the target video directory exists
    os.makedirs(os.path.dirname(video_path), exist_ok=True)

    # init a video writer for each predicted key
    vws = {}
    for k in predicted_keys:
        if k not in preds[0]:
            # Logger.warn is a deprecated alias - logging docs say to use warning().
            logger.warning(f"Cannot generate video for prediction key '{k}'")
            continue
        cache_dir = (
            None
            if video_frames_dir is None
            else os.path.join(video_frames_dir, f"{sequence_name}_{k}")
        )
        vws[k] = VideoWriter(
            fps=fps,
            out_path=f"{video_path}_{sequence_name}_{k}.mp4",
            cache_dir=cache_dir,
        )

    # stream every frame into the writer of each available key
    for rendered_pred in tqdm(preds):
        for k in vws:
            vws[k].write_frame(
                rendered_pred[k][0].clip(0.0, 1.0).detach().cpu().numpy(),
                resize=resize,
            )

    # finalize the videos and optionally show them in visdom
    for k in predicted_keys:
        if k not in vws:
            continue
        vws[k].get_video()
        logger.info(f"Generated {vws[k].out_path}.")
        if viz is not None:
            viz.video(
                videofile=vws[k].out_path,
                env=viz_env,
                win=k,  # we reuse the same window otherwise visdom dies
                opts={"title": sequence_name + " " + k},
            )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/third_party/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/third_party/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/third_party/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/third_party/hyperlayers.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/third_party/hyperlayers.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ad9ebeb73e938b019b358b265b9013fac9434d5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/third_party/hyperlayers.py
@@ -0,0 +1,255 @@
+# a copy-paste from https://github.com/vsitzmann/scene-representation-networks/blob/master/hyperlayers.py
+# fmt: off
+# flake8: noqa
+
+# pyre-unsafe
+'''Pytorch implementations of hyper-network modules.
+'''
+import functools
+
+import torch
+import torch.nn as nn
+
+from . import pytorch_prototyping
+
+
def partialclass(cls, *args, **kwds):
    """Return a subclass of `cls` whose __init__ is pre-bound with the given
    positional and keyword arguments (a class-level functools.partial)."""
    bound_init = functools.partialmethod(cls.__init__, *args, **kwds)
    return type("NewCls", (cls,), {"__init__": bound_init})
+
+
class LookupLayer(nn.Module):
    """A per-object dense layer: an embedding-backed linear map (LookupLinear)
    followed by shared LayerNorm (non-affine) and ReLU.

    Vendored from scene-representation-networks; kept close to upstream.
    """

    def __init__(self, in_ch, out_ch, num_objects):
        super().__init__()

        self.out_ch = out_ch
        # Holds one flat weight+bias vector per object index.
        self.lookup_lin = LookupLinear(in_ch, out_ch, num_objects=num_objects)
        self.norm_nl = nn.Sequential(
            nn.LayerNorm([self.out_ch], elementwise_affine=False), nn.ReLU(inplace=True)
        )

    def forward(self, obj_idx):
        """Return an nn.Sequential computing: object-specific linear -> LayerNorm -> ReLU."""
        net = nn.Sequential(self.lookup_lin(obj_idx), self.norm_nl)
        return net
+
+
class LookupFC(nn.Module):
    """A per-object MLP: a stack of LookupLayers whose parameters are looked
    up by object index, optionally ending in a bare LookupLinear.
    """

    def __init__(
        self,
        hidden_ch,
        num_hidden_layers,
        num_objects,
        in_ch,
        out_ch,
        outermost_linear=False,
    ):
        """
        :param hidden_ch: Width of the hidden layers.
        :param num_hidden_layers: Number of hidden LookupLayers.
        :param num_objects: Size of each per-object embedding table.
        :param in_ch: Input channels.
        :param out_ch: Output channels.
        :param outermost_linear: If True, the last layer has no norm/ReLU.
        """
        super().__init__()
        self.layers = nn.ModuleList()
        self.layers.append(
            LookupLayer(in_ch=in_ch, out_ch=hidden_ch, num_objects=num_objects)
        )

        for i in range(num_hidden_layers):
            self.layers.append(
                LookupLayer(in_ch=hidden_ch, out_ch=hidden_ch, num_objects=num_objects)
            )

        if outermost_linear:
            self.layers.append(
                LookupLinear(in_ch=hidden_ch, out_ch=out_ch, num_objects=num_objects)
            )
        else:
            self.layers.append(
                LookupLayer(in_ch=hidden_ch, out_ch=out_ch, num_objects=num_objects)
            )

    def forward(self, obj_idx):
        """Assemble and return the network for object `obj_idx` as nn.Sequential."""
        net = []
        for i in range(len(self.layers)):
            net.append(self.layers[i](obj_idx))

        return nn.Sequential(*net)
+
+
class LookupLinear(nn.Module):
    """A linear layer whose weights and biases are stored per object in an
    nn.Embedding table; forward(obj_idx) returns the matching BatchLinear.
    """

    def __init__(self, in_ch, out_ch, num_objects):
        super().__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch

        # One flat row per object: in_ch*out_ch weights followed by out_ch biases.
        self.hypo_params = nn.Embedding(num_objects, in_ch * out_ch + out_ch)

        # Per object: Kaiming-normal init of the weight part, zeros for biases.
        for i in range(num_objects):
            nn.init.kaiming_normal_(
                self.hypo_params.weight.data[i, : self.in_ch * self.out_ch].view(
                    self.out_ch, self.in_ch
                ),
                a=0.0,
                nonlinearity="relu",
                mode="fan_in",
            )
            self.hypo_params.weight.data[i, self.in_ch * self.out_ch :].fill_(0.0)

    def forward(self, obj_idx):
        """Return a BatchLinear built from the parameters of object `obj_idx`."""
        hypo_params = self.hypo_params(obj_idx)

        # Indices explicit to catch errors in shape of output layer
        weights = hypo_params[..., : self.in_ch * self.out_ch]
        biases = hypo_params[
            ..., self.in_ch * self.out_ch : (self.in_ch * self.out_ch) + self.out_ch
        ]

        biases = biases.view(*(biases.size()[:-1]), 1, self.out_ch)
        weights = weights.view(*(weights.size()[:-1]), self.out_ch, self.in_ch)

        return BatchLinear(weights=weights, biases=biases)
+
+
class HyperLayer(nn.Module):
    """A hypernetwork that predicts a single Dense Layer, including LayerNorm and a ReLU."""

    def __init__(
        self, in_ch, out_ch, hyper_in_ch, hyper_num_hidden_layers, hyper_hidden_ch
    ):
        """
        :param in_ch: Input channels of the predicted layer.
        :param out_ch: Output channels of the predicted layer.
        :param hyper_in_ch: Input channels of the hypernetwork.
        :param hyper_num_hidden_layers: Hidden layers of the hypernetwork.
        :param hyper_hidden_ch: Hidden width of the hypernetwork.
        """
        super().__init__()

        self.hyper_linear = HyperLinear(
            in_ch=in_ch,
            out_ch=out_ch,
            hyper_in_ch=hyper_in_ch,
            hyper_num_hidden_layers=hyper_num_hidden_layers,
            hyper_hidden_ch=hyper_hidden_ch,
        )
        # Norm/nonlinearity are shared modules appended after the predicted
        # (input-dependent) linear layer in forward().
        self.norm_nl = nn.Sequential(
            nn.LayerNorm([out_ch], elementwise_affine=False), nn.ReLU(inplace=True)
        )

    def forward(self, hyper_input):
        """
        :param hyper_input: input to hypernetwork.
        :return: nn.Module; predicted fully connected network.
        """
        return nn.Sequential(self.hyper_linear(hyper_input), self.norm_nl)
+
+
class HyperFC(nn.Module):
    """Builds a hypernetwork that predicts a fully connected neural network."""

    def __init__(
        self,
        hyper_in_ch,
        hyper_num_hidden_layers,
        hyper_hidden_ch,
        hidden_ch,
        num_hidden_layers,
        in_ch,
        out_ch,
        outermost_linear=False,
    ):
        """
        :param hyper_in_ch: Input channels of each hypernetwork MLP.
        :param hyper_num_hidden_layers: Hidden layers of each hypernetwork MLP.
        :param hyper_hidden_ch: Hidden width of each hypernetwork MLP.
        :param hidden_ch: Hidden width of the PREDICTED network.
        :param num_hidden_layers: Hidden layers of the predicted network.
        :param in_ch: Input channels of the predicted network.
        :param out_ch: Output channels of the predicted network.
        :param outermost_linear: If True, the predicted net ends in a bare linear layer.
        """
        super().__init__()

        # Pre-bind the hypernetwork hyperparameters so the per-layer
        # constructors below only take the predicted layer's in/out channels.
        PreconfHyperLinear = partialclass(
            HyperLinear,
            hyper_in_ch=hyper_in_ch,
            hyper_num_hidden_layers=hyper_num_hidden_layers,
            hyper_hidden_ch=hyper_hidden_ch,
        )
        PreconfHyperLayer = partialclass(
            HyperLayer,
            hyper_in_ch=hyper_in_ch,
            hyper_num_hidden_layers=hyper_num_hidden_layers,
            hyper_hidden_ch=hyper_hidden_ch,
        )

        self.layers = nn.ModuleList()
        self.layers.append(PreconfHyperLayer(in_ch=in_ch, out_ch=hidden_ch))

        for i in range(num_hidden_layers):
            self.layers.append(PreconfHyperLayer(in_ch=hidden_ch, out_ch=hidden_ch))

        if outermost_linear:
            self.layers.append(PreconfHyperLinear(in_ch=hidden_ch, out_ch=out_ch))
        else:
            self.layers.append(PreconfHyperLayer(in_ch=hidden_ch, out_ch=out_ch))

    def forward(self, hyper_input):
        """
        :param hyper_input: Input to hypernetwork.
        :return: nn.Module; Predicted fully connected neural network.
        """
        net = []
        for i in range(len(self.layers)):
            net.append(self.layers[i](hyper_input))

        return nn.Sequential(*net)
+
+
class BatchLinear(nn.Module):
    """A linear layer whose (possibly batched) weights and biases are supplied
    externally instead of being learned parameters of this module.
    """

    def __init__(self, weights, biases):
        """Implements a batch linear layer.

        :param weights: Shape: (batch, out_ch, in_ch)
        :param biases: Shape: (batch, 1, out_ch)
        """
        super().__init__()

        self.weights = weights
        self.biases = biases

    def __repr__(self):
        in_ch = self.weights.shape[-1]
        out_ch = self.weights.shape[-2]
        return f"BatchLinear(in_ch={in_ch}, out_ch={out_ch})"

    def forward(self, input):
        # x @ W^T + b, with W transposed over its last two dims so that
        # batch dimensions broadcast.
        return input.matmul(self.weights.transpose(-2, -1)) + self.biases
+
+
def last_hyper_layer_init(m) -> None:
    """Re-initialize the weights of modules that are EXACTLY nn.Linear
    (subclasses excluded, matching upstream) with Kaiming-normal, then scale
    them by 0.1 so the predicted parameters start small."""
    if type(m) is not nn.Linear:
        return
    nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity="relu", mode="fan_in")
    m.weight.data *= 1e-1
+
+
class HyperLinear(nn.Module):
    """A hypernetwork that predicts a single linear layer (weights & biases)."""

    def __init__(
        self, in_ch, out_ch, hyper_in_ch, hyper_num_hidden_layers, hyper_hidden_ch
    ):
        """
        :param in_ch: Input channels of the predicted linear layer.
        :param out_ch: Output channels of the predicted linear layer.
        :param hyper_in_ch: Input channels of the hypernetwork MLP.
        :param hyper_num_hidden_layers: Hidden layers of the hypernetwork MLP.
        :param hyper_hidden_ch: Hidden width of the hypernetwork MLP.
        """

        super().__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch

        # MLP mapping the hyper input to a flat vector of weights + biases.
        self.hypo_params = pytorch_prototyping.FCBlock(
            in_features=hyper_in_ch,
            hidden_ch=hyper_hidden_ch,
            num_hidden_layers=hyper_num_hidden_layers,
            out_features=(in_ch * out_ch) + out_ch,
            outermost_linear=True,
        )
        # Shrink the final layer's init so the predicted parameters start small.
        self.hypo_params[-1].apply(last_hyper_layer_init)

    def forward(self, hyper_input):
        """Return a BatchLinear whose parameters are predicted from `hyper_input`."""
        hypo_params = self.hypo_params(hyper_input)

        # Indices explicit to catch errors in shape of output layer
        weights = hypo_params[..., : self.in_ch * self.out_ch]
        biases = hypo_params[
            ..., self.in_ch * self.out_ch : (self.in_ch * self.out_ch) + self.out_ch
        ]

        biases = biases.view(*(biases.size()[:-1]), 1, self.out_ch)
        weights = weights.view(*(weights.size()[:-1]), self.out_ch, self.in_ch)

        return BatchLinear(weights=weights, biases=biases)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/third_party/pytorch_prototyping.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/third_party/pytorch_prototyping.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e2366d545fae2306428c6c578fa29b7ec4537cb
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/third_party/pytorch_prototyping.py
@@ -0,0 +1,773 @@
+# a copy-paste from https://raw.githubusercontent.com/vsitzmann/pytorch_prototyping/10f49b1e7df38a58fd78451eac91d7ac1a21df64/pytorch_prototyping.py
+# fmt: off
+# flake8: noqa
+
+# pyre-unsafe
+'''A number of custom pytorch modules with sane defaults that I find useful for model prototyping.
+'''
+import torch
+import torch.nn as nn
+import torchvision.utils
+from torch.nn import functional as F
+
+
+class FCLayer(nn.Module):
+ def __init__(self, in_features, out_features):
+ super().__init__()
+ self.net = nn.Sequential(
+ nn.Linear(in_features, out_features),
+ nn.LayerNorm([out_features]),
+ nn.ReLU(inplace=True),
+ )
+
+ def forward(self, input):
+ return self.net(input)
+
+
+# From https://gist.github.com/wassname/ecd2dac6fc8f9918149853d17e3abf02
class LayerNormConv2d(nn.Module):
    """Layer normalization for conv feature maps.

    Normalizes each sample over ALL of its non-batch dimensions jointly, then
    optionally applies a per-channel affine transform.

    NOTE(review): unlike nn.LayerNorm, `eps` is added to the std (not the
    variance), and `gamma` is initialized uniformly in [0, 1) rather than to
    ones — kept as in the upstream gist.
    """

    def __init__(self, num_features, eps=1e-5, affine=True):
        super().__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps

        if self.affine:
            self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
            self.beta = nn.Parameter(torch.zeros(num_features))

    def forward(self, x):
        # Per-sample statistics over all non-batch dims, broadcast back.
        shape = [-1] + [1] * (x.dim() - 1)
        mean = x.view(x.size(0), -1).mean(1).view(*shape)
        std = x.view(x.size(0), -1).std(1).view(*shape)

        y = (x - mean) / (std + self.eps)
        if self.affine:
            # Affine parameters broadcast along the channel dimension (dim 1).
            shape = [1, -1] + [1] * (x.dim() - 2)
            y = self.gamma.view(*shape) * y + self.beta.view(*shape)
        return y
+
+
class FCBlock(nn.Module):
    """A stack of FCLayers (Linear+LayerNorm+ReLU), optionally ending in a
    bare nn.Linear. Supports indexing (block[i]) into the layer stack.
    """

    def __init__(
        self,
        hidden_ch,
        num_hidden_layers,
        in_features,
        out_features,
        outermost_linear=False,
    ):
        """
        :param hidden_ch: Width of the hidden layers.
        :param num_hidden_layers: Number of hidden (middle) FCLayers.
        :param in_features: Input feature dimension.
        :param out_features: Output feature dimension.
        :param outermost_linear: If True, the last layer is a plain nn.Linear
            (no LayerNorm/ReLU).
        """
        super().__init__()

        self.net = []
        self.net.append(FCLayer(in_features=in_features, out_features=hidden_ch))

        for i in range(num_hidden_layers):
            self.net.append(FCLayer(in_features=hidden_ch, out_features=hidden_ch))

        if outermost_linear:
            self.net.append(nn.Linear(in_features=hidden_ch, out_features=out_features))
        else:
            self.net.append(FCLayer(in_features=hidden_ch, out_features=out_features))

        self.net = nn.Sequential(*self.net)
        # Re-initialize every nn.Linear (including those inside FCLayers).
        self.net.apply(self.init_weights)

    def __getitem__(self, item):
        # Lets callers (e.g. HyperLinear) index into the layer stack directly.
        return self.net[item]

    def init_weights(self, m):
        if type(m) == nn.Linear:
            nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity="relu", mode="fan_in")

    def forward(self, input):
        return self.net(input)
+
+
class DownBlock3D(nn.Module):
    """A 3D convolutional downsampling block.

    Halves each spatial dimension via ReplicationPad3d(1) + stride-2 conv
    (kernel 4), followed by optional norm and LeakyReLU(0.2).
    """

    def __init__(self, in_channels, out_channels, norm=nn.BatchNorm3d):
        """
        :param in_channels: Number of input channels.
        :param out_channels: Number of output channels.
        :param norm: Norm layer class, or None for no normalization.
        """
        super().__init__()

        self.net = [
            nn.ReplicationPad3d(1),
            nn.Conv3d(
                in_channels,
                out_channels,
                kernel_size=4,
                padding=0,
                stride=2,
                # bias is redundant when a norm layer follows
                bias=False if norm is not None else True,
            ),
        ]

        if norm is not None:
            self.net += [norm(out_channels, affine=True)]

        self.net += [nn.LeakyReLU(0.2, True)]
        self.net = nn.Sequential(*self.net)

    def forward(self, x):
        return self.net(x)
+
+
class UpBlock3D(nn.Module):
    """A 3D convolutional upsampling block.

    Doubles each spatial dimension via a stride-2 transposed conv (kernel 4),
    followed by optional norm and ReLU.
    """

    def __init__(self, in_channels, out_channels, norm=nn.BatchNorm3d):
        """
        :param in_channels: Number of input channels.
        :param out_channels: Number of output channels.
        :param norm: Norm layer class, or None for no normalization.
        """
        super().__init__()

        self.net = [
            nn.ConvTranspose3d(
                in_channels,
                out_channels,
                kernel_size=4,
                stride=2,
                padding=1,
                # bias is redundant when a norm layer follows
                bias=False if norm is not None else True,
            ),
        ]

        if norm is not None:
            self.net += [norm(out_channels, affine=True)]

        self.net += [nn.ReLU(True)]
        self.net = nn.Sequential(*self.net)

    def forward(self, x, skipped=None):
        # If a skip connection is supplied, concatenate it channel-wise first.
        if skipped is not None:
            input = torch.cat([skipped, x], dim=1)
        else:
            input = x
        return self.net(input)
+
+
class Conv3dSame(torch.nn.Module):
    """3D convolution that pads to keep spatial dimensions equal.
    Cannot deal with stride. Only quadratic kernels (=scalar kernel_size).
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        bias=True,
        padding_layer=nn.ReplicationPad3d,
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param kernel_size: Scalar. Spatial dimensions of kernel (only quadratic kernels supported).
        :param bias: Whether or not to use bias.
        :param padding_layer: Which padding to use. Default is replication padding.
        """
        super().__init__()
        # For even kernels, pad one less at the trailing edge so that the
        # output spatial size exactly matches the input.
        ka = kernel_size // 2
        kb = ka - 1 if kernel_size % 2 == 0 else ka
        self.net = nn.Sequential(
            padding_layer((ka, kb, ka, kb, ka, kb)),
            nn.Conv3d(in_channels, out_channels, kernel_size, bias=bias, stride=1),
        )

    def forward(self, x):
        return self.net(x)
+
+
class Conv2dSame(torch.nn.Module):
    """2D convolution that preserves the spatial dimensions of its input.
    Stride is fixed to 1; only scalar (square) kernel sizes are supported.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        bias=True,
        padding_layer=nn.ReflectionPad2d,
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param kernel_size: Scalar. Spatial dimensions of kernel (only quadratic kernels supported).
        :param bias: Whether or not to use bias.
        :param padding_layer: Which padding to use. Default is reflection padding.
        """
        super().__init__()
        # Pad (left, right, top, bottom); even kernels pad one pixel less on
        # the trailing edge so output size equals input size.
        lead = kernel_size // 2
        trail = lead - 1 if kernel_size % 2 == 0 else lead
        self.net = nn.Sequential(
            padding_layer((lead, trail, lead, trail)),
            nn.Conv2d(in_channels, out_channels, kernel_size, bias=bias, stride=1),
        )

        # Expose the conv parameters directly on this module.
        self.weight = self.net[1].weight
        self.bias = self.net[1].bias

    def forward(self, x):
        return self.net(x)
+
+
class UpBlock(nn.Module):
    """A 2d-conv upsampling block with a variety of options for upsampling, and following best practices / with
    reasonable defaults. (ReLU nonlinearity, kernel size multiple of stride)
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        post_conv=True,
        use_dropout=False,
        dropout_prob=0.1,
        norm=nn.BatchNorm2d,
        upsampling_mode="transpose",
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param post_conv: Whether to have another convolutional layer after the upsampling layer.
        :param use_dropout: bool. Whether to use dropout or not.
        :param dropout_prob: Float. The dropout probability (if use_dropout is True)
        :param norm: Which norm to use. If None, no norm is used. Default is Batchnorm with affinity.
        :param upsampling_mode: Which upsampling mode:
                transpose: Upsampling with stride-2, kernel size 4 transpose convolutions.
                bilinear: Feature map is upsampled with bilinear upsampling, then a conv layer.
                nearest: Feature map is upsampled with nearest neighbor upsampling, then a conv layer.
                shuffle: Feature map is upsampled with pixel shuffling, then a conv layer.
        """
        super().__init__()

        net = list()

        if upsampling_mode == "transpose":
            net += [
                nn.ConvTranspose2d(
                    in_channels,
                    out_channels,
                    kernel_size=4,
                    stride=2,
                    padding=1,
                    # bias is redundant when a norm layer follows
                    bias=True if norm is None else False,
                )
            ]
        elif upsampling_mode == "bilinear":
            net += [nn.UpsamplingBilinear2d(scale_factor=2)]
            net += [
                Conv2dSame(
                    in_channels,
                    out_channels,
                    kernel_size=3,
                    bias=True if norm is None else False,
                )
            ]
        elif upsampling_mode == "nearest":
            net += [nn.UpsamplingNearest2d(scale_factor=2)]
            net += [
                Conv2dSame(
                    in_channels,
                    out_channels,
                    kernel_size=3,
                    bias=True if norm is None else False,
                )
            ]
        elif upsampling_mode == "shuffle":
            net += [nn.PixelShuffle(upscale_factor=2)]
            net += [
                Conv2dSame(
                    # PixelShuffle(2) divides the channel count by 4
                    in_channels // 4,
                    out_channels,
                    kernel_size=3,
                    bias=True if norm is None else False,
                )
            ]
        else:
            raise ValueError("Unknown upsampling mode!")

        if norm is not None:
            net += [norm(out_channels, affine=True)]

        net += [nn.ReLU(True)]

        if use_dropout:
            net += [nn.Dropout2d(dropout_prob, False)]

        if post_conv:
            net += [
                Conv2dSame(
                    out_channels,
                    out_channels,
                    kernel_size=3,
                    bias=True if norm is None else False,
                )
            ]

            if norm is not None:
                net += [norm(out_channels, affine=True)]

            net += [nn.ReLU(True)]

            if use_dropout:
                # NOTE(review): upstream hard-codes 0.1 here rather than
                # using dropout_prob — kept as-is to match the original.
                net += [nn.Dropout2d(0.1, False)]

        self.net = nn.Sequential(*net)

    def forward(self, x, skipped=None):
        # Optional U-Net skip connection, concatenated channel-wise.
        if skipped is not None:
            input = torch.cat([skipped, x], dim=1)
        else:
            input = x
        return self.net(input)
+
+
class DownBlock(nn.Module):
    """A 2D-conv downsampling block following best practices / with reasonable defaults
    (LeakyReLU, kernel size multiple of stride)
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        prep_conv=True,
        middle_channels=None,
        use_dropout=False,
        dropout_prob=0.1,
        norm=nn.BatchNorm2d,
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param prep_conv: Whether to have another convolutional layer before the downsampling layer.
        :param middle_channels: If prep_conv is true, this sets the number of channels between the prep and downsampling
                                convs.
        :param use_dropout: bool. Whether to use dropout or not.
        :param dropout_prob: Float. The dropout probability (if use_dropout is True)
        :param norm: Which norm to use. If None, no norm is used. Default is Batchnorm with affinity.
        """
        super().__init__()

        if middle_channels is None:
            middle_channels = in_channels

        net = list()

        # Optional stride-1 "prep" conv before the actual downsampling conv.
        if prep_conv:
            net += [
                nn.ReflectionPad2d(1),
                nn.Conv2d(
                    in_channels,
                    middle_channels,
                    kernel_size=3,
                    padding=0,
                    stride=1,
                    # bias is redundant when a norm layer follows
                    bias=True if norm is None else False,
                ),
            ]

            if norm is not None:
                net += [norm(middle_channels, affine=True)]

            net += [nn.LeakyReLU(0.2, True)]

            if use_dropout:
                net += [nn.Dropout2d(dropout_prob, False)]

        # Stride-2 conv halves the spatial dimensions.
        net += [
            nn.ReflectionPad2d(1),
            nn.Conv2d(
                middle_channels,
                out_channels,
                kernel_size=4,
                padding=0,
                stride=2,
                bias=True if norm is None else False,
            ),
        ]

        if norm is not None:
            net += [norm(out_channels, affine=True)]

        net += [nn.LeakyReLU(0.2, True)]

        if use_dropout:
            net += [nn.Dropout2d(dropout_prob, False)]

        self.net = nn.Sequential(*net)

    def forward(self, x):
        return self.net(x)
+
+
class Unet3d(nn.Module):
    """A 3d-Unet implementation with sane defaults."""

    def __init__(
        self,
        in_channels,
        out_channels,
        nf0,
        num_down,
        max_channels,
        norm=nn.BatchNorm3d,
        outermost_linear=False,
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param nf0: Number of features at highest level of U-Net
        :param num_down: Number of downsampling stages.
        :param max_channels: Maximum number of channels (channels multiply by 2 with every downsampling stage)
        :param norm: Which norm to use. If None, no norm is used. Default is Batchnorm with affinity.
        :param outermost_linear: Whether the output layer should be a linear layer or a nonlinear one.
        """
        super().__init__()

        assert num_down > 0, "Need at least one downsampling layer in UNet3d."

        # Define the in block
        self.in_layer = [Conv3dSame(in_channels, nf0, kernel_size=3, bias=False)]

        if norm is not None:
            self.in_layer += [norm(nf0, affine=True)]

        self.in_layer += [nn.LeakyReLU(0.2, True)]
        self.in_layer = nn.Sequential(*self.in_layer)

        # Define the center UNet block. The feature map has height and width 1 --> no batchnorm.
        self.unet_block = UnetSkipConnectionBlock3d(
            int(min(2 ** (num_down - 1) * nf0, max_channels)),
            int(min(2 ** (num_down - 1) * nf0, max_channels)),
            norm=None,
        )
        # Wrap the innermost block with progressively shallower skip blocks,
        # doubling channels (capped at max_channels) per downsampling stage.
        for i in list(range(0, num_down - 1))[::-1]:
            self.unet_block = UnetSkipConnectionBlock3d(
                int(min(2 ** i * nf0, max_channels)),
                int(min(2 ** (i + 1) * nf0, max_channels)),
                submodule=self.unet_block,
                norm=norm,
            )

        # Define the out layer. Each unet block concatenates its inputs with its outputs - so the output layer
        # automatically receives the output of the in_layer and the output of the last unet layer.
        self.out_layer = [
            Conv3dSame(2 * nf0, out_channels, kernel_size=3, bias=outermost_linear)
        ]

        if not outermost_linear:
            if norm is not None:
                self.out_layer += [norm(out_channels, affine=True)]
            self.out_layer += [nn.ReLU(True)]
        self.out_layer = nn.Sequential(*self.out_layer)

    def forward(self, x):
        in_layer = self.in_layer(x)
        unet = self.unet_block(in_layer)
        out_layer = self.out_layer(unet)
        return out_layer
+
+
class UnetSkipConnectionBlock3d(nn.Module):
    """Helper class for building a 3D unet.

    Wraps [down, (submodule), up] and concatenates the block's input with its
    output channel-wise, implementing the U-Net skip connection.
    """

    def __init__(self, outer_nc, inner_nc, norm=nn.BatchNorm3d, submodule=None):
        """
        :param outer_nc: Channels at the outer (input/output) side.
        :param inner_nc: Channels at the inner (downsampled) side.
        :param norm: Norm layer class, or None.
        :param submodule: The next inner block, or None for the innermost block.
        """
        super().__init__()

        if submodule is None:
            model = [
                DownBlock3D(outer_nc, inner_nc, norm=norm),
                UpBlock3D(inner_nc, outer_nc, norm=norm),
            ]
        else:
            model = [
                DownBlock3D(outer_nc, inner_nc, norm=norm),
                submodule,
                # submodule outputs 2*inner_nc channels (its own skip concat)
                UpBlock3D(2 * inner_nc, outer_nc, norm=norm),
            ]

        self.model = nn.Sequential(*model)

    def forward(self, x):
        forward_passed = self.model(x)
        # Skip connection: output has 2*outer_nc channels.
        return torch.cat([x, forward_passed], 1)
+
+
class UnetSkipConnectionBlock(nn.Module):
    """Helper class for building a 2D unet.

    Wraps [down, (submodule), up] and concatenates the block's input with its
    output channel-wise, implementing the U-Net skip connection.
    """

    def __init__(
        self,
        outer_nc,
        inner_nc,
        upsampling_mode,
        norm=nn.BatchNorm2d,
        submodule=None,
        use_dropout=False,
        dropout_prob=0.1,
    ):
        """
        :param outer_nc: Channels at the outer (input/output) side.
        :param inner_nc: Channels at the inner (downsampled) side.
        :param upsampling_mode: Upsampling variant; see UpBlock.
        :param norm: Norm layer class, or None.
        :param submodule: The next inner block, or None for the innermost block.
        :param use_dropout: Whether to use dropout in the down/up blocks.
        :param dropout_prob: Dropout probability if use_dropout is True.
        """
        super().__init__()

        if submodule is None:
            model = [
                DownBlock(
                    outer_nc,
                    inner_nc,
                    use_dropout=use_dropout,
                    dropout_prob=dropout_prob,
                    norm=norm,
                ),
                UpBlock(
                    inner_nc,
                    outer_nc,
                    use_dropout=use_dropout,
                    dropout_prob=dropout_prob,
                    norm=norm,
                    upsampling_mode=upsampling_mode,
                ),
            ]
        else:
            model = [
                DownBlock(
                    outer_nc,
                    inner_nc,
                    use_dropout=use_dropout,
                    dropout_prob=dropout_prob,
                    norm=norm,
                ),
                submodule,
                # submodule outputs 2*inner_nc channels (its own skip concat)
                UpBlock(
                    2 * inner_nc,
                    outer_nc,
                    use_dropout=use_dropout,
                    dropout_prob=dropout_prob,
                    norm=norm,
                    upsampling_mode=upsampling_mode,
                ),
            ]

        self.model = nn.Sequential(*model)

    def forward(self, x):
        forward_passed = self.model(x)
        # Skip connection: output has 2*outer_nc channels.
        return torch.cat([x, forward_passed], 1)
+
+
class Unet(nn.Module):
    """A 2d-Unet implementation with sane defaults."""

    def __init__(
        self,
        in_channels,
        out_channels,
        nf0,
        num_down,
        max_channels,
        use_dropout,
        upsampling_mode="transpose",
        dropout_prob=0.1,
        norm=nn.BatchNorm2d,
        outermost_linear=False,
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param nf0: Number of features at highest level of U-Net
        :param num_down: Number of downsampling stages.
        :param max_channels: Maximum number of channels (channels multiply by 2 with every downsampling stage)
        :param use_dropout: Whether to use dropout or no.
        :param dropout_prob: Dropout probability if use_dropout=True.
        :param upsampling_mode: Which type of upsampling should be used. See "UpBlock" for documentation.
        :param norm: Which norm to use. If None, no norm is used. Default is Batchnorm with affinity.
        :param outermost_linear: Whether the output layer should be a linear layer or a nonlinear one.
        """
        super().__init__()

        assert num_down > 0, "Need at least one downsampling layer in UNet."

        # Define the in block
        self.in_layer = [
            Conv2dSame(
                in_channels, nf0, kernel_size=3, bias=True if norm is None else False
            )
        ]
        if norm is not None:
            self.in_layer += [norm(nf0, affine=True)]
        self.in_layer += [nn.LeakyReLU(0.2, True)]

        if use_dropout:
            self.in_layer += [nn.Dropout2d(dropout_prob)]
        self.in_layer = nn.Sequential(*self.in_layer)

        # Define the center UNet block
        self.unet_block = UnetSkipConnectionBlock(
            min(2 ** (num_down - 1) * nf0, max_channels),
            min(2 ** (num_down - 1) * nf0, max_channels),
            use_dropout=use_dropout,
            dropout_prob=dropout_prob,
            norm=None,  # Innermost has no norm (spatial dimension 1)
            upsampling_mode=upsampling_mode,
        )

        # Wrap with progressively shallower skip blocks, doubling channels
        # (capped at max_channels) at each downsampling stage.
        for i in list(range(0, num_down - 1))[::-1]:
            self.unet_block = UnetSkipConnectionBlock(
                min(2 ** i * nf0, max_channels),
                min(2 ** (i + 1) * nf0, max_channels),
                use_dropout=use_dropout,
                dropout_prob=dropout_prob,
                submodule=self.unet_block,
                norm=norm,
                upsampling_mode=upsampling_mode,
            )

        # Define the out layer. Each unet block concatenates its inputs with its outputs - so the output layer
        # automatically receives the output of the in_layer and the output of the last unet layer.
        self.out_layer = [
            Conv2dSame(
                2 * nf0,
                out_channels,
                kernel_size=3,
                bias=outermost_linear or (norm is None),
            )
        ]

        if not outermost_linear:
            if norm is not None:
                self.out_layer += [norm(out_channels, affine=True)]
            self.out_layer += [nn.ReLU(True)]

            if use_dropout:
                self.out_layer += [nn.Dropout2d(dropout_prob)]
        self.out_layer = nn.Sequential(*self.out_layer)

        # Expose the output conv's weight tensor for external access.
        self.out_layer_weight = self.out_layer[0].weight

    def forward(self, x):
        in_layer = self.in_layer(x)
        unet = self.unet_block(in_layer)
        out_layer = self.out_layer(unet)
        return out_layer
+
+
class Identity(nn.Module):
    """A module that returns its input unchanged.

    Lets Downsampling/Upsampling nets degrade to a no-op when configured with
    an empty layer list.
    """

    def forward(self, input):
        return input
+
+
class DownsamplingNet(nn.Module):
    """A subnetwork that downsamples a 2D feature map with strided convolutions."""

    def __init__(
        self,
        per_layer_out_ch,
        in_channels,
        use_dropout,
        dropout_prob=0.1,
        last_layer_one=False,
        norm=nn.BatchNorm2d,
    ):
        """
        :param per_layer_out_ch: python list of integers. Defines the number of output channels per layer. Length of
                                 list defines number of downsampling steps (each step dowsamples by factor of 2.)
        :param in_channels: Number of input channels.
        :param use_dropout: Whether or not to use dropout.
        :param dropout_prob: Dropout probability.
        :param last_layer_one: Whether the output of the last layer will have a spatial size of 1. In that case,
                               the last layer will not have batchnorm, else, it will.
        :param norm: Which norm to use. Defaults to BatchNorm.
        """
        super().__init__()

        # An empty channel list means this net is a no-op.
        if not len(per_layer_out_ch):
            self.downs = Identity()
        else:
            self.downs = list()
            # First block also gets a prep conv at per_layer_out_ch[0] width.
            self.downs.append(
                DownBlock(
                    in_channels,
                    per_layer_out_ch[0],
                    use_dropout=use_dropout,
                    dropout_prob=dropout_prob,
                    middle_channels=per_layer_out_ch[0],
                    norm=norm,
                )
            )
            for i in range(0, len(per_layer_out_ch) - 1):
                # Spatial size 1 at the end -> drop the norm on the last block.
                if last_layer_one and (i == len(per_layer_out_ch) - 2):
                    norm = None
                self.downs.append(
                    DownBlock(
                        per_layer_out_ch[i],
                        per_layer_out_ch[i + 1],
                        dropout_prob=dropout_prob,
                        use_dropout=use_dropout,
                        norm=norm,
                    )
                )
            self.downs = nn.Sequential(*self.downs)

    def forward(self, input):
        return self.downs(input)
+
+
class UpsamplingNet(nn.Module):
    """A subnetwork that upsamples a 2D feature map with a variety of upsampling options."""

    def __init__(
        self,
        per_layer_out_ch,
        in_channels,
        upsampling_mode,
        use_dropout,
        dropout_prob=0.1,
        first_layer_one=False,
        norm=nn.BatchNorm2d,
    ):
        """
        :param per_layer_out_ch: python list of integers. Defines the number of output channels per layer. Length of
                                 list defines number of upsampling steps (each step upsamples by factor of 2.)
        :param in_channels: Number of input channels.
        :param upsampling_mode: Mode of upsampling. For documentation, see class "UpBlock"
        :param use_dropout: Whether or not to use dropout.
        :param dropout_prob: Dropout probability.
        :param first_layer_one: Whether the input to the first layer will have a spatial size of 1. In that case,
                                the first layer will not have a norm, else, it will.
        :param norm: Which norm to use. Defaults to BatchNorm.
        """
        super().__init__()

        # An empty channel list means this net is a no-op.
        if not len(per_layer_out_ch):
            self.ups = Identity()
        else:
            self.ups = list()
            self.ups.append(
                UpBlock(
                    in_channels,
                    per_layer_out_ch[0],
                    use_dropout=use_dropout,
                    dropout_prob=dropout_prob,
                    norm=None if first_layer_one else norm,
                    upsampling_mode=upsampling_mode,
                )
            )
            for i in range(0, len(per_layer_out_ch) - 1):
                self.ups.append(
                    UpBlock(
                        per_layer_out_ch[i],
                        per_layer_out_ch[i + 1],
                        use_dropout=use_dropout,
                        dropout_prob=dropout_prob,
                        norm=norm,
                        upsampling_mode=upsampling_mode,
                    )
                )
            self.ups = nn.Sequential(*self.ups)

    def forward(self, input):
        return self.ups(input)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/circle_fitting.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/circle_fitting.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f6cd1a05ee2f0a691fa1540f5dc454282e7662c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/circle_fitting.py
@@ -0,0 +1,240 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import warnings
+from dataclasses import dataclass
+from math import pi
+from typing import Optional
+
+import torch
+
+
def get_rotation_to_best_fit_xy(
    points: torch.Tensor, centroid: Optional[torch.Tensor] = None
) -> torch.Tensor:
    """
    Returns a rotation R such that `points @ R` has a best fit plane
    parallel to the xy plane

    Args:
        points: (*, N, 3) tensor of points in 3D
        centroid: (*, 1, 3), (3,) or scalar: their centroid.
            Computed from `points` if not given.

    Returns:
        (*, 3, 3) tensor rotation matrix
    """
    if centroid is None:
        centroid = points.mean(dim=-2, keepdim=True)

    points_centered = points - centroid
    # Eigenvectors of the (scaled) covariance, in ascending eigenvalue order:
    # the first column is the plane normal, the other two span the plane.
    _, evec = torch.linalg.eigh(points_centered.transpose(-1, -2) @ points_centered)
    # in general, evec can form either right- or left-handed basis,
    # but we need the former to have a proper rotation (not reflection),
    # so the third output axis is rebuilt as the cross product of the
    # first two. `dim=-1` is required here: torch.cross's implicit default
    # picks the *first* dimension of size 3 (deprecated behavior), which is
    # wrong for batched inputs (e.g. a batch of 3 point clouds).
    return torch.cat(
        (
            evec[..., 1:],
            torch.cross(evec[..., 1], evec[..., 2], dim=-1)[..., None],
        ),
        dim=-1,
    )
+
+
+def _signed_area(path: torch.Tensor) -> torch.Tensor:
+ """
+ Calculates the signed area / Lévy area of a 2D path. If the path is closed,
+ i.e. ends where it starts, this is the integral of the winding number over
+ the whole plane. If not, consider a closed path made by adding a straight
+ line from the end to the start; the signed area is the integral of the
+ winding number (also over the plane) with respect to that closed path.
+
+ If this number is positive, it indicates in some sense that the path
+ turns anticlockwise more than clockwise, and vice versa.
+
+ Args:
+ path: N x 2 tensor of points.
+
+ Returns:
+ signed area, shape ()
+ """
+ # This calculation is a sum of areas of triangles of the form
+ # (path[0], path[i], path[i+1]), where each triangle is half a
+ # parallelogram.
+ x, y = (path[1:] - path[:1]).unbind(1)
+ return (y[1:] * x[:-1] - x[1:] * y[:-1]).sum() * 0.5
+
+
@dataclass(frozen=True)
class Circle2D:
    """
    Contains details of a circle in a plane. Immutable (frozen dataclass).

    Members
        center: tensor shape (2,)
        radius: tensor shape ()
        generated_points: points around the circle, shape (n_points, 2);
            empty (0, 2) when no points were requested
    """

    center: torch.Tensor
    radius: torch.Tensor
    generated_points: torch.Tensor
+
+
def fit_circle_in_2d(
    points2d, *, n_points: int = 0, angles: Optional[torch.Tensor] = None
) -> Circle2D:
    """
    Least-squares fit of a circle to 2D points. Minimizes, over (a, b) and r,
    the sum over the points (x, y) of the squares of
        r**2 - [(x-a)**2 + (y-b)**2]
    which is linear in (2a, 2b, r**2 - a**2 - b**2), i.e.
        (2*a)*x + (2*b)*y + (r**2 - a**2 - b**2)*1 - (x**2 + y**2)
    so it reduces to an ordinary linear least-squares problem.

    Also generates points along the circle. If angles is None (default),
    n_points equally-spaced points are produced, beginning at the point of the
    circle closest to the first input point and continuing in the direction
    that matches the movement of points2d, as judged by its signed area. If
    `angles` is given, n_points is ignored and points at those angles are
    returned, with the same starting point and direction.

    (Note that `generated_points` is affected by the order of the points in
    points2d, but the other outputs are not.)

    Args:
        points2d: N x 2 tensor of 2D points
        n_points: number of points to generate on the circle, if angles not given
        angles: optional angles in radians of points to generate.

    Returns:
        Circle2D object
    """
    lsq_design = torch.cat([points2d, torch.ones_like(points2d[:, :1])], dim=1)
    lsq_target = (points2d**2).sum(1)
    num_given = points2d.shape[0]
    if num_given < 3:
        raise ValueError(f"{num_given} points are not enough to determine a circle")
    coeffs = torch.linalg.lstsq(lsq_design, lsq_target[:, None]).solution
    center = coeffs[:2, 0] / 2
    radius = torch.sqrt(coeffs[2, 0] + (center**2).sum())

    if n_points > 0:
        if angles is None:
            angles = torch.linspace(0, 2 * pi, n_points, device=points2d.device)
        else:
            warnings.warn("n_points ignored because angles provided")

    if angles is None:
        generated_points = points2d.new_zeros(0, 2)
    else:
        # Start at the circle point nearest points2d[0]; sweep in the
        # direction suggested by the path's signed area.
        dx, dy = (points2d[0] - center).unbind()
        start_angle = torch.atan2(dy, dx)
        with torch.no_grad():
            goes_anticlockwise = _signed_area(points2d) > 0
        swept = start_angle + angles if goes_anticlockwise else start_angle - angles
        generated_points = center[None] + radius * torch.stack(
            [torch.cos(swept), torch.sin(swept)], dim=-1
        )
    return Circle2D(center=center, radius=radius, generated_points=generated_points)
+
+
@dataclass(frozen=True)
class Circle3D:
    """
    Contains details of a circle in 3D. Immutable (frozen dataclass).

    Members
        center: tensor shape (3,)
        radius: tensor shape ()
        normal: tensor shape (3,), unit normal of the plane of the circle
            (a column of a rotation matrix, hence unit length)
        generated_points: points around the circle, shape (n_points, 3);
            empty (0, 3) when no points were requested
    """

    center: torch.Tensor
    radius: torch.Tensor
    normal: torch.Tensor
    generated_points: torch.Tensor
+
+
def fit_circle_in_3d(
    points,
    *,
    n_points: int = 0,
    angles: Optional[torch.Tensor] = None,
    offset: Optional[torch.Tensor] = None,
    up: Optional[torch.Tensor] = None,
) -> Circle3D:
    """
    Simple best fit circle to 3D points. Uses fit_circle_in_2d in the
    least-squares best fit plane.

    In addition, generates points along the circle. If angles is None (default)
    then n_points around the circle equally spaced are given. These begin at the
    point closest to the first input point. They continue in the direction which
    seems to match the movement of points. If angles is provided, then n_points
    is ignored, and points along the circle at the given angles are returned,
    with the starting point and direction as before.

    Further, an offset can be given to add to the generated points; this is
    interpreted in a rotated coordinate system where (0, 0, 1) is normal to the
    circle, specifically the normal which is approximately in the direction of a
    given `up` vector. The remaining rotation is disambiguated in an unspecified
    but deterministic way.

    (Note that `generated_points` is affected by the order of the points in
    points, but the other outputs are not.)

    Args:
        points: N x 3 tensor of 3D points
        n_points: number of points to generate on the circle
        angles: optional angles in radians of points to generate.
        offset: optional tensor (3,), a displacement expressed in a "canonical"
            coordinate system to add to the generated points.
        up: optional tensor (3,), a vector which helps define the
            "canonical" coordinate system for interpreting `offset`.
            Required if offset is used.


    Returns:
        Circle3D object
    """
    # Rotate the points into a frame whose xy plane is the best-fit plane,
    # fit the circle there, then map results back to world coordinates.
    centroid = points.mean(0)
    r = get_rotation_to_best_fit_xy(points, centroid)
    normal = r[:, 2]
    rotated_points = (points - centroid) @ r
    result_2d = fit_circle_in_2d(
        rotated_points[:, :2], n_points=n_points, angles=angles
    )
    # The 2D center lies in the fitted plane (z=0 in the rotated frame).
    center_3d = result_2d.center @ r[:, :2].t() + centroid
    n_generated_points = result_2d.generated_points.shape[0]
    if n_generated_points > 0:
        # Lift the generated 2D points to 3D with z = 0 in the rotated frame.
        generated_points_in_plane = torch.cat(
            [
                result_2d.generated_points,
                torch.zeros_like(result_2d.generated_points[:, :1]),
            ],
            dim=1,
        )
        if offset is not None:
            if up is None:
                raise ValueError("Missing `up` input for interpreting offset")
            with torch.no_grad():
                # If the fitted normal points away from `up`, flip the frame.
                swap = torch.dot(up, normal) < 0
            if swap:
                # We need some rotation which takes +z to -z. Here's one.
                generated_points_in_plane += offset * offset.new_tensor([1, -1, -1])
            else:
                generated_points_in_plane += offset

        generated_points = generated_points_in_plane @ r.t() + centroid
    else:
        generated_points = points.new_zeros(0, 3)

    return Circle3D(
        radius=result_2d.radius,
        center=center_3d,
        normal=normal,
        generated_points=generated_points,
    )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/config.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..4479fe65562fce952d39f15d520285650977299c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/config.py
@@ -0,0 +1,1210 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import dataclasses
+import inspect
+import itertools
+import sys
+import warnings
+from collections import Counter, defaultdict
+from enum import Enum
+from functools import partial
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ get_args,
+ get_origin,
+ List,
+ Optional,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+)
+
+from omegaconf import DictConfig, OmegaConf, open_dict
+
+
+"""
+This functionality allows a configurable system to be determined in a dataclass-type
+way. It is a generalization of omegaconf's "structured", in the dataclass case.
+Core functionality:
+
+- Configurable -- A base class used to label a class as being one which uses this
+ system. Uses class members and __post_init__ like a dataclass.
+
+- expand_args_fields -- Expands a class like `dataclasses.dataclass`. Runs automatically.
+
+- get_default_args -- gets an omegaconf.DictConfig for initializing a given class.
+
+- run_auto_creation -- Initialises nested members. To be called in __post_init__.
+
+
+In addition, a Configurable may contain members whose type is decided at runtime.
+
+- ReplaceableBase -- As a base instead of Configurable, labels a class to say that
+ any child class can be used instead.
+
+- registry -- A global store of named child classes of ReplaceableBase classes.
+ Used as `@registry.register` decorator on class definition.
+
+
+Additional utility functions:
+
+- remove_unused_components -- used for simplifying a DictConfig instance.
+- get_default_args_field -- default for DictConfig member of another configurable.
+- enable_get_default_args -- Allows get_default_args on a function or plain class.
+
+
+1. The simplest usage of this functionality is as follows. First a schema is defined
+in dataclass style.
+
+ class A(Configurable):
+ n: int = 9
+
+ class B(Configurable):
+ a: A
+
+ def __post_init__(self):
+ run_auto_creation(self)
+
+Then it can be used like
+
+ b_args = get_default_args(B)
+ b = B(**b_args)
+
+In this case, get_default_args(B) returns an omegaconf.DictConfig with the right
+members {"a_args": {"n": 9}}. It also modifies the definitions of the classes to
+something like the following. (The modification itself is done by the function
+`expand_args_fields`, which is called inside `get_default_args`.)
+
+ @dataclasses.dataclass
+ class A:
+ n: int = 9
+
+ @dataclasses.dataclass
+ class B:
+ a_args: DictConfig = dataclasses.field(default_factory=lambda: DictConfig({"n": 9}))
+
+ def __post_init__(self):
+ self.a = A(**self.a_args)
+
+2. Pluggability. Instead of a dataclass-style member being given a concrete class,
+it can be given a base class and the implementation will be looked up by name in the
+global `registry` in this module. E.g.
+
+ class A(ReplaceableBase):
+ k: int = 1
+
+ @registry.register
+ class A1(A):
+ m: int = 3
+
+ @registry.register
+ class A2(A):
+ n: str = "2"
+
+ class B(Configurable):
+ a: A
+ a_class_type: str = "A2"
+ b: Optional[A]
+ b_class_type: Optional[str] = "A2"
+
+ def __post_init__(self):
+ run_auto_creation(self)
+
+will expand to
+
+ @dataclasses.dataclass
+ class A:
+ k: int = 1
+
+ @dataclasses.dataclass
+ class A1(A):
+ m: int = 3
+
+ @dataclasses.dataclass
+ class A2(A):
+ n: str = "2"
+
+ @dataclasses.dataclass
+ class B:
+ a_class_type: str = "A2"
        a_A1_args: DictConfig = dataclasses.field(
            default_factory=lambda: DictConfig({"k": 1, "m": 3})
        )
        a_A2_args: DictConfig = dataclasses.field(
            default_factory=lambda: DictConfig({"k": 1, "n": "2"})
        )
        b_class_type: Optional[str] = "A2"
        b_A1_args: DictConfig = dataclasses.field(
            default_factory=lambda: DictConfig({"k": 1, "m": 3})
        )
        b_A2_args: DictConfig = dataclasses.field(
            default_factory=lambda: DictConfig({"k": 1, "n": "2"})
        )
+
+ def __post_init__(self):
+ if self.a_class_type == "A1":
+ self.a = A1(**self.a_A1_args)
+ elif self.a_class_type == "A2":
+ self.a = A2(**self.a_A2_args)
+ else:
+ raise ValueError(...)
+
+ if self.b_class_type is None:
+ self.b = None
+ elif self.b_class_type == "A1":
+ self.b = A1(**self.b_A1_args)
+ elif self.b_class_type == "A2":
+ self.b = A2(**self.b_A2_args)
+ else:
+ raise ValueError(...)
+
+3. Aside from these classes, the members of these classes should be things
+which DictConfig is happy with: e.g. (bool, int, str, None, float) and what
+can be built from them with `DictConfig`s and lists of them.
+
+In addition, you can call `get_default_args` on a function or class to get
+the `DictConfig` of its defaulted arguments, assuming those are all things
+which `DictConfig` is happy with, so long as you add a call to
+`enable_get_default_args` after its definition. If you want to use such a
+thing as the default for a member of another configured class,
+`get_default_args_field` is a helper.
+"""
+
+
# Naming conventions for the synthetic members which expand_args_fields adds.
TYPE_SUFFIX: str = "_class_type"  # "x_class_type": selects a replaceable's implementation
ARGS_SUFFIX: str = "_args"  # "x_args" / "x_Y_args": DictConfig of constructor args
ENABLED_SUFFIX: str = "_enabled"  # "x_enabled": toggles an Optional configurable member
CREATE_PREFIX: str = "create_"  # "create_x": generated member-creation function
IMPL_SUFFIX: str = "_impl"  # "create_x_impl": customizable creation implementation
TWEAK_SUFFIX: str = "_tweak_args"  # "x_tweak_args": classmethod hook to adjust default args
_DATACLASS_INIT: str = "__dataclass_own_init__"  # NOTE(review): usage not in view — presumably where the dataclass-generated __init__ is stashed; confirm
PRE_EXPAND_NAME: str = "pre_expand"  # NOTE(review): usage not in view — presumably a classmethod run before expansion; confirm
+
+
class ReplaceableBase:
    """
    Base class for a class (a "replaceable") which is a base class for
    dataclass-style implementations. The implementations can be stored
    in the registry. They get expanded into dataclasses with expand_args_fields.
    This expansion is delayed.
    """

    def __new__(cls, *args, **kwargs):
        """
        Expansion is normally expected to happen explicitly and only when
        needed, because processing fixes the list of replaceable subclasses
        of the class's members. As a safety net, if an unprocessed subclass
        is instantiated anyway, it is expanded on the fly here.
        """
        instance = super().__new__(cls)
        if cls is not ReplaceableBase and not _is_actually_dataclass(cls):
            expand_args_fields(cls)
        return instance
+
+
class Configurable:
    """
    Base class for dataclass-style classes which are not replaceable. These get
    expanded into a dataclass with expand_args_fields.
    This expansion is delayed.
    """

    def __new__(cls, *args, **kwargs):
        """
        Expansion is normally expected to happen explicitly and only when
        needed, because processing fixes the list of replaceable subclasses
        of the class's members. As a safety net, if an unprocessed subclass
        is instantiated anyway, it is expanded on the fly here.
        """
        instance = super().__new__(cls)
        if cls is not Configurable and not _is_actually_dataclass(cls):
            expand_args_fields(cls)
        return instance
+
+
# TypeVars for precise typing of registry lookups (_X) and class expansion (_Y).
# Per PEP 484, a TypeVar's string name must equal the name of the variable it
# is assigned to; the previous names "X"/"Y" confuse type checkers and repr().
_X = TypeVar("_X", bound=ReplaceableBase)
_Y = TypeVar("_Y", bound=Union[ReplaceableBase, Configurable])
+
+
class _Registry:
    """
    Register from names to classes. In particular, we say that direct subclasses of
    ReplaceableBase are "base classes" and we register subclasses of each base class
    in a separate namespace.
    """

    def __init__(self) -> None:
        # Maps each base class (namespace) to {registered name: implementation}.
        self._mapping: Dict[Type[ReplaceableBase], Dict[str, Type[ReplaceableBase]]] = (
            defaultdict(dict)
        )

    def register(self, some_class: Type[_X]) -> Type[_X]:
        """
        A class decorator, to register a class in self under its own name.
        """
        name = some_class.__name__
        self._register(some_class, name=name)
        return some_class

    def _register(
        self,
        some_class: Type[ReplaceableBase],
        *,
        base_class: Optional[Type[ReplaceableBase]] = None,
        name: str,
    ) -> None:
        """
        Register a new member.

        Args:
            some_class: the new member
            base_class: (optional) what the new member is a type for;
                deduced from some_class's mro if not given.
            name: name for the new member
        """
        if base_class is None:
            base_class = self._base_class_from_class(some_class)
            if base_class is None:
                raise ValueError(
                    f"Cannot register {some_class}. Cannot tell what it is."
                )
        self._mapping[base_class][name] = some_class

    def get(self, base_class_wanted: Type[_X], name: str) -> Type[_X]:
        """
        Retrieve a class from the registry by name.

        Args:
            base_class_wanted: parent type of type we are looking for.
                It determines the namespace.
                This will typically be a direct subclass of ReplaceableBase.
            name: what to look for

        Returns:
            class type

        Raises:
            ValueError: if the namespace cannot be determined, name is not a
                str, name is unregistered, or the found class does not
                subclass base_class_wanted.
        """
        if self._is_base_class(base_class_wanted):
            base_class = base_class_wanted
        else:
            base_class = self._base_class_from_class(base_class_wanted)
            if base_class is None:
                raise ValueError(
                    f"Cannot look up {base_class_wanted}. Cannot tell what it is."
                )
        if not isinstance(name, str):
            raise ValueError(
                f"Cannot look up a {type(name)} in the registry. Got {name}."
            )
        result = self._mapping[base_class].get(name)
        if result is None:
            raise ValueError(f"{name} has not been registered.")
        if not issubclass(result, base_class_wanted):
            raise ValueError(
                f"{name} resolves to {result} which does not subclass {base_class_wanted}"
            )
        # pyre-ignore[7]
        return result

    def get_all(
        self, base_class_wanted: Type[ReplaceableBase]
    ) -> List[Type[ReplaceableBase]]:
        """
        Retrieve all registered implementations from the registry

        Args:
            base_class_wanted: parent type of type we are looking for.
                It determines the namespace.
                This will typically be a direct subclass of ReplaceableBase.
        Returns:
            list of class types in alphabetical order of registered name.
        """
        if self._is_base_class(base_class_wanted):
            source = self._mapping[base_class_wanted]
            return [source[key] for key in sorted(source)]

        # A non-base class was given: restrict the namespace's entries to
        # strict subclasses of it.
        base_class = self._base_class_from_class(base_class_wanted)
        if base_class is None:
            raise ValueError(
                f"Cannot look up {base_class_wanted}. Cannot tell what it is."
            )
        source = self._mapping[base_class]
        return [
            source[key]
            for key in sorted(source)
            if issubclass(source[key], base_class_wanted)
            and source[key] is not base_class_wanted
        ]

    @staticmethod
    def _is_base_class(some_class: Type[ReplaceableBase]) -> bool:
        """
        Return whether the given type is a direct subclass of ReplaceableBase
        and so gets used as a namespace.
        """
        return ReplaceableBase in some_class.__bases__

    @staticmethod
    def _base_class_from_class(
        some_class: Type[ReplaceableBase],
    ) -> Optional[Type[ReplaceableBase]]:
        """
        Find the parent class of some_class which inherits ReplaceableBase, or None
        """
        # mro() ends with [..., ReplaceableBase, object]; slicing off the last
        # two and reversing walks from the ancestor nearest ReplaceableBase
        # downwards, so the direct subclass of ReplaceableBase is found first.
        for base in some_class.mro()[-3::-1]:
            if base is not ReplaceableBase and issubclass(base, ReplaceableBase):
                return base
        return None
+
+
# Global instance of the registry; implementations are added with the
# `@registry.register` class decorator.
registry = _Registry()
+
+
class _ProcessType(Enum):
    """
    Type of member which gets rewritten by expand_args_fields.
    """

    # Member declared as a Configurable subclass: `x: X`.
    CONFIGURABLE = 1
    # Member declared as a ReplaceableBase subclass: implementation chosen
    # at runtime via `x_class_type`.
    REPLACEABLE = 2
    # `x: Optional[X]` for Configurable X, toggled by `x_enabled`.
    OPTIONAL_CONFIGURABLE = 3
    # `x: Optional[X]` for ReplaceableBase X; `x_class_type` may be None.
    OPTIONAL_REPLACEABLE = 4
+
+
def _default_create(
    name: str, type_: Type, process_type: _ProcessType
) -> Callable[[Any], None]:
    """
    Build the default creation function for a member: a function which could
    be called in __post_init__ to initialise the member, and which
    run_auto_creation invokes.

    Args:
        name: name of the member
        type_: type of the member (with any Optional removed)
        process_type: Shows whether member's declared type inherits
            ReplaceableBase, in which case the actual type to be created
            is decided at runtime.

    Returns:
        Function taking one argument, the object whose member should be
        initialized, i.e. self.
    """
    impl_name = f"{CREATE_PREFIX}{name}{IMPL_SUFFIX}"

    def create_required(self):
        # Plain Configurable member: always construct it from <name>_args.
        expand_args_fields(type_)
        impl = getattr(self, impl_name)
        impl(True, getattr(self, name + ARGS_SUFFIX))

    def create_optional(self):
        # Optional Configurable member: <name>_enabled decides construction.
        expand_args_fields(type_)
        impl = getattr(self, impl_name)
        impl(
            getattr(self, name + ENABLED_SUFFIX),
            getattr(self, name + ARGS_SUFFIX),
        )

    def create_replaceable(self):
        # Replaceable member: <name>_class_type picks which
        # <name>_<Type>_args bundle to build from; None disables the member.
        chosen_type = getattr(self, name + TYPE_SUFFIX)
        impl = getattr(self, impl_name)
        if chosen_type is None:
            chosen_args = None
        else:
            chosen_args = getattr(self, f"{name}_{chosen_type}{ARGS_SUFFIX}", None)
        impl(chosen_type, chosen_args)

    if process_type == _ProcessType.CONFIGURABLE:
        return create_required
    if process_type == _ProcessType.OPTIONAL_CONFIGURABLE:
        return create_optional
    return create_replaceable
+
+
def _default_create_impl(
    name: str, type_: Type, process_type: _ProcessType
) -> Callable[[Any, Any, DictConfig], None]:
    """
    Return the default internal function for initialising a member. This is a function
    which could be called in the create_ function to initialise the member.

    Args:
        name: name of the member
        type_: type of the member (with any Optional removed)
        process_type: Shows whether member's declared type inherits ReplaceableBase,
            in which case the actual type to be created is decided at
            runtime.

    Returns:
        Function taking
            - self, the object whose member should be initialized.
            - option for what to do. This is
                - for pluggables, the type to initialise or None to do nothing
                - for non pluggables, a bool indicating whether to initialise.
            - the args for initializing the member.
    """

    def create_configurable(self, enabled, args):
        # Disabled Optional[Configurable] members are stored as None.
        if enabled:
            expand_args_fields(type_)
            setattr(self, name, type_(**args))
        else:
            setattr(self, name, None)

    def create_pluggable(self, type_name, args):
        # A None type name means the (optional) member is switched off.
        if type_name is None:
            setattr(self, name, None)
            return

        if not isinstance(type_name, str):
            raise ValueError(
                f"A {type(type_name)} was received as the type of {name}."
                + f" Perhaps this is from {name}{TYPE_SUFFIX}?"
            )
        chosen_class = registry.get(type_, type_name)
        if self._known_implementations.get(type_name, chosen_class) is not chosen_class:
            # If this warning is raised, it means that a new definition of
            # the chosen class has been registered since our class was processed
            # (i.e. expanded). A DictConfig which comes from our get_default_args
            # (which might have triggered the processing) will contain the old default
            # values for the members of the chosen class. Changes to those defaults which
            # were made in the redefinition will not be reflected here.
            warnings.warn(f"New implementation of {type_name} is being chosen.")
        expand_args_fields(chosen_class)
        setattr(self, name, chosen_class(**args))

    if process_type in (_ProcessType.CONFIGURABLE, _ProcessType.OPTIONAL_CONFIGURABLE):
        return create_configurable
    return create_pluggable
+
+
def run_auto_creation(self: Any) -> None:
    """
    Initialise all nested members of `self` by calling, in order, each
    function whose name is listed in `self._creation_functions`.
    """
    for fn_name in self._creation_functions:
        getattr(self, fn_name)()
+
+
+def _is_configurable_class(C) -> bool:
+ return isinstance(C, type) and issubclass(C, (Configurable, ReplaceableBase))
+
+
def get_default_args(C, *, _do_not_process: Tuple[type, ...] = ()) -> DictConfig:
    """
    Get the DictConfig corresponding to the defaults in a dataclass or
    configurable. Normal use is to provide a dataclass as C.
    If enable_get_default_args has been called on a function or plain class,
    then that function or class can be provided as C.

    If C is a subclass of Configurable or ReplaceableBase, we make sure
    it has been processed with expand_args_fields.

    Args:
        C: the class or function to be processed
        _do_not_process: (internal use) When this function is called from
            expand_args_fields, we specify any class currently being
            processed, to make sure we don't try to process a class
            while it is already being processed.

    Returns:
        new DictConfig object, which is typed.
    """
    if C is None:
        return DictConfig({})

    if _is_configurable_class(C):
        if C in _do_not_process:
            raise ValueError(
                f"Internal recursion error. Need processed {C},"
                f" but cannot get it. _do_not_process={_do_not_process}"
            )
        # This is safe to run multiple times. It will return
        # straight away if C has already been processed.
        expand_args_fields(C, _do_not_process=_do_not_process)

    if dataclasses.is_dataclass(C):
        # Note that if get_default_args_field is used somewhere in C,
        # this call is recursive. No special care is needed,
        # because in practice get_default_args_field is used for
        # separate types than the outer type.

        try:
            out: DictConfig = OmegaConf.structured(C)
        except Exception:
            print(f"### OmegaConf.structured({C}) failed ###")
            # We don't use `raise From` here, because that gets the original
            # exception hidden by the OC_CAUSE logic in the case where we are
            # called by hydra.
            raise
        exclude = getattr(C, "_processed_members", ())
        with open_dict(out):
            # Drop the entries for members which expand_args_fields replaced
            # (those listed in _processed_members).
            for field in exclude:
                out.pop(field, None)
        return out

    if _is_configurable_class(C):
        # expand_args_fields should have turned C into a dataclass;
        # reaching here means the expansion failed.
        raise ValueError(f"Failed to process {C}")

    if not inspect.isfunction(C) and not inspect.isclass(C):
        raise ValueError(f"Unexpected {C}")

    # C is a plain function or class: look up the helper dataclass which
    # enable_get_default_args(C) stores in C's module.
    dataclass_name = _dataclass_name_for_function(C)
    dataclass = getattr(sys.modules[C.__module__], dataclass_name, None)
    if dataclass is None:
        raise ValueError(
            f"Cannot get args for {C}. Was enable_get_default_args forgotten?"
        )

    try:
        out: DictConfig = OmegaConf.structured(dataclass)
    except Exception:
        print(f"### OmegaConf.structured failed for {C.__name__} ###")
        raise
    return out
+
+
+def _dataclass_name_for_function(C: Any) -> str:
+ """
+ Returns the name of the dataclass which enable_get_default_args(C)
+ creates.
+ """
+ name = f"_{C.__name__}_default_args_"
+ return name
+
+
def _field_annotations_for_default_args(
    C: Any,
) -> List[Tuple[str, Any, dataclasses.Field]]:
    """
    If C is a function or a plain class with an __init__ function,
    return the fields which `enable_get_default_args(C)` will need
    to make a dataclass with.

    Args:
        C: a function, or a class with an __init__ function. Must
            have types for all its defaulted args.

    Returns:
        a list of fields for a dataclass.

    Raises:
        ValueError: if a defaulted argument has no type annotation.
    """

    field_annotations = []
    for pname, defval in _params_iter(C):
        default = defval.default
        # Identity check against the sentinel: `==` would invoke an arbitrary
        # __eq__ on the default value (e.g. a numpy-array default returns an
        # array from comparison, whose truth-testing raises).
        if default is inspect.Parameter.empty:
            # we do not have a default value for the parameter
            continue

        # Same identity check; also use the public inspect.Parameter.empty
        # (identical object to the private inspect._empty used before).
        if defval.annotation is inspect.Parameter.empty:
            raise ValueError(
                "All arguments of the input to enable_get_default_args have to"
                f" be typed. Argument '{pname}' does not have a type annotation."
            )

        _, annotation = _resolve_optional(defval.annotation)

        if isinstance(default, set):  # force OmegaConf to convert it to ListConfig
            default = tuple(default)

        if isinstance(default, (list, dict)):
            # OmegaConf will convert to [Dict|List]Config, so it is safe to reuse
            # the value. Binding `default` as a lambda default avoids the
            # late-binding-closure trap across loop iterations.
            field_ = dataclasses.field(default_factory=lambda default=default: default)
        elif not _is_immutable_type(annotation, default):
            # Mutable/exotic defaults cannot be held safely by a dataclass field.
            continue
        else:
            # we can use a simple default argument for dataclass.field
            field_ = dataclasses.field(default=default)
        field_annotations.append((pname, defval.annotation, field_))

    return field_annotations
+
+
def enable_get_default_args(C: Any, *, overwrite: bool = True) -> None:
    """
    If C is a function or a plain class with an __init__ function,
    and you want get_default_args(C) to work, then add
    `enable_get_default_args(C)` straight after the definition of C.
    This makes a dataclass corresponding to the default arguments of C
    and stores it in the same module as C.

    Args:
        C: a function, or a class with an __init__ function. Must
            have types for all its defaulted args.
        overwrite: whether to allow calling this a second time on
            the same function.
    """
    if not inspect.isfunction(C) and not inspect.isclass(C):
        raise ValueError(f"Unexpected {C}")

    # Compute the fields first, so typing errors surface before any
    # overwrite warning/refusal is issued.
    fields = _field_annotations_for_default_args(C)

    name = _dataclass_name_for_function(C)
    module = sys.modules[C.__module__]
    if hasattr(module, name):
        if not overwrite:
            raise ValueError(f"Cannot overwrite {name} in {C.__module__}.")
        warnings.warn(f"Overwriting {name} in {C.__module__}.")
    generated = dataclasses.make_dataclass(name, fields)
    generated.__module__ = C.__module__
    setattr(module, name, generated)
+
+
+def _params_iter(C):
+ """Returns dict of keyword args of a class or function C."""
+ if inspect.isclass(C):
+ return itertools.islice( # exclude `self`
+ inspect.signature(C.__init__).parameters.items(), 1, None
+ )
+
+ return inspect.signature(C).parameters.items()
+
+
+def _is_immutable_type(type_: Type, val: Any) -> bool:
+ if val is None:
+ return True
+
+ PRIMITIVE_TYPES = (int, float, bool, str, bytes, tuple)
+ # sometimes type can be too relaxed (e.g. Any), so we also check values
+ if isinstance(val, PRIMITIVE_TYPES):
+ return True
+
+ return type_ in PRIMITIVE_TYPES or (
+ inspect.isclass(type_) and issubclass(type_, Enum)
+ )
+
+
+# copied from OmegaConf
+def _resolve_optional(type_: Any) -> Tuple[bool, Any]:
+ """Check whether `type_` is equivalent to `typing.Optional[T]` for some T."""
+ if get_origin(type_) is Union:
+ args = get_args(type_)
+ if len(args) == 2 and args[1] == type(None): # noqa E721
+ return True, args[0]
+ if type_ is Any:
+ return True, Any
+
+ return False, type_
+
+
+def _is_actually_dataclass(some_class) -> bool:
+ # Return whether the class some_class has been processed with
+ # the dataclass annotation. This is more specific than
+ # dataclasses.is_dataclass which returns True on anything
+ # deriving from a dataclass.
+
+ # Checking for __init__ would also work for our purpose.
+ return "__dataclass_fields__" in some_class.__dict__
+
+
+def expand_args_fields(
+ some_class: Type[_Y], *, _do_not_process: Tuple[type, ...] = ()
+) -> Type[_Y]:
+ """
+ This expands a class which inherits Configurable or ReplaceableBase classes,
+ including dataclass processing. some_class is modified in place by this function.
+ If expand_args_fields(some_class) has already been called, subsequent calls do
+ nothing and return some_class unmodified.
+ For classes of type ReplaceableBase, you can add some_class to the registry before
+ or after calling this function. But potential inner classes need to be registered
+ before this function is run on the outer class.
+
+ The transformations this function makes, before the concluding
+ dataclasses.dataclass, are as follows. If X is a base class with registered
+ subclasses Y and Z, replace a class member
+
+ x: X
+
+ and optionally
+
+ x_class_type: str = "Y"
+ def create_x(self):...
+
+ with
+
+ x_Y_args: dict = dataclasses.field(default_factory=lambda: get_default_args(Y))
+ x_Z_args: dict = dataclasses.field(default_factory=lambda: get_default_args(Z))
+ def create_x(self):
+ args = self.getattr(f"x_{self.x_class_type}_args")
+ self.create_x_impl(self.x_class_type, args)
+ def create_x_impl(self, x_type, args):
+ x_type = registry.get(X, x_type)
+ expand_args_fields(x_type)
+ self.x = x_type(**args)
+ x_class_type: str = "UNDEFAULTED"
+
+ without adding the optional attributes if they are already there.
+
+ Similarly, replace
+
+ x: Optional[X]
+
+ and optionally
+
+ x_class_type: Optional[str] = "Y"
+ def create_x(self):...
+
+ with
+
+ x_Y_args: dict = dataclasses.field(default_factory=lambda: get_default_args(Y))
+ x_Z_args: dict = dataclasses.field(default_factory=lambda: get_default_args(Z))
+ def create_x(self):
+ if self.x_class_type is None:
+ args = None
+ else:
+ args = self.getattr(f"x_{self.x_class_type}_args", None)
+ self.create_x_impl(self.x_class_type, args)
+ def create_x_impl(self, x_class_type, args):
+ if x_class_type is None:
+ self.x = None
+ return
+
+ x_type = registry.get(X, x_class_type)
+ expand_args_fields(x_type)
+ assert args is not None
+ self.x = x_type(**args)
+ x_class_type: Optional[str] = "UNDEFAULTED"
+
+ without adding the optional attributes if they are already there.
+
+ Similarly, if X is a subclass of Configurable,
+
+ x: X
+
+ and optionally
+
+ def create_x(self):...
+
+ will be replaced with
+
+ x_args: dict = dataclasses.field(default_factory=lambda: get_default_args(X))
+ def create_x(self):
+ self.create_x_impl(True, self.x_args)
+
+ def create_x_impl(self, enabled, args):
+ if enabled:
+ expand_args_fields(X)
+ self.x = X(**args)
+ else:
+ self.x = None
+
+ Similarly, replace,
+
+ x: Optional[X]
+ x_enabled: bool = ...
+
+ and optionally
+
+ def create_x(self):...
+
+ with
+
+ x_args: dict = dataclasses.field(default_factory=lambda: get_default_args(X))
+ x_enabled: bool = ...
+ def create_x(self):
+ self.create_x_impl(self.x_enabled, self.x_args)
+
+ def create_x_impl(self, enabled, args):
+ if enabled:
+ expand_args_fields(X)
+ self.x = X(**args)
+ else:
+ self.x = None
+
+
+ Also adds the following class members, unannotated so that dataclass
+ ignores them.
+ - _creation_functions: Tuple[str, ...] of all the create_ functions,
+ including those from base classes (not the create_x_impl ones).
+ - _known_implementations: Dict[str, Type] containing the classes which
+ have been found from the registry.
+ (used only to raise a warning if it one has been overwritten)
+ - _processed_members: a Dict[str, Any] of all the members which have been
+ transformed, with values giving the types they were declared to have.
+ (E.g. {"x": X} or {"x": Optional[X]} in the cases above.)
+
+ In addition, if the class has a member function
+
+ @classmethod
+ def x_tweak_args(cls, member_type: Type, args: DictConfig) -> None
+
+ then the default_factory of x_args will also have a call to x_tweak_args(X, x_args) and
+ the default_factory of x_Y_args will also have a call to x_tweak_args(Y, x_Y_args).
+
+ In addition, if the class inherits torch.nn.Module, the generated __init__ will
+ call torch.nn.Module's __init__ before doing anything else.
+
+ Before any transformation of the class, if the class has a classmethod called
+ `pre_expand`, it will be called with no arguments.
+
+ Note that although the *_args members are intended to have type DictConfig, they
+ are actually internally annotated as dicts. OmegaConf is happy to see a DictConfig
+ in place of a dict, but not vice-versa. Allowing dict lets a class user specify
+ x_args as an explicit dict without getting an incomprehensible error.
+
+ Args:
+ some_class: the class to be processed
+ _do_not_process: Internal use for get_default_args: Because get_default_args calls
+ and is called by this function, we let it specify any class currently
+ being processed, to make sure we don't try to process a class while
+ it is already being processed.
+
+
+ Returns:
+ some_class itself, which has been modified in place. This
+ allows this function to be used as a class decorator.
+ """
+ if _is_actually_dataclass(some_class):
+ return some_class
+
+ if hasattr(some_class, PRE_EXPAND_NAME):
+ getattr(some_class, PRE_EXPAND_NAME)()
+
+ # The functions this class's run_auto_creation will run.
+ creation_functions: List[str] = []
+ # The classes which this type knows about from the registry
+ # We could use a weakref.WeakValueDictionary here which would mean
+ # that we don't warn if the class we should have expected is elsewhere
+ # unused.
+ known_implementations: Dict[str, Type] = {}
+ # Names of members which have been processed.
+ processed_members: Dict[str, Any] = {}
+
+ # For all bases except ReplaceableBase and Configurable and object,
+ # we need to process them before our own processing. This is
+ # because dataclasses expect to inherit dataclasses and not unprocessed
+ # dataclasses.
+ for base in some_class.mro()[-3:0:-1]:
+ if base is ReplaceableBase:
+ continue
+ if base is Configurable:
+ continue
+ if not issubclass(base, (Configurable, ReplaceableBase)):
+ continue
+ expand_args_fields(base, _do_not_process=_do_not_process)
+ if "_creation_functions" in base.__dict__:
+ creation_functions.extend(base._creation_functions)
+ if "_known_implementations" in base.__dict__:
+ known_implementations.update(base._known_implementations)
+ if "_processed_members" in base.__dict__:
+ processed_members.update(base._processed_members)
+
+ to_process: List[Tuple[str, Type, _ProcessType]] = []
+ if "__annotations__" in some_class.__dict__:
+ for name, type_ in some_class.__annotations__.items():
+ underlying_and_process_type = _get_type_to_process(type_)
+ if underlying_and_process_type is None:
+ continue
+ underlying_type, process_type = underlying_and_process_type
+ to_process.append((name, underlying_type, process_type))
+
+ for name, underlying_type, process_type in to_process:
+ processed_members[name] = some_class.__annotations__[name]
+ _process_member(
+ name=name,
+ type_=underlying_type,
+ process_type=process_type,
+ some_class=some_class,
+ creation_functions=creation_functions,
+ _do_not_process=_do_not_process,
+ known_implementations=known_implementations,
+ )
+
+ for key, count in Counter(creation_functions).items():
+ if count > 1:
+ warnings.warn(f"Clash with {key} in a base class.")
+ some_class._creation_functions = tuple(creation_functions)
+ some_class._processed_members = processed_members
+ some_class._known_implementations = known_implementations
+
+ dataclasses.dataclass(eq=False)(some_class)
+ _fixup_class_init(some_class)
+ return some_class
+
+
def _fixup_class_init(some_class) -> None:
    """
    Post-dataclass patch of some_class, applied in place.

    If some_class (which must already be a dataclass) also inherits
    torch.nn.Module, rewire its __init__ so that torch.nn.Module.__init__
    runs first, before the dataclass-generated __init__.
    This is a bit like attr's __pre_init__.
    No-op when torch is unavailable or the class is not a Module.
    """

    assert _is_actually_dataclass(some_class)
    try:
        import torch
    except ModuleNotFoundError:
        # No torch installed: nothing to patch.
        return

    if not issubclass(some_class, torch.nn.Module):
        return

    dataclass_generated_init = some_class.__init__

    def patched_init(self, *args, **kwargs) -> None:
        # Module.__init__ must run before any dataclass field assignment.
        torch.nn.Module.__init__(self)
        getattr(self, _DATACLASS_INIT)(*args, **kwargs)

    # The stash name must be free, otherwise we would clobber a previous patch.
    assert _DATACLASS_INIT not in some_class.__dict__

    setattr(some_class, _DATACLASS_INIT, dataclass_generated_init)
    some_class.__init__ = patched_init
+
+
def get_default_args_field(
    C,
    *,
    _do_not_process: Tuple[type, ...] = (),
    _hook: Optional[Callable[[DictConfig], None]] = None,
):
    """
    Build a dataclasses field whose default is get_default_args(C).

    Args:
        C: As for get_default_args.
        _do_not_process: As for get_default_args.
        _hook: Optional callable applied (inside open_dict) to the args
            before they are returned.

    Returns:
        a dataclasses.field whose default_factory builds a fresh DictConfig.
    """

    def default_factory():
        defaults = get_default_args(C, _do_not_process=_do_not_process)
        if _hook is not None:
            with open_dict(defaults):
                _hook(defaults)
        return defaults

    return dataclasses.field(default_factory=default_factory)
+
+
def _get_default_args_field_from_registry(
    *,
    base_class_wanted: Type[_X],
    name: str,
    _do_not_process: Tuple[type, ...] = (),
    _hook: Optional[Callable[[DictConfig], None]] = None,
):
    """
    Build a dataclasses field defaulting to
    get_default_args(registry.get(base_class_wanted, name)).

    Unlike get_default_args_field, the registry lookup happens lazily inside
    the default_factory, so defaults stay current if a class is redefined.

    Args:
        base_class_wanted: As for registry.get.
        name: As for registry.get.
        _do_not_process: As for get_default_args.
        _hook: Optional callable applied (inside open_dict) to the args
            before they are returned.

    Returns:
        a dataclasses.field whose default_factory builds a fresh DictConfig.
    """

    def default_factory():
        looked_up = registry.get(base_class_wanted=base_class_wanted, name=name)
        defaults = get_default_args(looked_up, _do_not_process=_do_not_process)
        if _hook is not None:
            with open_dict(defaults):
                _hook(defaults)
        return defaults

    return dataclasses.field(default_factory=default_factory)
+
+
def _get_type_to_process(type_) -> Optional[Tuple[Type, _ProcessType]]:
    """
    If a member is annotated as `type_`, and that should be expanded in
    expand_args_fields, return how it should be expanded: the underlying
    class together with its _ProcessType. Returns None (implicitly) for
    annotations which need no expansion.
    """
    if get_origin(type_) == Union:
        # We look for Optional[X] which is a Union of X with None.
        args = get_args(type_)
        if len(args) != 2 or all(a is not type(None) for a in args):  # noqa: E721
            # A Union which is not an Optional: not expanded.
            return
        underlying = args[0] if args[1] is type(None) else args[1]  # noqa: E721
        if (
            isinstance(underlying, type)
            and issubclass(underlying, ReplaceableBase)
            and ReplaceableBase in underlying.__bases__
        ):
            # Optional[X] where X is a direct subclass of ReplaceableBase.
            return underlying, _ProcessType.OPTIONAL_REPLACEABLE

        if isinstance(underlying, type) and issubclass(underlying, Configurable):
            return underlying, _ProcessType.OPTIONAL_CONFIGURABLE

    if not isinstance(type_, type):
        # e.g. any other Union or Tuple. Or ClassVar.
        return

    # Only *direct* subclasses of ReplaceableBase act as pluggable bases.
    if issubclass(type_, ReplaceableBase) and ReplaceableBase in type_.__bases__:
        return type_, _ProcessType.REPLACEABLE

    if issubclass(type_, Configurable):
        return type_, _ProcessType.CONFIGURABLE
+
+
def _process_member(
    *,
    name: str,
    type_: Type,
    process_type: _ProcessType,
    some_class: Type,
    creation_functions: List[str],
    _do_not_process: Tuple[type, ...],
    known_implementations: Dict[str, Type],
) -> None:
    """
    Make the modification (of expand_args_fields) to some_class for a single member.

    Replaceable members get one <name>_<Impl>_args member per registered
    implementation plus a <name>_class_type selector; Configurable members
    get a single <name>_args member. In both cases, default create_<name>
    and create_<name>_impl functions are attached unless already defined.

    Args:
        name: member name
        type_: member type (with Optional removed if needed)
        process_type: whether member has dynamic type
        some_class: (MODIFIED IN PLACE) the class being processed
        creation_functions: (MODIFIED IN PLACE) the names of the create functions
        _do_not_process: as for expand_args_fields.
        known_implementations: (MODIFIED IN PLACE) known types from the registry
    """
    # Because we are adding defaultable members, make
    # sure they go at the end of __annotations__ in case
    # there are non-defaulted standard class members.
    del some_class.__annotations__[name]
    # Optional user-defined <name>_tweak_args hook which can adjust the
    # generated default args; None if the class does not define one.
    hook = getattr(some_class, name + TWEAK_SUFFIX, None)

    if process_type in (_ProcessType.REPLACEABLE, _ProcessType.OPTIONAL_REPLACEABLE):
        # Ensure the <name>_class_type selector exists. The "UNDEFAULTED"
        # default forces users to pick an implementation explicitly.
        type_name = name + TYPE_SUFFIX
        if type_name not in some_class.__annotations__:
            if process_type == _ProcessType.OPTIONAL_REPLACEABLE:
                some_class.__annotations__[type_name] = Optional[str]
            else:
                some_class.__annotations__[type_name] = str
            setattr(some_class, type_name, "UNDEFAULTED")

        # Add one <name>_<Impl>_args member per registered implementation.
        for derived_type in registry.get_all(type_):
            if derived_type in _do_not_process:
                continue
            if issubclass(derived_type, some_class):
                # When derived_type is some_class we have a simple
                # recursion to avoid. When it's a strict subclass the
                # situation is even worse.
                continue
            known_implementations[derived_type.__name__] = derived_type
            args_name = f"{name}_{derived_type.__name__}{ARGS_SUFFIX}"
            if args_name in some_class.__annotations__:
                raise ValueError(
                    f"Cannot generate {args_name} because it is already present."
                )
            some_class.__annotations__[args_name] = dict
            if hook is not None:
                hook_closed = partial(hook, derived_type)
            else:
                hook_closed = None
            setattr(
                some_class,
                args_name,
                _get_default_args_field_from_registry(
                    base_class_wanted=type_,
                    name=derived_type.__name__,
                    _do_not_process=_do_not_process + (some_class,),
                    _hook=hook_closed,
                ),
            )
    else:
        # Configurable member: a single <name>_args member suffices.
        args_name = name + ARGS_SUFFIX
        if args_name in some_class.__annotations__:
            raise ValueError(
                f"Cannot generate {args_name} because it is already present."
            )
        # Guard against direct or mutual recursion of configurables.
        if issubclass(type_, some_class) or type_ in _do_not_process:
            raise ValueError(f"Cannot process {type_} inside {some_class}")

        some_class.__annotations__[args_name] = dict
        if hook is not None:
            hook_closed = partial(hook, type_)
        else:
            hook_closed = None
        setattr(
            some_class,
            args_name,
            get_default_args_field(
                type_,
                _do_not_process=_do_not_process + (some_class,),
                _hook=hook_closed,
            ),
        )
        if process_type == _ProcessType.OPTIONAL_CONFIGURABLE:
            # Optional configurables are toggled by a user-supplied
            # <name>_enabled member, which therefore must exist.
            enabled_name = name + ENABLED_SUFFIX
            if enabled_name not in some_class.__annotations__:
                raise ValueError(
                    f"{name} is an Optional[{type_.__name__}] member "
                    f"but there is no corresponding member {enabled_name}."
                )

    # Attach default create_<name>/create_<name>_impl unless user-defined.
    creation_function_name = f"{CREATE_PREFIX}{name}"
    if not hasattr(some_class, creation_function_name):
        setattr(
            some_class,
            creation_function_name,
            _default_create(name, type_, process_type),
        )
    creation_functions.append(creation_function_name)

    creation_function_impl_name = f"{CREATE_PREFIX}{name}{IMPL_SUFFIX}"
    if not hasattr(some_class, creation_function_impl_name):
        setattr(
            some_class,
            creation_function_impl_name,
            _default_create_impl(name, type_, process_type),
        )
+
+
def remove_unused_components(dict_: DictConfig) -> None:
    """
    Strip from dict_ (in place, recursively) the sub-configurations of
    pluggable parts which are not in use.

    For a replaceable member, only the args of the selected implementation
    survive: e.g. if renderer_class_type is SignedDistanceFunctionRenderer,
    renderer_MultiPassEmissionAbsorptionRenderer_args is removed. For an
    optional member, its args are removed when <name>_enabled is falsy
    (e.g. chocolate_args goes when chocolate_enabled is False).

    Args:
        dict_: (MODIFIED IN PLACE) a DictConfig instance
    """
    str_keys = [key for key in dict_ if isinstance(key, str)]
    args_keys = [key for key in str_keys if key.endswith(ARGS_SUFFIX)]

    # Replaceable members: delete args of every non-selected implementation.
    for key in str_keys:
        if not key.endswith(TYPE_SUFFIX):
            continue
        member = key[: -len(TYPE_SUFFIX)]
        selected = dict_[key]
        keep = "" if selected is None else f"{member}_{selected}{ARGS_SUFFIX}"
        with open_dict(dict_):
            for args_key in args_keys:
                if args_key.startswith(member + "_") and args_key != keep:
                    del dict_[args_key]

    # Optional members: delete args of disabled members.
    for key in str_keys:
        if not key.endswith(ENABLED_SUFFIX):
            continue
        member = key[: -len(ENABLED_SUFFIX)]
        if not dict_[key]:
            with open_dict(dict_):
                dict_.pop(member + ARGS_SUFFIX, None)

    # Recurse into nested configurations.
    for key in dict_:
        if isinstance(dict_.get(key), DictConfig):
            remove_unused_components(dict_[key])
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/depth_cleanup.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/depth_cleanup.py
new file mode 100644
index 0000000000000000000000000000000000000000..89d48d37c37c9660171506eaf378a31c6f058082
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/depth_cleanup.py
@@ -0,0 +1,115 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import torch
+import torch.nn.functional as Fu
+from pytorch3d.ops import wmean
+from pytorch3d.renderer.cameras import CamerasBase
+from pytorch3d.structures import Pointclouds
+
+
def cleanup_eval_depth(
    point_cloud: Pointclouds,
    camera: CamerasBase,
    depth: torch.Tensor,
    mask: torch.Tensor,
    sigma: float = 0.01,
    image=None,
):
    """
    Return a mask of depth-map pixels whose values agree with the point
    cloud `point_cloud` as seen from `camera`.

    Each cloud point is projected into the depth map; the depth sampled at
    the projection is compared to the point's camera-space depth. Points
    whose absolute difference is within `sigma` times the (mask-weighted)
    standard deviation of the depth map are "good", and the pixels they
    land on are kept in the output mask.

    Args:
        point_cloud: batched Pointclouds to validate against.
        camera: cameras projecting the points into the depth maps.
        depth: depth maps; a 4D tensor, single-channel — (ba, 1, H, W).
        mask: validity weights for the depth statistics; viewed as (ba, H*W).
        sigma: multiple of the depth standard deviation used as the
            consistency threshold.
        image: unused; kept for the commented-out debug visualization below.

    Returns:
        good_depth_mask: (ba, 1, H, W) float tensor, 1.0 at pixels hit by
            at least one consistent point, 0.0 elsewhere.
    """

    ba, _, H, W = depth.shape

    pcl = point_cloud.points_padded()
    n_pts = point_cloud.num_points_per_cloud()
    # mask of valid (non-padding) entries of the padded points tensor
    pcl_mask = (
        torch.arange(pcl.shape[1], dtype=torch.int64, device=pcl.device)[None]
        < n_pts[:, None]
    ).type_as(pcl)

    # screen projections (x, y) and camera-space depths of the points
    pcl_proj = camera.transform_points(pcl, eps=1e-2)[..., :-1]
    pcl_depth = camera.get_world_to_view_transform().transform_points(pcl)[..., -1]

    # channel 0: depth; channel 1: flat pixel index, for scattering back later
    depth_and_idx = torch.cat(
        (
            depth,
            torch.arange(H * W).view(1, 1, H, W).expand(ba, 1, H, W).type_as(depth),
        ),
        dim=1,
    )

    # nearest-neighbor sample at each projected point; the projection is
    # negated — presumably converting PyTorch3D screen coords to the
    # grid_sample convention (TODO confirm)
    depth_and_idx_sampled = Fu.grid_sample(
        depth_and_idx, -pcl_proj[:, None], mode="nearest"
    )[:, :, 0].view(ba, 2, -1)

    depth_sampled, idx_sampled = depth_and_idx_sampled.split([1, 1], dim=1)
    # per-point discrepancy between rendered depth and point depth
    df = (depth_sampled[:, 0] - pcl_depth).abs()

    # the threshold is a sigma-multiple of the standard deviation of the depth
    mu = wmean(depth.view(ba, -1, 1), mask.view(ba, -1)).view(ba, 1)
    std = (
        # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
        wmean((depth.view(ba, -1) - mu).view(ba, -1, 1) ** 2, mask.view(ba, -1))
        .clamp(1e-4)
        .sqrt()
        .view(ba, -1)
    )
    good_df_thr = std * sigma
    # per-point indicator: consistent AND not padding
    good_depth = (df <= good_df_thr).float() * pcl_mask

    # perc_kept = good_depth.sum(dim=1) / pcl_mask.sum(dim=1).clamp(1)
    # print(f'Kept {100.0 * perc_kept.mean():1.3f} % points')

    # scatter the per-point indicators back onto the pixels they sampled
    good_depth_raster = torch.zeros_like(depth).view(ba, -1)
    good_depth_raster.scatter_add_(1, torch.round(idx_sampled[:, 0]).long(), good_depth)

    good_depth_mask = (good_depth_raster.view(ba, 1, H, W) > 0).float()

    # if float(torch.rand(1)) > 0.95:
    #     depth_ok = depth * good_depth_mask

    #     # visualize
    #     visdom_env = 'depth_cleanup_dbg'
    #     from visdom import Visdom
    #     # from tools.vis_utils import make_depth_image
    #     from pytorch3d.vis.plotly_vis import plot_scene
    #     viz = Visdom()

    #     show_pcls = {
    #         'pointclouds': point_cloud,
    #     }
    #     for d, nm in zip(
    #         (depth, depth_ok),
    #         ('pointclouds_unproj', 'pointclouds_unproj_ok'),
    #     ):
    #         pointclouds_unproj = get_rgbd_point_cloud(
    #             camera, image, d,
    #         )
    #         if int(pointclouds_unproj.num_points_per_cloud()) > 0:
    #             show_pcls[nm] = pointclouds_unproj

    #     scene_dict = {'1': {
    #         **show_pcls,
    #         'cameras': camera,
    #     }}
    #     scene = plot_scene(
    #         scene_dict,
    #         pointcloud_max_points=5000,
    #         pointcloud_marker_size=1.5,
    #         camera_scale=1.0,
    #     )
    #     viz.plotlyplot(scene, env=visdom_env, win='scene')

    #     # depth_image_ok = make_depth_image(depths_ok, masks)
    #     # viz.images(depth_image_ok, env=visdom_env, win='depth_ok')
    #     # depth_image = make_depth_image(depths, masks)
    #     # viz.images(depth_image, env=visdom_env, win='depth')
    #     # # viz.images(rgb_rendered, env=visdom_env, win='images_render')
    #     # viz.images(images, env=visdom_env, win='images')
    #     import pdb; pdb.set_trace()

    return good_depth_mask
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/eval_video_trajectory.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/eval_video_trajectory.py
new file mode 100644
index 0000000000000000000000000000000000000000..f46fe292f8e45c1d40ef157f444941ee23761e16
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/eval_video_trajectory.py
@@ -0,0 +1,270 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import logging
+import math
+from typing import Optional, Tuple
+
+import torch
+from pytorch3d.implicitron.tools import utils
+from pytorch3d.implicitron.tools.circle_fitting import fit_circle_in_3d
+from pytorch3d.renderer import look_at_view_transform, PerspectiveCameras
+from pytorch3d.transforms import Scale
+
+
+logger = logging.getLogger(__name__)
+
+
def generate_eval_video_cameras(
    train_cameras,
    n_eval_cams: int = 100,
    trajectory_type: str = "figure_eight",
    trajectory_scale: float = 0.2,
    scene_center: Tuple[float, float, float] = (0.0, 0.0, 0.0),
    up: Tuple[float, float, float] = (0.0, 0.0, 1.0),
    focal_length: Optional[torch.Tensor] = None,
    principal_point: Optional[torch.Tensor] = None,
    time: Optional[torch.Tensor] = None,
    infer_up_as_plane_normal: bool = True,
    traj_offset: Optional[Tuple[float, float, float]] = None,
    traj_offset_canonical: Optional[Tuple[float, float, float]] = None,
    remove_outliers_rate: float = 0.0,
) -> PerspectiveCameras:
    """
    Generate a camera trajectory rendering a scene from multiple viewpoints.

    Args:
        train_cameras: The set of cameras from the training dataset object.
        n_eval_cams: Number of cameras in the trajectory.
        trajectory_type: The type of the camera trajectory. Can be one of:
            circular_lsq_fit: Camera centers follow a trajectory obtained
                by fitting a 3D circle to train_cameras centers.
                All cameras are looking towards scene_center.
            figure_eight: Figure-of-8 trajectory around the center of the
                central camera of the training dataset.
            trefoil_knot: Same as 'figure_eight', but the trajectory has a shape
                of a trefoil knot (https://en.wikipedia.org/wiki/Trefoil_knot).
            figure_eight_knot: Same as 'figure_eight', but the trajectory has a shape
                of a figure-eight knot
                (https://en.wikipedia.org/wiki/Figure-eight_knot_(mathematics)).
        trajectory_scale: The extent of the trajectory.
        scene_center: The center of the scene in world coordinates which all
            the cameras from the generated trajectory look at.
        up: The "up" vector of the scene (=the normal of the scene floor).
            Active for the `trajectory_type="circular_lsq_fit"`.
        focal_length: The focal length of the output cameras. If `None`, an average
            focal length of the train_cameras is used.
        principal_point: The principal point of the output cameras. If `None`, an average
            principal point of all train_cameras is used.
        time: Defines the total length of the generated camera trajectory. All possible
            trajectories (set with the `trajectory_type` argument) are periodic with
            the period of `time=2pi`.
            E.g. setting `trajectory_type=circular_lsq_fit` and `time=4pi`, will generate
            a trajectory of camera poses rotating the total of 720 deg around the object.
        infer_up_as_plane_normal: Infer the camera `up` vector automatically as the normal
            of the plane fit to the optical centers of `train_cameras`.
        traj_offset: 3D offset vector added to each point of the trajectory.
        traj_offset_canonical: 3D offset vector expressed in the local coordinates of
            the estimated trajectory which is added to each point of the trajectory.
        remove_outliers_rate: the number between 0 and 1; if > 0,
            some outlier train_cameras will be removed from trajectory estimation;
            the filtering is based on camera center coordinates; top and
            bottom `remove_outliers_rate` cameras on each dimension are removed.

    Returns:
        Batch of camera instances which can be used as the test dataset

    Raises:
        ValueError: if `trajectory_type` is not one of the supported values.
    """
    if remove_outliers_rate > 0.0:
        train_cameras = _remove_outlier_cameras(train_cameras, remove_outliers_rate)

    if trajectory_type in ("figure_eight", "trefoil_knot", "figure_eight_knot"):
        cam_centers = train_cameras.get_camera_center()
        # get the nearest camera center to the mean of centers
        mean_camera_idx = (
            ((cam_centers - cam_centers.mean(dim=0)[None]) ** 2)
            .sum(dim=1)
            .min(dim=0)
            .indices
        )
        # generate the knot trajectory in canonical coords
        if time is None:
            time = torch.linspace(0, 2 * math.pi, n_eval_cams + 1)[:n_eval_cams]
        else:
            assert time.numel() == n_eval_cams
        if trajectory_type == "trefoil_knot":
            traj = _trefoil_knot(time)
        elif trajectory_type == "figure_eight_knot":
            traj = _figure_eight_knot(time)
        elif trajectory_type == "figure_eight":
            traj = _figure_eight(time)
        else:
            raise ValueError(f"bad trajectory type: {trajectory_type}")
        traj[:, 2] -= traj[:, 2].max()

        # transform the canonical knot to the coord frame of the mean camera
        mean_camera = PerspectiveCameras(
            **{
                k: getattr(train_cameras, k)[[int(mean_camera_idx)]]
                for k in ("focal_length", "principal_point", "R", "T")
            }
        )
        traj_trans = Scale(cam_centers.std(dim=0).mean() * trajectory_scale).compose(
            mean_camera.get_world_to_view_transform().inverse()
        )

        if traj_offset_canonical is not None:
            traj_trans = traj_trans.translate(
                torch.FloatTensor(traj_offset_canonical)[None].to(traj)
            )

        traj = traj_trans.transform_points(traj)

        plane_normal = _fit_plane(cam_centers)[:, 0]
        if infer_up_as_plane_normal:
            up = _disambiguate_normal(plane_normal, up)

    elif trajectory_type == "circular_lsq_fit":
        ### fit plane to the camera centers

        # get the center of the plane as the median of the camera centers
        cam_centers = train_cameras.get_camera_center()

        if time is not None:
            angle = time
        else:
            angle = torch.linspace(0, 2.0 * math.pi, n_eval_cams).to(cam_centers)

        fit = fit_circle_in_3d(
            cam_centers,
            angles=angle,
            offset=(
                angle.new_tensor(traj_offset_canonical)
                if traj_offset_canonical is not None
                else None
            ),
            up=angle.new_tensor(up),
        )
        traj = fit.generated_points

        # scale the trajectory
        _t_mu = traj.mean(dim=0, keepdim=True)
        traj = (traj - _t_mu) * trajectory_scale + _t_mu

        plane_normal = fit.normal

        if infer_up_as_plane_normal:
            up = _disambiguate_normal(plane_normal, up)

    else:
        raise ValueError(f"Unknown trajectory_type {trajectory_type}.")

    if traj_offset is not None:
        traj = traj + torch.FloatTensor(traj_offset)[None].to(traj)

    # point all cameras towards the center of the scene
    R, T = look_at_view_transform(
        eye=traj,
        at=(scene_center,),  # (1, 3)
        up=(up,),  # (1, 3)
        device=traj.device,
    )

    # get the average focal length and principal point
    if focal_length is None:
        focal_length = train_cameras.focal_length.mean(dim=0).repeat(n_eval_cams, 1)
    if principal_point is None:
        principal_point = train_cameras.principal_point.mean(dim=0).repeat(
            n_eval_cams, 1
        )

    test_cameras = PerspectiveCameras(
        focal_length=focal_length,
        principal_point=principal_point,
        R=R,
        T=T,
        device=focal_length.device,
    )

    # _visdom_plot_scene(
    #     train_cameras,
    #     test_cameras,
    # )

    return test_cameras
+
+
def _remove_outlier_cameras(
    cameras: PerspectiveCameras, outlier_rate: float
) -> PerspectiveCameras:
    """
    Return a camera batch with positional outliers dropped.

    Inliers are selected per coordinate of the camera centers using
    utils.get_inlier_indicators; the surviving subset size is logged.
    """
    inlier_mask = utils.get_inlier_indicators(
        cameras.get_camera_center(), dim=0, outlier_rate=outlier_rate
    )
    # pyre-fixme[6]: For 1st param expected `Union[List[int], int, BoolTensor,
    #  LongTensor]` but got `Tensor`.
    filtered = cameras[inlier_mask]
    logger.info(
        "Filtered outlier cameras when estimating the trajectory: "
        f"{len(cameras)} → {len(filtered)}"
    )
    # pyre-fixme[7]: Expected `PerspectiveCameras` but got `CamerasBase`.
    return filtered
+
+
+def _disambiguate_normal(normal, up):
+ up_t = torch.tensor(up).to(normal)
+ flip = (up_t * normal).sum().sign()
+ up = normal * flip
+ up = up.tolist()
+ return up
+
+
+def _fit_plane(x):
+ x = x - x.mean(dim=0)[None]
+ cov = (x.t() @ x) / x.shape[0]
+ _, e_vec = torch.linalg.eigh(cov)
+ return e_vec
+
+
def _visdom_plot_scene(
    train_cameras,
    test_cameras,
) -> None:
    """
    Debug helper: plot the train and test camera batches with plotly and
    push the figure to a local Visdom server (env "cam_traj_dbg",
    window "cam_trajs").

    Requires a running visdom server; only referenced from commented-out
    debugging code.
    """
    from pytorch3d.vis.plotly_vis import plot_scene

    p = plot_scene(
        {
            "scene": {
                "train_cams": train_cameras,
                "test_cams": test_cameras,
            }
        }
    )
    from visdom import Visdom

    viz = Visdom()
    viz.plotlyplot(p, env="cam_traj_dbg", win="cam_trajs")
+
+
+def _figure_eight_knot(t: torch.Tensor, z_scale: float = 0.5):
+ x = (2 + (2 * t).cos()) * (3 * t).cos()
+ y = (2 + (2 * t).cos()) * (3 * t).sin()
+ z = (4 * t).sin() * z_scale
+ return torch.stack((x, y, z), dim=-1)
+
+
+def _trefoil_knot(t: torch.Tensor, z_scale: float = 0.5):
+ x = t.sin() + 2 * (2 * t).sin()
+ y = t.cos() - 2 * (2 * t).cos()
+ z = -(3 * t).sin() * z_scale
+ return torch.stack((x, y, z), dim=-1)
+
+
+def _figure_eight(t: torch.Tensor, z_scale: float = 0.5):
+ x = t.cos()
+ y = (2 * t).sin() / 2
+ z = t.sin() * z_scale
+ return torch.stack((x, y, z), dim=-1)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/metric_utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/metric_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4debeef422e489e7ce58a048eb2174cc6201c2b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/metric_utils.py
@@ -0,0 +1,237 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import math
+from typing import Optional, Tuple
+
+import torch
+from torch.nn import functional as F
+
+
def eval_depth(
    pred: torch.Tensor,
    gt: torch.Tensor,
    crop: int = 1,
    mask: Optional[torch.Tensor] = None,
    get_best_scale: bool = True,
    mask_thr: float = 0.5,
    best_scale_clamp_thr: float = 1e-4,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Evaluate the depth error between the prediction `pred` and the ground
    truth `gt`.

    Args:
        pred: A tensor of shape (N, 1, H, W) denoting the predicted depth maps.
        gt: A tensor of shape (N, 1, H, W) denoting the ground truth depth maps.
        crop: The number of pixels to crop from the border.
        mask: A mask denoting the valid regions of the gt depth.
        get_best_scale: If `True`, estimates a scaling factor of the predicted
            depth that yields the best mean squared error between `pred` and
            `gt`; useful when reconstructions are defined up to a scale.
        mask_thr: Threshold applied to `mask` to select the valid regions.
        best_scale_clamp_thr: The threshold for clamping the divisor in best
            scale estimation.

    Returns:
        mse_depth: Per-batch-element mean squared error between `pred` and `gt`.
        abs_depth: Per-batch-element mean absolute difference between `pred` and `gt`.
    """
    # discard the border pixels
    if crop > 0:
        gt = gt[:, :, crop:-crop, crop:-crop]
        pred = pred[:, :, crop:-crop, crop:-crop]

    if mask is not None:
        # zero-out gt outside the thresholded mask
        if crop > 0:
            mask = mask[:, :, crop:-crop, crop:-crop]
        gt = gt * (mask > mask_thr).float()

    # only pixels with positive gt depth contribute to the metrics
    valid = (gt > 0.0).float()
    valid_mass = torch.clamp(valid.sum((1, 2, 3)), 1e-4)

    if get_best_scale:
        # multiply pred by the scalar minimizing the masked mse
        scale = estimate_depth_scale_factor(pred, gt, valid, best_scale_clamp_thr)
        pred = pred * scale[:, None, None, None]

    residual = gt - pred
    mse_depth = (valid * (residual**2)).sum((1, 2, 3)) / valid_mass
    abs_depth = (valid * residual.abs()).sum((1, 2, 3)) / valid_mass

    return mse_depth, abs_depth
+
+
def estimate_depth_scale_factor(pred, gt, mask, clamp_thr):
    """
    Per-batch-element least-squares scalar s minimizing
    ||mask * (s * pred - gt)||^2, with the denominator clamped from
    below by `clamp_thr` for stability.
    """
    cross = (pred * gt * mask).mean((1, 2, 3))
    power = (pred * pred * mask).mean((1, 2, 3))
    return cross / torch.clamp(power, clamp_thr)
+
+
def calc_psnr(
    x: torch.Tensor,
    y: torch.Tensor,
    mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Peak signal-to-noise ratio between `x` and `y`, optionally restricted
    to `mask`. The MSE is clamped to 1e-10 so identical inputs yield a
    finite PSNR instead of log10(0).
    """
    mse = calc_mse(x, y, mask=mask)
    return -10.0 * torch.log10(mse.clamp(1e-10))
+
+
def calc_mse(
    x: torch.Tensor,
    y: torch.Tensor,
    mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Mean square error between `x` and `y`; when `mask` is given, a
    mask-weighted mean with the denominator clamped at 1e-5.
    """
    sq_err = (x - y) ** 2
    if mask is None:
        return sq_err.mean()
    denom = mask.expand_as(x).sum().clamp(1e-5)
    return (sq_err * mask).sum() / denom
+
+
def calc_bce(
    pred: torch.Tensor,
    gt: torch.Tensor,
    equal_w: bool = True,
    pred_eps: float = 0.01,
    mask: Optional[torch.Tensor] = None,
    lerp_bound: Optional[float] = None,
) -> torch.Tensor:
    """
    Binary cross entropy between `pred` and `gt`.

    Args:
        pred: predicted probabilities.
        gt: binary targets.
        equal_w: reweight so foreground (gt > 0.5) and background contribute
            equally.
        pred_eps: if positive, clamp pred into [pred_eps, 1 - pred_eps].
        mask: optional validity mask.
        lerp_bound: if given, use the linearly-extrapolated-log BCE instead
            of torch's binary_cross_entropy.
    """
    if pred_eps > 0.0:
        # bound predictions away from 0 and 1
        pred = torch.clamp(pred, pred_eps, 1.0 - pred_eps)

    if mask is None:
        mask = torch.ones_like(gt)

    if equal_w:
        fg = (gt > 0.5).float() * mask
        bg = (1 - fg) * mask
        weight = fg / fg.sum().clamp(1.0) + bg / bg.sum().clamp(1.0)
        # weight sum should be at this point ~2; renormalize to mean ~1
        weight = weight * (weight.numel() / weight.sum().clamp(1.0))
    else:
        weight = torch.ones_like(gt) * mask

    if lerp_bound is None:
        return F.binary_cross_entropy(pred, gt, reduction="mean", weight=weight)
    return binary_cross_entropy_lerp(pred, gt, weight, lerp_bound)
+
+
def binary_cross_entropy_lerp(
    pred: torch.Tensor,
    gt: torch.Tensor,
    weight: torch.Tensor,
    lerp_bound: float,
):
    """
    Weighted binary cross entropy whose log is linearly extrapolated below
    `lerp_bound` (for both log(pred) and log(1-pred)), which avoids
    exploding gradients when pred approaches 0 or 1.
    """
    log_p = log_lerp(pred, lerp_bound)
    log_not_p = log_lerp(1 - pred, lerp_bound)
    per_elem = gt * log_p + (1 - gt) * log_not_p
    return -(per_elem * weight).sum() / weight.sum().clamp(1e-4)
+
+
def log_lerp(x: torch.Tensor, b: float):
    """
    log(x) for x >= b; below b, the tangent line at b
    (log(b) + (x - b)/b), i.e. a linear extension with slope 1/b.
    """
    assert b > 0
    linear_branch = math.log(b) + (x - b) / b
    return torch.where(x >= b, x.log(), linear_branch)
+
+
def rgb_l1(
    pred: torch.Tensor, target: torch.Tensor, mask: Optional[torch.Tensor] = None
) -> torch.Tensor:
    """
    Per-batch-element mean absolute color error between `pred` and
    `target`, averaged over `mask` (defaults to an all-ones single-channel
    mask broadcast across color channels).
    """
    if mask is None:
        mask = torch.ones_like(pred[:, :1])
    masked_abs = (pred - target).abs() * mask
    denom = mask.sum(dim=(1, 2, 3)).clamp(1)
    return masked_abs.sum(dim=(1, 2, 3)) / denom
+
+
def huber(dfsq: torch.Tensor, scaling: float = 0.03) -> torch.Tensor:
    """
    Pseudo-Huber penalty of the squared error `dfsq`: smooth near zero,
    transitioning from unit-gradient behavior around dfsq = scaling^2.
    """
    normalized = dfsq / (scaling * scaling)
    return (safe_sqrt(1 + normalized, eps=1e-4) - 1) * scaling
+
+
def neg_iou_loss(
    predict: torch.Tensor,
    target: torch.Tensor,
    mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    IoU-based loss: 1 - iou(predict, target), emphasizing the active
    regions of the prediction and the target.
    """
    overlap_score = iou(predict, target, mask=mask)
    return 1.0 - overlap_score
+
+
def safe_sqrt(A: torch.Tensor, eps: float = 1e-4) -> torch.Tensor:
    """
    Differentiable sqrt: negatives are clamped to zero and `eps` is added
    before taking the root, keeping the gradient finite at 0.
    """
    clamped = torch.clamp(A, float(0))
    return torch.sqrt(clamped + eps)
+
+
def iou(
    predict: torch.Tensor,
    target: torch.Tensor,
    mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Soft intersection-over-union between `predict` and `target`,
    averaged over the batch; a small 1e-4 stabilizes the denominator.
    Emphasizes the active regions of the prediction and the target.
    """
    reduce_dims = tuple(range(1, predict.dim()))
    if mask is not None:
        predict = predict * mask
        target = target * mask
    overlap = (predict * target).sum(reduce_dims)
    total = (predict + target - predict * target).sum(reduce_dims) + 1e-4
    return (overlap / total).sum() / overlap.numel()
+
+
def beta_prior(pred: torch.Tensor, cap: float = 0.1) -> torch.Tensor:
    """
    Beta-distribution-style prior on `pred`, shifted so its minimum
    (attained at pred in {0, 1}) is zero; `cap` bounds the log arguments
    away from zero and must be positive.
    """
    if cap <= 0.0:
        raise ValueError("capping should be positive to avoid unbound loss")

    floor = math.log(cap) + math.log(cap + 1.0)
    return (torch.log(pred + cap) + torch.log(1.0 - pred + cap)).mean() - floor
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/model_io.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/model_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6bc499d698632fdb2424141769ef4b16eb7168a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/model_io.py
@@ -0,0 +1,175 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import glob
+import logging
+import os
+import shutil
+import tempfile
+from typing import Optional
+
+import torch
+
+
+logger = logging.getLogger(__name__)
+
+
+def load_stats(flstats):
+ from pytorch3d.implicitron.tools.stats import Stats
+
+ if not os.path.isfile(flstats):
+ return None
+
+ return Stats.load(flstats)
+
+
+def get_model_path(fl) -> str:
+ fl = os.path.splitext(fl)[0]
+ flmodel = "%s.pth" % fl
+ return flmodel
+
+
+def get_optimizer_path(fl) -> str:
+ fl = os.path.splitext(fl)[0]
+ flopt = "%s_opt.pth" % fl
+ return flopt
+
+
+def get_stats_path(fl, eval_results: bool = False) -> str:
+ fl = os.path.splitext(fl)[0]
+ if eval_results:
+ for postfix in ("_2", ""):
+ flstats = os.path.join(os.path.dirname(fl), f"stats_test{postfix}.jgz")
+ if os.path.isfile(flstats):
+ break
+ else:
+ flstats = "%s_stats.jgz" % fl
+ # pyre-fixme[61]: `flstats` is undefined, or not always defined.
+ return flstats
+
+
+def safe_save_model(model, stats, fl, optimizer=None, cfg=None) -> None:
+ """
+ This function stores model files safely so that no model files exist on the
+ file system in case the saving procedure gets interrupted.
+
+ This is done first by saving the model files to a temporary directory followed
+ by (atomic) moves to the target location. Note, that this can still result
+ in a corrupt set of model files in case interruption happens while performing
+ the moves. It is however quite improbable that a crash would occur right at
+ this time.
+ """
+ logger.info(f"saving model files safely to {fl}")
+ # first store everything to a tmpdir
+ with tempfile.TemporaryDirectory() as tmpdir:
+ tmpfl = os.path.join(tmpdir, os.path.split(fl)[-1])
+ stored_tmp_fls = save_model(model, stats, tmpfl, optimizer=optimizer, cfg=cfg)
+ tgt_fls = [
+ (
+ os.path.join(os.path.split(fl)[0], os.path.split(tmpfl)[-1])
+ if (tmpfl is not None)
+ else None
+ )
+ for tmpfl in stored_tmp_fls
+ ]
+ # then move from the tmpdir to the right location
+ for tmpfl, tgt_fl in zip(stored_tmp_fls, tgt_fls):
+ if tgt_fl is None:
+ continue
+ shutil.move(tmpfl, tgt_fl)
+
+
+def save_model(model, stats, fl, optimizer=None, cfg=None):
+ flstats = get_stats_path(fl)
+ flmodel = get_model_path(fl)
+ logger.info("saving model to %s" % flmodel)
+ torch.save(model.state_dict(), flmodel)
+ flopt = None
+ if optimizer is not None:
+ flopt = get_optimizer_path(fl)
+ logger.info("saving optimizer to %s" % flopt)
+ torch.save(optimizer.state_dict(), flopt)
+ logger.info("saving model stats to %s" % flstats)
+ stats.save(flstats)
+
+ return flstats, flmodel, flopt
+
+
+def save_stats(stats, fl, cfg=None):
+ flstats = get_stats_path(fl)
+ logger.info("saving model stats to %s" % flstats)
+ stats.save(flstats)
+ return flstats
+
+
+def load_model(fl, map_location: Optional[dict]):
+ flstats = get_stats_path(fl)
+ flmodel = get_model_path(fl)
+ flopt = get_optimizer_path(fl)
+ model_state_dict = torch.load(flmodel, map_location=map_location)
+ stats = load_stats(flstats)
+ if os.path.isfile(flopt):
+ optimizer = torch.load(flopt, map_location=map_location)
+ else:
+ optimizer = None
+
+ return model_state_dict, stats, optimizer
+
+
+def parse_epoch_from_model_path(model_path) -> int:
+ return int(
+ os.path.split(model_path)[-1].replace(".pth", "").replace("model_epoch_", "")
+ )
+
+
+def get_checkpoint(exp_dir, epoch):
+ fl = os.path.join(exp_dir, "model_epoch_%08d.pth" % epoch)
+ return fl
+
+
+def find_last_checkpoint(
+ exp_dir, any_path: bool = False, all_checkpoints: bool = False
+):
+ if any_path:
+ exts = [".pth", "_stats.jgz", "_opt.pth"]
+ else:
+ exts = [".pth"]
+
+ for ext in exts:
+ fls = sorted(
+ glob.glob(
+ os.path.join(glob.escape(exp_dir), "model_epoch_" + "[0-9]" * 8 + ext)
+ )
+ )
+ if len(fls) > 0:
+ break
+ # pyre-fixme[61]: `fls` is undefined, or not always defined.
+ if len(fls) == 0:
+ fl = None
+ else:
+ if all_checkpoints:
+ # pyre-fixme[61]: `fls` is undefined, or not always defined.
+ fl = [f[0 : -len(ext)] + ".pth" for f in fls]
+ else:
+ # pyre-fixme[61]: `ext` is undefined, or not always defined.
+ fl = fls[-1][0 : -len(ext)] + ".pth"
+
+ return fl
+
+
+def purge_epoch(exp_dir, epoch) -> None:
+ model_path = get_checkpoint(exp_dir, epoch)
+
+ for file_path in [
+ model_path,
+ get_optimizer_path(model_path),
+ get_stats_path(model_path),
+ ]:
+ if os.path.isfile(file_path):
+ logger.info("deleting %s" % file_path)
+ os.remove(file_path)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/point_cloud_utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/point_cloud_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c30a2318c44790bba009f392c6eacf174bcf395
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/point_cloud_utils.py
@@ -0,0 +1,196 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+from typing import cast, Optional, Tuple
+
+import torch
+import torch.nn.functional as Fu
+from pytorch3d.renderer import (
+ AlphaCompositor,
+ NDCMultinomialRaysampler,
+ PointsRasterizationSettings,
+ PointsRasterizer,
+ ray_bundle_to_ray_points,
+)
+from pytorch3d.renderer.cameras import CamerasBase
+from pytorch3d.structures import Pointclouds
+
+
+def get_rgbd_point_cloud(
+ camera: CamerasBase,
+ image_rgb: torch.Tensor,
+ depth_map: torch.Tensor,
+ mask: Optional[torch.Tensor] = None,
+ mask_thr: float = 0.5,
+ *,
+ euclidean: bool = False,
+) -> Pointclouds:
+ """
+ Given a batch of images, depths, masks and cameras, generate a single colored
+ point cloud by unprojecting depth maps and coloring with the source
+ pixel colors.
+
+ Arguments:
+ camera: Batch of N cameras
+ image_rgb: Batch of N images of shape (N, C, H, W).
+ For RGB images C=3.
+ depth_map: Batch of N depth maps of shape (N, 1, H', W').
+ Only positive values here are used to generate points.
+ If euclidean=False (default) this contains perpendicular distances
+ from each point to the camera plane (z-values).
+ If euclidean=True, this contains distances from each point to
+ the camera center.
+ mask: If provided, batch of N masks of the same shape as depth_map.
+ If provided, values in depth_map are ignored if the corresponding
+ element of mask is smaller than mask_thr.
+ mask_thr: used in interpreting mask
+ euclidean: used in interpreting depth_map.
+
+ Returns:
+ Pointclouds object containing one point cloud.
+ """
+ imh, imw = depth_map.shape[2:]
+
+ # convert the depth maps to point clouds using the grid ray sampler
+ pts_3d = ray_bundle_to_ray_points(
+ NDCMultinomialRaysampler(
+ image_width=imw,
+ image_height=imh,
+ n_pts_per_ray=1,
+ min_depth=1.0,
+ max_depth=1.0,
+ unit_directions=euclidean,
+ )(camera)._replace(lengths=depth_map[:, 0, ..., None])
+ )
+
+ pts_mask = depth_map > 0.0
+ if mask is not None:
+ pts_mask *= mask > mask_thr
+ pts_mask = pts_mask.reshape(-1)
+
+ pts_3d = pts_3d.reshape(-1, 3)[pts_mask]
+
+ pts_colors = torch.nn.functional.interpolate(
+ image_rgb,
+ size=[imh, imw],
+ mode="bilinear",
+ align_corners=False,
+ )
+ pts_colors = pts_colors.permute(0, 2, 3, 1).reshape(-1, image_rgb.shape[1])[
+ pts_mask
+ ]
+
+ return Pointclouds(points=pts_3d[None], features=pts_colors[None])
+
+
+def render_point_cloud_pytorch3d(
+ camera,
+ point_cloud,
+ render_size: Tuple[int, int],
+ point_radius: float = 0.03,
+ topk: int = 10,
+ eps: float = 1e-2,
+ bg_color=None,
+ bin_size: Optional[int] = None,
+ **kwargs,
+):
+
+ # feature dimension
+ featdim = point_cloud.features_packed().shape[-1]
+
+ # move to the camera coordinates; using identity cameras in the renderer
+ point_cloud = _transform_points(camera, point_cloud, eps, **kwargs)
+ camera_trivial = camera.clone()
+ camera_trivial.R[:] = torch.eye(3)
+ camera_trivial.T *= 0.0
+
+ bin_size = (
+ bin_size
+ if bin_size is not None
+ else (64 if int(max(render_size)) > 1024 else None)
+ )
+ rasterizer = PointsRasterizer(
+ cameras=camera_trivial,
+ raster_settings=PointsRasterizationSettings(
+ image_size=render_size,
+ radius=point_radius,
+ points_per_pixel=topk,
+ bin_size=bin_size,
+ ),
+ )
+
+ fragments = rasterizer(point_cloud, **kwargs)
+
+ # Construct weights based on the distance of a point to the true point.
+ # However, this could be done differently: e.g. predicted as opposed
+ # to a function of the weights.
+ r = rasterizer.raster_settings.radius
+
+ # set up the blending weights
+ dists2 = fragments.dists
+ weights = 1 - dists2 / (r * r)
+ ok = cast(torch.BoolTensor, (fragments.idx >= 0)).float()
+
+ weights = weights * ok
+
+ fragments_prm = fragments.idx.long().permute(0, 3, 1, 2)
+ weights_prm = weights.permute(0, 3, 1, 2)
+ images = AlphaCompositor()(
+ fragments_prm,
+ weights_prm,
+ point_cloud.features_packed().permute(1, 0),
+ background_color=bg_color if bg_color is not None else [0.0] * featdim,
+ **kwargs,
+ )
+
+ # get the depths ...
+ # weighted_fs[b,c,i,j] = sum_k cum_alpha_k * features[c,pointsidx[b,k,i,j]]
+ # cum_alpha_k = alphas[b,k,i,j] * prod_l=0..k-1 (1 - alphas[b,l,i,j])
+ cumprod = torch.cumprod(1 - weights, dim=-1)
+ cumprod = torch.cat((torch.ones_like(cumprod[..., :1]), cumprod[..., :-1]), dim=-1)
+ depths = (weights * cumprod * fragments.zbuf).sum(dim=-1)
+ # add the rendering mask
+ render_mask = -torch.prod(1.0 - weights, dim=-1) + 1.0
+
+ # cat depths and render mask
+ rendered_blob = torch.cat((images, depths[:, None], render_mask[:, None]), dim=1)
+
+ # reshape back
+ rendered_blob = Fu.interpolate(
+ rendered_blob,
+ size=tuple(render_size),
+ mode="bilinear",
+ align_corners=False,
+ )
+
+ data_rendered, depth_rendered, render_mask = rendered_blob.split(
+ [rendered_blob.shape[1] - 2, 1, 1],
+ dim=1,
+ )
+
+ return data_rendered, render_mask, depth_rendered
+
+
+def _signed_clamp(x, eps):
+ sign = x.sign() + (x == 0.0).type_as(x)
+ x_clamp = sign * torch.clamp(x.abs(), eps)
+ return x_clamp
+
+
+def _transform_points(cameras, point_clouds, eps, **kwargs):
+ pts_world = point_clouds.points_padded()
+ pts_view = cameras.get_world_to_view_transform(**kwargs).transform_points(
+ pts_world, eps=eps
+ )
+ # it is crucial to actually clamp the points as well ...
+ pts_view = torch.cat(
+ (pts_view[..., :-1], _signed_clamp(pts_view[..., -1:], eps)), dim=-1
+ )
+ point_clouds = point_clouds.update_padded(pts_view)
+ return point_clouds
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8764c6011e8eba806ea7b755d0bfb81dfd816a21
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+from .obj_io import load_obj, load_objs_as_meshes, save_obj
+from .pluggable import IO
+from .ply_io import load_ply, save_ply
+
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/mtl_io.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/mtl_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fb6af3fec05101b3f25cedc406025c36507780d
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/mtl_io.py
@@ -0,0 +1,538 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+"""This module implements utility functions for loading .mtl files and textures."""
+import os
+import warnings
+from typing import Dict, List, Optional, Tuple
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from iopath.common.file_io import PathManager
+from pytorch3d.common.compat import meshgrid_ij
+from pytorch3d.common.datatypes import Device
+from pytorch3d.io.utils import _open_file, _read_image
+
+
+def make_mesh_texture_atlas(
+ material_properties: Dict,
+ texture_images: Dict,
+ face_material_names,
+ faces_uvs: torch.Tensor,
+ verts_uvs: torch.Tensor,
+ texture_size: int,
+ texture_wrap: Optional[str],
+) -> torch.Tensor:
+ """
+ Given properties for materials defined in the .mtl file, and the face texture uv
+ coordinates, construct an (F, R, R, 3) texture atlas where R is the texture_size
+ and F is the number of faces in the mesh.
+
+ Args:
+ material_properties: dict of properties for each material. If a material
+ does not have any properties it will have an empty dict.
+ texture_images: dict of material names and texture images
+ face_material_names: numpy array of the material name corresponding to each
+ face. Faces which don't have an associated material will be an empty string.
+ For these faces, a uniform white texture is assigned.
+ faces_uvs: LongTensor of shape (F, 3,) giving the index into the verts_uvs for
+ each face in the mesh.
+ verts_uvs: FloatTensor of shape (V, 2) giving the uv coordinates for each vertex.
+ texture_size: the resolution of the per face texture map returned by this function.
+ Each face will have a texture map of shape (texture_size, texture_size, 3).
+ texture_wrap: string, one of ["repeat", "clamp", None]
+ If `texture_wrap="repeat"` for uv values outside the range [0, 1] the integer part
+ is ignored and a repeating pattern is formed.
+ If `texture_wrap="clamp"` the values are clamped to the range [0, 1].
+ If None, do nothing.
+
+ Returns:
+ atlas: FloatTensor of shape (F, texture_size, texture_size, 3) giving the per
+ face texture map.
+ """
+ # Create an R x R texture map per face in the mesh
+ R = texture_size
+ F = faces_uvs.shape[0]
+
+ # Initialize the per face texture map to a white color.
+ # TODO: allow customization of this base color?
+ atlas = torch.ones(size=(F, R, R, 3), dtype=torch.float32, device=faces_uvs.device)
+
+ # Check for empty materials.
+ if not material_properties and not texture_images:
+ return atlas
+
+ # Iterate through the material properties - not
+ # all materials have texture images so this is
+ # done first separately to the texture interpolation.
+ for material_name, props in material_properties.items():
+ # Bool to indicate which faces use this texture map.
+ faces_material_ind = torch.from_numpy(face_material_names == material_name).to(
+ faces_uvs.device
+ )
+ if faces_material_ind.sum() > 0:
+ # For these faces, update the base color to the
+ # diffuse material color.
+ if "diffuse_color" not in props:
+ continue
+ atlas[faces_material_ind, ...] = props["diffuse_color"][None, :]
+
+ # If there are vertex texture coordinates, create an (F, 3, 2)
+ # tensor of the vertex textures per face.
+ faces_verts_uvs = verts_uvs[faces_uvs] if len(verts_uvs) > 0 else None
+
+ # Some meshes only have material properties and no texture image.
+ # In this case, return the atlas here.
+ if faces_verts_uvs is None:
+ return atlas
+
+ if texture_wrap == "repeat":
+ # If texture uv coordinates are outside the range [0, 1] follow
+ # the convention GL_REPEAT in OpenGL i.e the integer part of the coordinate
+ # will be ignored and a repeating pattern is formed.
+ # Shapenet data uses this format see:
+ # https://shapenet.org/qaforum/index.php?qa=15&qa_1=why-is-the-texture-coordinate-in-the-obj-file-not-in-the-range # noqa: B950
+ if (faces_verts_uvs > 1).any() or (faces_verts_uvs < 0).any():
+ msg = "Texture UV coordinates outside the range [0, 1]. \
+ The integer part will be ignored to form a repeating pattern."
+ warnings.warn(msg)
+ faces_verts_uvs = faces_verts_uvs % 1
+ elif texture_wrap == "clamp":
+ # Clamp uv coordinates to the [0, 1] range.
+ faces_verts_uvs = faces_verts_uvs.clamp(0.0, 1.0)
+
+ # Iterate through the materials used in this mesh. Update the
+ # texture atlas for the faces which use this material.
+ # Faces without texture are white.
+ for material_name, image in list(texture_images.items()):
+ # Only use the RGB colors
+ if image.shape[2] == 4:
+ image = image[:, :, :3]
+
+ # Reverse the image y direction
+ image = torch.flip(image, [0]).type_as(faces_verts_uvs)
+
+ # Bool to indicate which faces use this texture map.
+ faces_material_ind = torch.from_numpy(face_material_names == material_name).to(
+ faces_verts_uvs.device
+ )
+
+ # Find the subset of faces which use this texture with this texture image
+ uvs_subset = faces_verts_uvs[faces_material_ind, :, :]
+
+ # Update the texture atlas for the faces which use this texture.
+ # TODO: should the texture map values be multiplied
+ # by the diffuse material color (i.e. use *= as the atlas has
+ # been initialized to the diffuse color)?. This is
+ # not being done in SoftRas.
+ atlas[faces_material_ind, :, :] = make_material_atlas(image, uvs_subset, R)
+
+ return atlas
+
+
+def make_material_atlas(
+ image: torch.Tensor, faces_verts_uvs: torch.Tensor, texture_size: int
+) -> torch.Tensor:
+ r"""
+ Given a single texture image and the uv coordinates for all the
+ face vertices, create a square texture map per face using
+ the formulation from [1].
+
+ For a triangle with vertices (v0, v1, v2) we can create a barycentric coordinate system
+ with the x axis being the vector (v0 - v2) and the y axis being the vector (v1 - v2).
+ The barycentric coordinates range from [0, 1] in the +x and +y direction so this creates
+ a triangular texture space with vertices at (0, 1), (0, 0) and (1, 0).
+
+ The per face texture map is of shape (texture_size, texture_size, 3)
+ which is a square. To map a triangular texture to a square grid, each
+ triangle is parametrized as follows (e.g. R = texture_size = 3):
+
+ The triangle texture is first divided into RxR = 9 subtriangles which each
+ map to one grid cell. The numbers in the grid cells and triangles show the mapping.
+
+ ..code-block::python
+
+ Triangular Texture Space:
+
+ 1
+ |\
+ |6 \
+ |____\
+ |\ 7 |\
+ |3 \ |4 \
+ |____\|____\
+ |\ 8 |\ 5 |\
+ |0 \ |1 \ |2 \
+ |____\|____\|____\
+ 0 1
+
+ Square per face texture map:
+
+ R ____________________
+ | | | |
+ | 6 | 7 | 8 |
+ |______|______|______|
+ | | | |
+ | 3 | 4 | 5 |
+ |______|______|______|
+ | | | |
+ | 0 | 1 | 2 |
+ |______|______|______|
+ 0 R
+
+
+ The barycentric coordinates of each grid cell are calculated using the
+ xy coordinates:
+
+ ..code-block::python
+
+ The cartesian coordinates are:
+
+ Grid 1:
+
+ R ____________________
+ | | | |
+ | 20 | 21 | 22 |
+ |______|______|______|
+ | | | |
+ | 10 | 11 | 12 |
+ |______|______|______|
+ | | | |
+ | 00 | 01 | 02 |
+ |______|______|______|
+ 0 R
+
+ where 02 means y = 0, x = 2
+
+ Now consider this subset of the triangle which corresponds to
+ grid cells 0 and 8:
+
+ ..code-block::python
+
+ 1/R ________
+ |\ 8 |
+ | \ |
+ | 0 \ |
+ |_______\|
+ 0 1/R
+
+ The centroids of the triangles are:
+ 0: (1/3, 1/3) * 1/R
+ 8: (2/3, 2/3) * 1/R
+
+ For each grid cell we can now calculate the centroid `(c_y, c_x)`
+ of the corresponding texture triangle:
+ - if `(x + y) < R`, then offset the centroid of
+ triangle 0 by `(y, x) * (1/R)`
+ - if `(x + y) > R`, then offset the centroid of
+ triangle 8 by `((R-1-y), (R-1-x)) * (1/R)`.
+
+ This is equivalent to updating the portion of Grid 1
+ above the diagonal, replacing `(y, x)` with `((R-1-y), (R-1-x))`:
+
+ ..code-block::python
+
+ R _____________________
+ | | | |
+ | 20 | 01 | 00 |
+ |______|______|______|
+ | | | |
+ | 10 | 11 | 10 |
+ |______|______|______|
+ | | | |
+ | 00 | 01 | 02 |
+ |______|______|______|
+ 0 R
+
+ The barycentric coordinates (w0, w1, w2) are then given by:
+
+ ..code-block::python
+
+ w0 = c_x
+ w1 = c_y
+ w2 = 1 - w0 - w1
+
+ Args:
+ image: FloatTensor of shape (H, W, 3)
+ faces_verts_uvs: uv coordinates for each vertex in each face (F, 3, 2)
+ texture_size: int
+
+ Returns:
+ atlas: a FloatTensor of shape (F, texture_size, texture_size, 3) giving a
+ per face texture map.
+
+ [1] Liu et al, 'Soft Rasterizer: A Differentiable Renderer for Image-based
+ 3D Reasoning', ICCV 2019
+ """
+ R = texture_size
+ device = faces_verts_uvs.device
+ rng = torch.arange(R, device=device)
+
+ # Meshgrid returns (row, column) i.e (Y, X)
+ # Change order to (X, Y) to make the grid.
+ Y, X = meshgrid_ij(rng, rng)
+ # pyre-fixme[28]: Unexpected keyword argument `axis`.
+ grid = torch.stack([X, Y], axis=-1) # (R, R, 2)
+
+ # Grid cells below the diagonal: x + y < R.
+ below_diag = grid.sum(-1) < R
+
+ # map a [0, R] grid -> to a [0, 1] barycentric coordinates of
+ # the texture triangle centroids.
+ bary = torch.zeros((R, R, 3), device=device) # (R, R, 3)
+ slc = torch.arange(2, device=device)[:, None]
+ # w0, w1
+ bary[below_diag, slc] = ((grid[below_diag] + 1.0 / 3.0) / R).T
+ # w0, w1 for above diagonal grid cells.
+ bary[~below_diag, slc] = (((R - 1.0 - grid[~below_diag]) + 2.0 / 3.0) / R).T
+ # w2 = 1. - w0 - w1
+ bary[..., -1] = 1 - bary[..., :2].sum(dim=-1)
+
+ # Calculate the uv position in the image for each pixel
+ # in the per face texture map
+ # (F, 1, 1, 3, 2) * (R, R, 3, 1) -> (F, R, R, 3, 2) -> (F, R, R, 2)
+ uv_pos = (faces_verts_uvs[:, None, None] * bary[..., None]).sum(-2)
+
+ # bi-linearly interpolate the textures from the images
+ # using the uv coordinates given by uv_pos.
+ textures = _bilinear_interpolation_grid_sample(image, uv_pos)
+
+ return textures
+
+
+def _bilinear_interpolation_vectorized(
+ image: torch.Tensor, grid: torch.Tensor
+) -> torch.Tensor:
+ """
+ Bilinearly interpolate the image using the uv positions in the flow-field
+ grid (following the naming conventions for torch.nn.functional.grid_sample).
+
+ This implementation uses the same steps as in the SoftRasterizer CUDA kernel
+ for loading textures. We are keeping it for reference to make it easy to
+ compare if required.
+
+ However it doesn't properly handle the out of bound values in the same way as
+ the grid_sample function does with the padding_mode argument.
+ This vectorized version requires less memory than
+ _bilinear_interpolation_grid_sample but is slightly slower.
+
+ Args:
+ image: FloatTensor of shape (H, W, D) a single image/input tensor with D
+ channels.
+ grid: FloatTensor of shape (N, R, R, 2) giving the pixel locations of the
+ points at which to sample a value in the image. The grid values must
+ be in the range [0, 1]. u is the x direction and v is the y direction.
+
+ Returns:
+ out: FloatTensor of shape (N, H, W, D) giving the interpolated
+ D dimensional value from image at each of the pixel locations in grid.
+
+ """
+ H, W, _ = image.shape
+ # Convert [0, 1] to the range [0, W-1] and [0, H-1]
+ grid = grid * torch.tensor([W - 1, H - 1]).type_as(grid)
+ weight_1 = grid - grid.int()
+ weight_0 = 1.0 - weight_1
+
+ grid_x, grid_y = grid.unbind(-1)
+ y0 = grid_y.to(torch.int64)
+ y1 = (grid_y + 1).to(torch.int64)
+ x0 = grid_x.to(torch.int64)
+ x1 = x0 + 1
+
+ weight_x0, weight_y0 = weight_0.unbind(-1)
+ weight_x1, weight_y1 = weight_1.unbind(-1)
+
+ # Bi-linear interpolation
+ # grid positions = [[y, x], [(y+1), x]
+ # [y, (x+1)], [(y+1), (x+1)]]
+ # weights = [[wx0*wy0, wx0*wy1],
+ # [wx1*wy0, wx1*wy1]]
+ out = (
+ image[y0, x0] * (weight_x0 * weight_y0)[..., None]
+ + image[y1, x0] * (weight_x0 * weight_y1)[..., None]
+ + image[y0, x1] * (weight_x1 * weight_y0)[..., None]
+ + image[y1, x1] * (weight_x1 * weight_y1)[..., None]
+ )
+
+ return out
+
+
+def _bilinear_interpolation_grid_sample(
+ image: torch.Tensor, grid: torch.Tensor
+) -> torch.Tensor:
+ """
+ Bilinearly interpolate the image using the uv positions in the flow-field
+ grid (following the conventions for torch.nn.functional.grid_sample).
+
+ This implementation is faster than _bilinear_interpolation_vectorized but
+ requires more memory and so can cause OOMs. If memory is an issue, use
+ _bilinear_interpolation_vectorized instead.
+
+ Args:
+ image: FloatTensor of shape (H, W, D) a single image/input tensor with D
+ channels.
+ grid: FloatTensor of shape (N, R, R, 2) giving the pixel locations of the
+ points at which to sample a value in the image. The grid values must
+ be in the range [0, 1]. u is the x direction and v is the y direction.
+
+ Returns:
+ out: FloatTensor of shape (N, H, W, D) giving the interpolated
+ D dimensional value from image at each of the pixel locations in grid.
+ """
+
+ N = grid.shape[0]
+ # convert [0, 1] to the range [-1, 1] expected by grid_sample.
+ grid = grid * 2.0 - 1.0
+ image = image.permute(2, 0, 1)[None, ...].expand(N, -1, -1, -1) # (N, 3, H, W)
+ # Align_corners has to be set to True to match the output of the SoftRas
+ # cuda kernel for bilinear sampling.
+ out = F.grid_sample(image, grid, mode="bilinear", align_corners=True)
+ return out.permute(0, 2, 3, 1)
+
+
+MaterialProperties = Dict[str, Dict[str, torch.Tensor]]
+TextureFiles = Dict[str, str]
+TextureImages = Dict[str, torch.Tensor]
+
+
+def _parse_mtl(
+ f: str, path_manager: PathManager, device: Device = "cpu"
+) -> Tuple[MaterialProperties, TextureFiles]:
+ material_properties = {}
+ texture_files = {}
+ material_name = ""
+
+ # pyre-fixme[9]: f has type `str`; used as `IO[typing.Any]`.
+ with _open_file(f, path_manager, "r") as f:
+ for line in f:
+ tokens = line.strip().split()
+ if not tokens:
+ continue
+ if tokens[0] == "newmtl":
+ material_name = tokens[1]
+ material_properties[material_name] = {}
+ elif tokens[0] == "map_Kd":
+ # Diffuse texture map
+ # Account for the case where filenames might have spaces
+ filename = line.strip()[7:]
+ texture_files[material_name] = filename
+ elif tokens[0] == "Kd":
+ # RGB diffuse reflectivity
+ kd = np.array(tokens[1:4]).astype(np.float32)
+ kd = torch.from_numpy(kd).to(device)
+ material_properties[material_name]["diffuse_color"] = kd
+ elif tokens[0] == "Ka":
+ # RGB ambient reflectivity
+ ka = np.array(tokens[1:4]).astype(np.float32)
+ ka = torch.from_numpy(ka).to(device)
+ material_properties[material_name]["ambient_color"] = ka
+ elif tokens[0] == "Ks":
+ # RGB specular reflectivity
+ ks = np.array(tokens[1:4]).astype(np.float32)
+ ks = torch.from_numpy(ks).to(device)
+ material_properties[material_name]["specular_color"] = ks
+ elif tokens[0] == "Ns":
+ # Specular exponent
+ ns = np.array(tokens[1:4]).astype(np.float32)
+ ns = torch.from_numpy(ns).to(device)
+ material_properties[material_name]["shininess"] = ns
+
+ return material_properties, texture_files
+
+
+def _load_texture_images(
+ material_names: List[str],
+ data_dir: str,
+ material_properties: MaterialProperties,
+ texture_files: TextureFiles,
+ path_manager: PathManager,
+) -> Tuple[MaterialProperties, TextureImages]:
+ final_material_properties = {}
+ texture_images = {}
+
+ used_material_names = list(material_names)
+ if not used_material_names and material_properties:
+ if len(material_properties) > 1:
+ raise ValueError(
+ "Multiple materials but no usemtl declarations in the obj file"
+ )
+ # No materials were specified in obj file and only one is in the
+ # specified .mtl file, so we use it.
+ used_material_names.append(next(iter(material_properties.keys())))
+
+ # Only keep the materials referenced in the obj.
+ for material_name in used_material_names:
+ if material_name in texture_files:
+ # Load the texture image.
+ path = os.path.join(data_dir, texture_files[material_name])
+ if path_manager.exists(path):
+ image = (
+ _read_image(path, path_manager=path_manager, format="RGB") / 255.0
+ )
+ image = torch.from_numpy(image)
+ texture_images[material_name] = image
+ else:
+ msg = f"Texture file does not exist: {path}"
+ warnings.warn(msg)
+
+ if material_name in material_properties:
+ final_material_properties[material_name] = material_properties[
+ material_name
+ ]
+
+ return final_material_properties, texture_images
+
+
+def load_mtl(
+ f: str,
+ *,
+ material_names: List[str],
+ data_dir: str,
+ device: Device = "cpu",
+ path_manager: PathManager,
+) -> Tuple[MaterialProperties, TextureImages]:
+ """
+ Load texture images and material reflectivity values for ambient, diffuse
+ and specular light (Ka, Kd, Ks, Ns).
+
+ Args:
+ f: path to the material information.
+ material_names: a list of the material names found in the .obj file.
+ data_dir: the directory where the material texture files are located.
+ device: Device (as str or torch.tensor) on which to return the new tensors.
+ path_manager: PathManager for interpreting both f and material_names.
+
+ Returns:
+ material_properties: dict of properties for each material. If a material
+ does not have any properties it will have an empty dict.
+ {
+ material_name_1: {
+ "ambient_color": tensor of shape (1, 3),
+ "diffuse_color": tensor of shape (1, 3),
+ "specular_color": tensor of shape (1, 3),
+ "shininess": tensor of shape (1)
+ },
+ material_name_2: {},
+ ...
+ }
+ texture_images: dict of material names and texture images
+ {
+ material_name_1: (H, W, 3) image,
+ ...
+ }
+ """
+ material_properties, texture_files = _parse_mtl(f, path_manager, device)
+ return _load_texture_images(
+ material_names,
+ data_dir,
+ material_properties,
+ texture_files,
+ path_manager=path_manager,
+ )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/obj_io.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/obj_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cc29794bcc3f400367a17b50f883427aced01a6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/obj_io.py
@@ -0,0 +1,943 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+"""This module implements utility functions for loading and saving meshes."""
+import os
+import warnings
+from collections import namedtuple
+from pathlib import Path
+from typing import List, Optional
+
+import numpy as np
+import torch
+from iopath.common.file_io import PathManager
+from PIL import Image
+from pytorch3d.common.datatypes import Device
+from pytorch3d.io.mtl_io import load_mtl, make_mesh_texture_atlas
+from pytorch3d.io.utils import _check_faces_indices, _make_tensor, _open_file, PathOrStr
+from pytorch3d.renderer import TexturesAtlas, TexturesUV
+from pytorch3d.structures import join_meshes_as_batch, Meshes
+
+from .pluggable_formats import endswith, MeshFormatInterpreter
+
+
+# Faces & Aux type returned from load_obj function.
+_Faces = namedtuple("Faces", "verts_idx normals_idx textures_idx materials_idx")
+_Aux = namedtuple(
+ "Properties", "normals verts_uvs material_colors texture_images texture_atlas"
+)
+
+
+def _format_faces_indices(faces_indices, max_index: int, device, pad_value=None):
+ """
+ Format indices and check for invalid values. Indices can refer to
+ values in one of the face properties: vertices, textures or normals.
+ See comments of the load_obj function for more details.
+
+ Args:
+ faces_indices: List of ints of indices.
+ max_index: Max index for the face property.
+ device: Device on which to create the returned tensor.
+ pad_value: if any of the face_indices are padded, specify
+ the value of the padding (e.g. -1). This is only used
+ for texture and normal indices where there might
+ not be texture information for all the faces.
+
+ Returns:
+ faces_indices: LongTensor of shape (F, 3) with 0-based indices.
+
+ Raises:
+ ValueError if indices are not in a valid range.
+ """
+ faces_indices = _make_tensor(
+ faces_indices, cols=3, dtype=torch.int64, device=device
+ )
+
+ if pad_value is not None:
+ # Record fully-padded rows before the index shifts below so the
+ # pad value can be restored afterwards.
+ mask = faces_indices.eq(pad_value).all(dim=-1)
+
+ # Change to 0 based indexing.
+ faces_indices[(faces_indices > 0)] -= 1
+
+ # Negative indexing counts from the end.
+ faces_indices[(faces_indices < 0)] += max_index
+
+ if pad_value is not None:
+ # pyre-fixme[61]: `mask` is undefined, or not always defined.
+ faces_indices[mask] = pad_value
+
+ return _check_faces_indices(faces_indices, max_index, pad_value)
+
+
+def load_obj(
+ f,
+ load_textures: bool = True,
+ create_texture_atlas: bool = False,
+ texture_atlas_size: int = 4,
+ texture_wrap: Optional[str] = "repeat",
+ device: Device = "cpu",
+ path_manager: Optional[PathManager] = None,
+):
+ """
+ Load a mesh from a .obj file and optionally textures from a .mtl file.
+ Currently this handles verts, faces, vertex texture uv coordinates, normals,
+ texture images and material reflectivity values.
+
+ Note .obj files are 1-indexed. The tensors returned from this function
+ are 0-indexed. OBJ spec reference: http://www.martinreddy.net/gfx/3d/OBJ.spec
+
+ Example .obj file format:
+ ::
+ # this is a comment
+ v 1.000000 -1.000000 -1.000000
+ v 1.000000 -1.000000 1.000000
+ v -1.000000 -1.000000 1.000000
+ v -1.000000 -1.000000 -1.000000
+ v 1.000000 1.000000 -1.000000
+ vt 0.748573 0.750412
+ vt 0.749279 0.501284
+ vt 0.999110 0.501077
+ vt 0.999455 0.750380
+ vn 0.000000 0.000000 -1.000000
+ vn -1.000000 -0.000000 -0.000000
+ vn -0.000000 -0.000000 1.000000
+ f 5/2/1 1/2/1 4/3/1
+ f 5/1/1 4/3/1 2/4/1
+
+ The first character of the line denotes the type of input:
+ ::
+ - v is a vertex
+ - vt is the texture coordinate of one vertex
+ - vn is the normal of one vertex
+ - f is a face
+
+ Faces are interpreted as follows:
+ ::
+ 5/2/1 describes the first vertex of the first triangle
+ - 5: index of vertex [1.000000 1.000000 -1.000000]
+ - 2: index of texture coordinate [0.749279 0.501284]
+ - 1: index of normal [0.000000 0.000000 -1.000000]
+
+ If there are faces with more than 3 vertices
+ they are subdivided into triangles. Polygonal faces are assumed to have
+ vertices ordered counter-clockwise so the (right-handed) normal points
+ out of the screen e.g. a proper rectangular face would be specified like this:
+ ::
+ 0_________1
+ | |
+ | |
+ 3 ________2
+
+ The face would be split into two triangles: (0, 2, 1) and (0, 3, 2),
+ both of which are also oriented counter-clockwise and have normals
+ pointing out of the screen.
+
+ Args:
+ f: A file-like object (with methods read, readline, tell, and seek),
+ a pathlib path or a string containing a file name.
+ load_textures: Boolean indicating whether material files are loaded
+ create_texture_atlas: Bool, If True a per face texture map is created and
+ a tensor `texture_atlas` is also returned in `aux`.
+ texture_atlas_size: Int specifying the resolution of the texture map per face
+ when `create_texture_atlas=True`. A (texture_size, texture_size, 3)
+ map is created per face.
+ texture_wrap: string, one of ["repeat", "clamp"]. This applies when computing
+ the texture atlas.
+ If `texture_wrap="repeat"`, for uv values outside the range [0, 1] the integer part
+ is ignored and a repeating pattern is formed.
+ If `texture_wrap="clamp"` the values are clamped to the range [0, 1].
+ If None, then there is no transformation of the texture values.
+ device: Device (as str or torch.device) on which to return the new tensors.
+ path_manager: optionally a PathManager object to interpret paths.
+
+ Returns:
+ 6-element tuple containing
+
+ - **verts**: FloatTensor of shape (V, 3).
+ - **faces**: NamedTuple with fields:
+ - verts_idx: LongTensor of vertex indices, shape (F, 3).
+ - normals_idx: (optional) LongTensor of normal indices, shape (F, 3).
+ - textures_idx: (optional) LongTensor of texture indices, shape (F, 3).
+ This can be used to index into verts_uvs.
+ - materials_idx: (optional) List of indices indicating which
+ material the texture is derived from for each face.
+ If there is no material for a face, the index is -1.
+ This can be used to retrieve the corresponding values
+ in material_colors/texture_images after they have been
+ converted to tensors or Materials/Textures data
+ structures - see textures.py and materials.py for
+ more info.
+ - **aux**: NamedTuple with fields:
+ - normals: FloatTensor of shape (N, 3)
+ - verts_uvs: FloatTensor of shape (T, 2), giving the uv coordinate per
+ vertex. If a vertex is shared between two faces, it can have
+ a different uv value for each instance. Therefore it is
+ possible that the number of verts_uvs is greater than
+ num verts i.e. T > V.
+ - material_colors: if `load_textures=True` and the material has associated
+ properties this will be a dict of material names and properties of the form:
+
+ .. code-block:: python
+
+ {
+ material_name_1: {
+ "ambient_color": tensor of shape (1, 3),
+ "diffuse_color": tensor of shape (1, 3),
+ "specular_color": tensor of shape (1, 3),
+ "shininess": tensor of shape (1)
+ },
+ material_name_2: {},
+ ...
+ }
+
+ If a material does not have any properties it will have an
+ empty dict. If `load_textures=False`, `material_colors` will be None.
+
+ - texture_images: if `load_textures=True` and the material has a texture map,
+ this will be a dict of the form:
+
+ .. code-block:: python
+
+ {
+ material_name_1: (H, W, 3) image,
+ ...
+ }
+ If `load_textures=False`, `texture_images` will be None.
+ - texture_atlas: if `load_textures=True` and `create_texture_atlas=True`,
+ this will be a FloatTensor of the form: (F, texture_size, textures_size, 3)
+ If the material does not have a texture map, then all faces
+ will have a uniform white texture. Otherwise `texture_atlas` will be
+ None.
+ """
+ # Any .mtl file referenced by the .obj is resolved relative to the
+ # directory containing the .obj (only known for path-like inputs).
+ data_dir = "./"
+ if isinstance(f, (str, bytes, Path)):
+ # pyre-fixme[6]: For 1st argument expected `PathLike[Variable[AnyStr <:
+ # [str, bytes]]]` but got `Union[Path, bytes, str]`.
+ data_dir = os.path.dirname(f)
+ if path_manager is None:
+ path_manager = PathManager()
+ with _open_file(f, path_manager, "r") as f:
+ return _load_obj(
+ f,
+ data_dir=data_dir,
+ load_textures=load_textures,
+ create_texture_atlas=create_texture_atlas,
+ texture_atlas_size=texture_atlas_size,
+ texture_wrap=texture_wrap,
+ path_manager=path_manager,
+ device=device,
+ )
+
+
+def load_objs_as_meshes(
+ files: list,
+ device: Optional[Device] = None,
+ load_textures: bool = True,
+ create_texture_atlas: bool = False,
+ texture_atlas_size: int = 4,
+ texture_wrap: Optional[str] = "repeat",
+ path_manager: Optional[PathManager] = None,
+):
+ """
+ Load meshes from a list of .obj files using the load_obj function, and
+ return them as a Meshes object. This only works for meshes which have a
+ single texture image for the whole mesh. See the load_obj function for more
+ details. material_colors and normals are not stored.
+
+ Args:
+ files: A list of file-like objects (with methods read, readline, tell,
+ and seek), pathlib paths or strings containing file names.
+ device: Desired device of returned Meshes. Default:
+ uses the current device for the default tensor type.
+ load_textures: Boolean indicating whether material files are loaded
+ create_texture_atlas, texture_atlas_size, texture_wrap: as for load_obj.
+ path_manager: optionally a PathManager object to interpret paths.
+
+ Returns:
+ New Meshes object.
+ """
+ mesh_list = []
+ for f_obj in files:
+ verts, faces, aux = load_obj(
+ f_obj,
+ load_textures=load_textures,
+ create_texture_atlas=create_texture_atlas,
+ texture_atlas_size=texture_atlas_size,
+ texture_wrap=texture_wrap,
+ path_manager=path_manager,
+ )
+ tex = None
+ if create_texture_atlas:
+ # TexturesAtlas type
+ tex = TexturesAtlas(atlas=[aux.texture_atlas.to(device)])
+ else:
+ # TexturesUV type
+ tex_maps = aux.texture_images
+ if tex_maps is not None and len(tex_maps) > 0:
+ verts_uvs = aux.verts_uvs.to(device) # (V, 2)
+ faces_uvs = faces.textures_idx.to(device) # (F, 3)
+ # Only the first texture map is used (single-texture meshes).
+ image = list(tex_maps.values())[0].to(device)[None]
+ tex = TexturesUV(
+ verts_uvs=[verts_uvs], faces_uvs=[faces_uvs], maps=image
+ )
+
+ mesh = Meshes(
+ verts=[verts.to(device)], faces=[faces.verts_idx.to(device)], textures=tex
+ )
+ mesh_list.append(mesh)
+ if len(mesh_list) == 1:
+ return mesh_list[0]
+ return join_meshes_as_batch(mesh_list)
+
+
+class MeshObjFormat(MeshFormatInterpreter):
+ """
+ Pluggable-IO interpreter for the Wavefront .obj mesh format,
+ wrapping load_objs_as_meshes (read) and save_obj (write).
+ """
+
+ def __init__(self) -> None:
+ self.known_suffixes = (".obj",)
+
+ def read(
+ self,
+ path: PathOrStr,
+ include_textures: bool,
+ device: Device,
+ path_manager: PathManager,
+ create_texture_atlas: bool = False,
+ texture_atlas_size: int = 4,
+ texture_wrap: Optional[str] = "repeat",
+ **kwargs,
+ ) -> Optional[Meshes]:
+ """
+ Read a mesh from path, or return None if path does not end in .obj.
+ """
+ if not endswith(path, self.known_suffixes):
+ return None
+ mesh = load_objs_as_meshes(
+ files=[path],
+ device=device,
+ load_textures=include_textures,
+ create_texture_atlas=create_texture_atlas,
+ texture_atlas_size=texture_atlas_size,
+ texture_wrap=texture_wrap,
+ path_manager=path_manager,
+ )
+ return mesh
+
+ def save(
+ self,
+ data: Meshes,
+ path: PathOrStr,
+ path_manager: PathManager,
+ binary: Optional[bool],
+ decimal_places: Optional[int] = None,
+ **kwargs,
+ ) -> bool:
+ """
+ Save the first mesh of data to path. Only TexturesUV textures are
+ written; returns False if path does not end in .obj.
+ """
+ if not endswith(path, self.known_suffixes):
+ return False
+
+ # Only the first mesh in the batch is saved.
+ verts = data.verts_list()[0]
+ faces = data.faces_list()[0]
+
+ verts_uvs: Optional[torch.Tensor] = None
+ faces_uvs: Optional[torch.Tensor] = None
+ texture_map: Optional[torch.Tensor] = None
+
+ if isinstance(data.textures, TexturesUV):
+ verts_uvs = data.textures.verts_uvs_padded()[0]
+ faces_uvs = data.textures.faces_uvs_padded()[0]
+ texture_map = data.textures.maps_padded()[0]
+
+ save_obj(
+ f=path,
+ verts=verts,
+ faces=faces,
+ decimal_places=decimal_places,
+ path_manager=path_manager,
+ verts_uvs=verts_uvs,
+ faces_uvs=faces_uvs,
+ texture_map=texture_map,
+ )
+ return True
+
+
+def _parse_face(
+ line,
+ tokens,
+ material_idx,
+ faces_verts_idx,
+ faces_normals_idx,
+ faces_textures_idx,
+ faces_materials_idx,
+) -> None:
+ """
+ Parse a single "f" line of an .obj file and append the resulting
+ triangle indices to the four output lists in place. Polygons with
+ more than 3 vertices are fan-triangulated from the first vertex.
+
+ Args:
+ line: the raw line, used only in error messages.
+ tokens: whitespace-split tokens of the line; tokens[0] is "f".
+ material_idx: index of the currently active material (-1 if none).
+ faces_verts_idx, faces_normals_idx, faces_textures_idx,
+ faces_materials_idx: output lists appended to in place.
+
+ Raises:
+ ValueError if a vertex has more than 3 properties, or if the
+ vertices of the face specify properties inconsistently.
+ """
+ face = tokens[1:]
+ face_list = [f.split("/") for f in face]
+ face_verts = []
+ face_normals = []
+ face_textures = []
+
+ for vert_props in face_list:
+ # Vertex index.
+ face_verts.append(int(vert_props[0]))
+ if len(vert_props) > 1:
+ if vert_props[1] != "":
+ # Texture index is present e.g. f 4/1/1.
+ face_textures.append(int(vert_props[1]))
+ if len(vert_props) > 2:
+ # Normal index present e.g. 4/1/1 or 4//1.
+ face_normals.append(int(vert_props[2]))
+ if len(vert_props) > 3:
+ raise ValueError(
+ "Face vertices can only have 3 properties. \
+ Face vert %s, Line: %s"
+ % (str(vert_props), str(line))
+ )
+
+ # Triplets must be consistent for all vertices in a face e.g.
+ # legal statement: f 4/1/1 3/2/1 2/1/1.
+ # illegal statement: f 4/1/1 3//1 2//1.
+ # If the face does not have normals or textures indices
+ # fill with pad value = -1. This will ensure that
+ # all the face index tensors will have F values where
+ # F is the number of faces.
+ if len(face_normals) > 0:
+ if not (len(face_verts) == len(face_normals)):
+ raise ValueError(
+ "Face %s is an illegal statement. \
+ Vertex properties are inconsistent. Line: %s"
+ % (str(face), str(line))
+ )
+ else:
+ face_normals = [-1] * len(face_verts) # Fill with -1
+ if len(face_textures) > 0:
+ if not (len(face_verts) == len(face_textures)):
+ raise ValueError(
+ "Face %s is an illegal statement. \
+ Vertex properties are inconsistent. Line: %s"
+ % (str(face), str(line))
+ )
+ else:
+ face_textures = [-1] * len(face_verts) # Fill with -1
+
+ # Subdivide faces with more than 3 vertices.
+ # See comments of the load_obj function for more details.
+ for i in range(len(face_verts) - 2):
+ faces_verts_idx.append((face_verts[0], face_verts[i + 1], face_verts[i + 2]))
+ faces_normals_idx.append(
+ (face_normals[0], face_normals[i + 1], face_normals[i + 2])
+ )
+ faces_textures_idx.append(
+ (face_textures[0], face_textures[i + 1], face_textures[i + 2])
+ )
+ faces_materials_idx.append(material_idx)
+
+
+def _parse_obj(f, data_dir: str):
+ """
+ Load a mesh from a file-like object. See load_obj function for more details
+ about the return values.
+
+ Returns a 9-tuple of plain Python lists/values:
+ (verts, normals, verts_uvs, faces_verts_idx, faces_normals_idx,
+ faces_textures_idx, faces_materials_idx, material_names, mtl_path).
+ Indices are still 1-based as in the file; mtl_path is None if no
+ mtllib statement was seen.
+ """
+ verts, normals, verts_uvs = [], [], []
+ faces_verts_idx, faces_normals_idx, faces_textures_idx = [], [], []
+ faces_materials_idx = []
+ material_names = []
+ mtl_path = None
+
+ lines = [line.strip() for line in f]
+
+ # startswith expects each line to be a string. If the file is read in as
+ # bytes then first decode to strings.
+ if lines and isinstance(lines[0], bytes):
+ lines = [el.decode("utf-8") for el in lines]
+
+ # Index into material_names of the currently active material; -1 means
+ # no "usemtl" has been seen yet.
+ materials_idx = -1
+
+ for line in lines:
+ tokens = line.strip().split()
+ if line.startswith("mtllib"):
+ if len(tokens) < 2:
+ raise ValueError("material file name is not specified")
+ # NOTE: only allow one .mtl file per .obj.
+ # Definitions for multiple materials can be included
+ # in this one .mtl file.
+ mtl_path = line[len(tokens[0]) :].strip() # Take the remainder of the line
+ mtl_path = os.path.join(data_dir, mtl_path)
+ elif len(tokens) and tokens[0] == "usemtl":
+ # NOTE(review): a bare "usemtl" line with no name would raise
+ # IndexError here rather than a descriptive ValueError - confirm
+ # whether such malformed input needs handling.
+ material_name = tokens[1]
+ # materials are often repeated for different parts
+ # of a mesh.
+ if material_name not in material_names:
+ material_names.append(material_name)
+ materials_idx = len(material_names) - 1
+ else:
+ materials_idx = material_names.index(material_name)
+ elif line.startswith("v "): # Line is a vertex.
+ vert = [float(x) for x in tokens[1:4]]
+ if len(vert) != 3:
+ msg = "Vertex %s does not have 3 values. Line: %s"
+ raise ValueError(msg % (str(vert), str(line)))
+ verts.append(vert)
+ elif line.startswith("vt "): # Line is a texture.
+ tx = [float(x) for x in tokens[1:3]]
+ if len(tx) != 2:
+ raise ValueError(
+ "Texture %s does not have 2 values. Line: %s" % (str(tx), str(line))
+ )
+ verts_uvs.append(tx)
+ elif line.startswith("vn "): # Line is a normal.
+ norm = [float(x) for x in tokens[1:4]]
+ if len(norm) != 3:
+ msg = "Normal %s does not have 3 values. Line: %s"
+ raise ValueError(msg % (str(norm), str(line)))
+ normals.append(norm)
+ elif line.startswith("f "): # Line is a face.
+ # Update face properties info.
+ _parse_face(
+ line,
+ tokens,
+ materials_idx,
+ faces_verts_idx,
+ faces_normals_idx,
+ faces_textures_idx,
+ faces_materials_idx,
+ )
+
+ return (
+ verts,
+ normals,
+ verts_uvs,
+ faces_verts_idx,
+ faces_normals_idx,
+ faces_textures_idx,
+ faces_materials_idx,
+ material_names,
+ mtl_path,
+ )
+
+
+def _load_materials(
+ material_names: List[str],
+ f: Optional[str],
+ *,
+ data_dir: str,
+ load_textures: bool,
+ device: Device,
+ path_manager: PathManager,
+):
+ """
+ Load materials and optionally textures from the specified path.
+
+ Args:
+ material_names: a list of the material names found in the .obj file.
+ f: path to the material information.
+ data_dir: the directory where the material texture files are located.
+ load_textures: whether textures should be loaded.
+ device: Device (as str or torch.device) on which to return the new tensors.
+ path_manager: PathManager object to interpret paths.
+
+ Returns:
+ material_colors: dict of properties for each material, or None if
+ textures were not requested or no valid .mtl file was found.
+ texture_images: dict of material names and texture images, or None
+ in the same cases.
+ """
+ if not load_textures:
+ return None, None
+
+ # A missing .mtl file is not an error: warn and load geometry only.
+ if f is None:
+ warnings.warn("No mtl file provided")
+ return None, None
+
+ if not path_manager.exists(f):
+ warnings.warn(f"Mtl file does not exist: {f}")
+ return None, None
+
+ # Texture mode uv wrap
+ return load_mtl(
+ f,
+ material_names=material_names,
+ data_dir=data_dir,
+ path_manager=path_manager,
+ device=device,
+ )
+
+
+def _load_obj(
+ f_obj,
+ *,
+ data_dir: str,
+ load_textures: bool = True,
+ create_texture_atlas: bool = False,
+ texture_atlas_size: int = 4,
+ texture_wrap: Optional[str] = "repeat",
+ path_manager: PathManager,
+ device: Device = "cpu",
+):
+ """
+ Load a mesh from a file-like object. See load_obj function more details.
+ Any material files associated with the obj are expected to be in the
+ directory given by data_dir.
+ """
+
+ if texture_wrap is not None and texture_wrap not in ["repeat", "clamp"]:
+ msg = "texture_wrap must be one of ['repeat', 'clamp'] or None, got %s"
+ raise ValueError(msg % texture_wrap)
+
+ # Parse the raw file into plain Python lists (still 1-based indices).
+ (
+ verts,
+ normals,
+ verts_uvs,
+ faces_verts_idx,
+ faces_normals_idx,
+ faces_textures_idx,
+ faces_materials_idx,
+ material_names,
+ mtl_path,
+ ) = _parse_obj(f_obj, data_dir)
+
+ verts = _make_tensor(verts, cols=3, dtype=torch.float32, device=device) # (V, 3)
+ normals = _make_tensor(
+ normals,
+ cols=3,
+ dtype=torch.float32,
+ device=device,
+ ) # (N, 3)
+ verts_uvs = _make_tensor(
+ verts_uvs,
+ cols=2,
+ dtype=torch.float32,
+ device=device,
+ ) # (T, 2)
+
+ # Convert to 0-based indexing and validate ranges.
+ faces_verts_idx = _format_faces_indices(
+ faces_verts_idx, verts.shape[0], device=device
+ )
+
+ # Repeat for normals and textures if present. pad_value=-1 marks faces
+ # without normal/texture information.
+ if len(faces_normals_idx):
+ faces_normals_idx = _format_faces_indices(
+ faces_normals_idx, normals.shape[0], device=device, pad_value=-1
+ )
+ if len(faces_textures_idx):
+ faces_textures_idx = _format_faces_indices(
+ faces_textures_idx, verts_uvs.shape[0], device=device, pad_value=-1
+ )
+ if len(faces_materials_idx):
+ faces_materials_idx = torch.tensor(
+ faces_materials_idx, dtype=torch.int64, device=device
+ )
+
+ texture_atlas = None
+ material_colors, texture_images = _load_materials(
+ material_names,
+ mtl_path,
+ data_dir=data_dir,
+ load_textures=load_textures,
+ path_manager=path_manager,
+ device=device,
+ )
+
+ if material_colors and not material_names:
+ # usemtl was not present but single material was present in the .mtl file
+ material_names.append(next(iter(material_colors.keys())))
+ # replace all -1 by 0 material idx
+ if torch.is_tensor(faces_materials_idx):
+ faces_materials_idx.clamp_(min=0)
+
+ if create_texture_atlas:
+ # Using the images and properties from the
+ # material file make a per face texture map.
+
+ # Create an array of strings of material names for each face.
+ # If faces_materials_idx == -1 then that face doesn't have a material.
+ idx = faces_materials_idx.cpu().numpy()
+ face_material_names = np.array([""] + material_names)[idx + 1] # (F,)
+
+ # Construct the atlas.
+ texture_atlas = make_mesh_texture_atlas(
+ material_colors,
+ texture_images,
+ face_material_names,
+ faces_textures_idx,
+ verts_uvs,
+ texture_atlas_size,
+ texture_wrap,
+ )
+
+ faces = _Faces(
+ verts_idx=faces_verts_idx,
+ normals_idx=faces_normals_idx,
+ textures_idx=faces_textures_idx,
+ materials_idx=faces_materials_idx,
+ )
+ # Empty normals / uvs are surfaced as None rather than empty tensors.
+ aux = _Aux(
+ normals=normals if len(normals) else None,
+ verts_uvs=verts_uvs if len(verts_uvs) else None,
+ material_colors=material_colors,
+ texture_images=texture_images,
+ texture_atlas=texture_atlas,
+ )
+ return verts, faces, aux
+
+
+def save_obj(
+ f: PathOrStr,
+ verts,
+ faces,
+ decimal_places: Optional[int] = None,
+ path_manager: Optional[PathManager] = None,
+ *,
+ normals: Optional[torch.Tensor] = None,
+ faces_normals_idx: Optional[torch.Tensor] = None,
+ verts_uvs: Optional[torch.Tensor] = None,
+ faces_uvs: Optional[torch.Tensor] = None,
+ texture_map: Optional[torch.Tensor] = None,
+) -> None:
+ """
+ Save a mesh to an .obj file.
+
+ Args:
+ f: File (str or path) to which the mesh should be written.
+ verts: FloatTensor of shape (V, 3) giving vertex coordinates.
+ faces: LongTensor of shape (F, 3) giving faces.
+ decimal_places: Number of decimal places for saving.
+ path_manager: Optional PathManager for interpreting f if
+ it is a str.
+ normals: FloatTensor of shape (V, 3) giving normals for faces_normals_idx
+ to index into.
+ faces_normals_idx: LongTensor of shape (F, 3) giving the index into
+ normals for each vertex in the face.
+ verts_uvs: FloatTensor of shape (V, 2) giving the uv coordinate per vertex.
+ faces_uvs: LongTensor of shape (F, 3) giving the index into verts_uvs for
+ each vertex in the face.
+ texture_map: FloatTensor of shape (H, W, 3) representing the texture map
+ for the mesh which will be saved as an image. The values are expected
+ to be in the range [0, 1].
+ """
+ if len(verts) and (verts.dim() != 2 or verts.size(1) != 3):
+ message = "'verts' should either be empty or of shape (num_verts, 3)."
+ raise ValueError(message)
+
+ if len(faces) and (faces.dim() != 2 or faces.size(1) != 3):
+ message = "'faces' should either be empty or of shape (num_faces, 3)."
+ raise ValueError(message)
+
+ if (normals is None) != (faces_normals_idx is None):
+ message = "'normals' and 'faces_normals_idx' must both be None or neither."
+ raise ValueError(message)
+
+ if faces_normals_idx is not None and (
+ faces_normals_idx.dim() != 2 or faces_normals_idx.size(1) != 3
+ ):
+ message = (
+ "'faces_normals_idx' should either be empty or of shape (num_faces, 3)."
+ )
+ raise ValueError(message)
+
+ if normals is not None and (normals.dim() != 2 or normals.size(1) != 3):
+ message = "'normals' should either be empty or of shape (num_verts, 3)."
+ raise ValueError(message)
+
+ if faces_uvs is not None and (faces_uvs.dim() != 2 or faces_uvs.size(1) != 3):
+ message = "'faces_uvs' should either be empty or of shape (num_faces, 3)."
+ raise ValueError(message)
+
+ if verts_uvs is not None and (verts_uvs.dim() != 2 or verts_uvs.size(1) != 2):
+ message = "'verts_uvs' should either be empty or of shape (num_verts, 2)."
+ raise ValueError(message)
+
+ if texture_map is not None and (texture_map.dim() != 3 or texture_map.size(2) != 3):
+ message = "'texture_map' should either be empty or of shape (H, W, 3)."
+ raise ValueError(message)
+
+ if path_manager is None:
+ path_manager = PathManager()
+
+ # Texture is only written when uvs, face uvs AND a texture map are given.
+ save_texture = all(t is not None for t in [faces_uvs, verts_uvs, texture_map])
+ output_path = Path(f)
+
+ # Save the .obj file
+ # pyre-fixme[9]: f has type `Union[Path, str]`; used as `IO[typing.Any]`.
+ with _open_file(f, path_manager, "w") as f:
+ if save_texture:
+ # Add the header required for the texture info to be loaded correctly
+ obj_header = "\nmtllib {0}.mtl\nusemtl mesh\n\n".format(output_path.stem)
+ # pyre-fixme[16]: Item `Path` of `Union[Path, str]` has no attribute
+ # `write`.
+ f.write(obj_header)
+ _save(
+ f,
+ verts,
+ faces,
+ decimal_places,
+ normals=normals,
+ faces_normals_idx=faces_normals_idx,
+ verts_uvs=verts_uvs,
+ faces_uvs=faces_uvs,
+ save_texture=save_texture,
+ save_normals=normals is not None,
+ )
+
+ # Save the .mtl and .png files associated with the texture
+ if save_texture:
+ image_path = output_path.with_suffix(".png")
+ mtl_path = output_path.with_suffix(".mtl")
+ # NOTE(review): `f` was rebound to the file object by the `with`
+ # statement above, so this isinstance check can never be True and
+ # image_path/mtl_path always stay Path objects - confirm _open_file
+ # accepts Path inputs.
+ if isinstance(f, str):
+ # Back to str for iopath interpretation.
+ image_path = str(image_path)
+ mtl_path = str(mtl_path)
+
+ # Save texture map to output folder
+ # pyre-fixme[16] # undefined attribute cpu
+ texture_map = texture_map.detach().cpu() * 255.0
+ image = Image.fromarray(texture_map.numpy().astype(np.uint8))
+ with _open_file(image_path, path_manager, "wb") as im_f:
+ image.save(im_f)
+
+ # Create .mtl file with the material name and texture map filename
+ # TODO: enable material properties to also be saved.
+ with _open_file(mtl_path, path_manager, "w") as f_mtl:
+ lines = f"newmtl mesh\n" f"map_Kd {output_path.stem}.png\n"
+ f_mtl.write(lines)
+
+
+# TODO (nikhilar) Speed up this function.
+# TODO (nikhilar) Speed up this function.
+def _save(
+ f,
+ verts,
+ faces,
+ decimal_places: Optional[int] = None,
+ *,
+ normals: Optional[torch.Tensor] = None,
+ faces_normals_idx: Optional[torch.Tensor] = None,
+ verts_uvs: Optional[torch.Tensor] = None,
+ faces_uvs: Optional[torch.Tensor] = None,
+ save_texture: bool = False,
+ save_normals: bool = False,
+) -> None:
+ """
+ Write verts (and optionally normals, uvs and faces) to the already-open
+ text file f in .obj syntax. See save_obj for argument semantics; this
+ helper assumes the shape validation in save_obj has been done but
+ re-checks the basics defensively.
+ """
+
+ if len(verts) and (verts.dim() != 2 or verts.size(1) != 3):
+ message = "'verts' should either be empty or of shape (num_verts, 3)."
+ raise ValueError(message)
+
+ if len(faces) and (faces.dim() != 2 or faces.size(1) != 3):
+ message = "'faces' should either be empty or of shape (num_faces, 3)."
+ raise ValueError(message)
+
+ if not (len(verts) or len(faces)):
+ warnings.warn("Empty 'verts' and 'faces' arguments provided")
+ return
+
+ verts, faces = verts.cpu(), faces.cpu()
+
+ lines = ""
+
+ # Without decimal_places, use the default "%f" (6 decimal places).
+ if decimal_places is None:
+ float_str = "%f"
+ else:
+ float_str = "%" + ".%df" % decimal_places
+
+ if len(verts):
+ V, D = verts.shape
+ for i in range(V):
+ vert = [float_str % verts[i, j] for j in range(D)]
+ lines += "v %s\n" % " ".join(vert)
+
+ if save_normals:
+ assert normals is not None
+ assert faces_normals_idx is not None
+ lines += _write_normals(normals, faces_normals_idx, float_str)
+
+ if save_texture:
+ assert faces_uvs is not None
+ assert verts_uvs is not None
+
+ if faces_uvs is not None and (faces_uvs.dim() != 2 or faces_uvs.size(1) != 3):
+ message = "'faces_uvs' should either be empty or of shape (num_faces, 3)."
+ raise ValueError(message)
+
+ if verts_uvs is not None and (verts_uvs.dim() != 2 or verts_uvs.size(1) != 2):
+ message = "'verts_uvs' should either be empty or of shape (num_verts, 2)."
+ raise ValueError(message)
+
+ verts_uvs, faces_uvs = verts_uvs.cpu(), faces_uvs.cpu()
+
+ # Save verts uvs after verts
+ if len(verts_uvs):
+ uV, uD = verts_uvs.shape
+ for i in range(uV):
+ uv = [float_str % verts_uvs[i, j] for j in range(uD)]
+ lines += "vt %s\n" % " ".join(uv)
+
+ f.write(lines)
+
+ # Out-of-range faces are only warned about, not rejected.
+ if torch.any(faces >= verts.shape[0]) or torch.any(faces < 0):
+ warnings.warn("Faces have invalid indices")
+
+ if len(faces):
+ _write_faces(
+ f,
+ faces,
+ faces_uvs if save_texture else None,
+ faces_normals_idx if save_normals else None,
+ )
+
+
+def _write_normals(
+ normals: torch.Tensor, faces_normals_idx: torch.Tensor, float_str: str
+) -> str:
+ """
+ Return the "vn" lines for normals as a single string, formatting each
+ value with float_str. faces_normals_idx is only shape-validated here;
+ the indices themselves are written by _write_faces.
+ """
+ if faces_normals_idx.dim() != 2 or faces_normals_idx.size(1) != 3:
+ message = (
+ "'faces_normals_idx' should either be empty or of shape (num_faces, 3)."
+ )
+ raise ValueError(message)
+
+ if normals.dim() != 2 or normals.size(1) != 3:
+ message = "'normals' should either be empty or of shape (num_verts, 3)."
+ raise ValueError(message)
+
+ normals, faces_normals_idx = normals.cpu(), faces_normals_idx.cpu()
+
+ lines = []
+ V, D = normals.shape
+ for i in range(V):
+ normal = [float_str % normals[i, j] for j in range(D)]
+ lines.append("vn %s\n" % " ".join(normal))
+ return "".join(lines)
+
+
+def _write_faces(
+ f,
+ faces: torch.Tensor,
+ faces_uvs: Optional[torch.Tensor],
+ faces_normals_idx: Optional[torch.Tensor],
+) -> None:
+ """
+ Write the "f" lines for faces to the open file f, converting the
+ 0-based tensor indices to the 1-based .obj convention. Depending on
+ which of faces_uvs / faces_normals_idx are provided, each vertex is
+ written as v, v/vt, v//vn or v/vt/vn. The last face is written
+ without a trailing newline.
+ """
+ F, P = faces.shape
+ for i in range(F):
+ if faces_normals_idx is not None:
+ if faces_uvs is not None:
+ # Format faces as {verts_idx}/{verts_uvs_idx}/{verts_normals_idx}
+ face = [
+ "%d/%d/%d"
+ % (
+ faces[i, j] + 1,
+ faces_uvs[i, j] + 1,
+ faces_normals_idx[i, j] + 1,
+ )
+ for j in range(P)
+ ]
+ else:
+ # Format faces as {verts_idx}//{verts_normals_idx}
+ face = [
+ "%d//%d" % (faces[i, j] + 1, faces_normals_idx[i, j] + 1)
+ for j in range(P)
+ ]
+ elif faces_uvs is not None:
+ # Format faces as {verts_idx}/{verts_uvs_idx}
+ face = ["%d/%d" % (faces[i, j] + 1, faces_uvs[i, j] + 1) for j in range(P)]
+ else:
+ face = ["%d" % (faces[i, j] + 1) for j in range(P)]
+
+ if i + 1 < F:
+ f.write("f %s\n" % " ".join(face))
+ else:
+ # No newline at the end of the file.
+ f.write("f %s" % " ".join(face))
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/off_io.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/off_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..78b4390d493d0d16bff73655d566e5cbbc0c657f
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/off_io.py
@@ -0,0 +1,496 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+"""
+This module implements utility functions for loading and saving
+meshes as .off files.
+
+This format is introduced, for example, at
+http://www.geomview.org/docs/html/OFF.html .
+"""
+import warnings
+from typing import cast, Optional, Tuple, Union
+
+import numpy as np
+import torch
+from iopath.common.file_io import PathManager
+from pytorch3d.io.utils import _check_faces_indices, _open_file, PathOrStr
+from pytorch3d.renderer import TexturesAtlas, TexturesVertex
+from pytorch3d.structures import Meshes
+
+from .pluggable_formats import endswith, MeshFormatInterpreter
+
+
+def _is_line_empty(line: Union[str, bytes]) -> bool:
+ """
+ Returns whether line is not relevant in an OFF file.
+ """
+ line = line.strip()
+ return len(line) == 0 or line[:1] == b"#"
+
+
+def _count_next_line_periods(file) -> int:
+ """
+ Returns the number of . characters before any # on the next
+ meaningful line.
+ """
+ old_offset = file.tell()
+ line = file.readline()
+ while _is_line_empty(line):
+ line = file.readline()
+ if len(line) == 0:
+ raise ValueError("Premature end of file")
+
+ contents = line.split(b"#")[0]
+ count = contents.count(b".")
+ file.seek(old_offset)
+ return count
+
+
def _read_faces_lump(
    file, n_faces: int, n_colors: Optional[int]
) -> Optional[Tuple[np.ndarray, int, Optional[np.ndarray]]]:
    """
    Parse n_faces faces and faces_colors from the file,
    if they all have the same number of vertices.
    This is used in two ways.
    1) To try to read all faces.
    2) To read faces one-by-one if that failed.

    Args:
        file: file-like object being read.
        n_faces: The known number of faces yet to read.
        n_colors: The number of colors if known already.

    Returns:
        - 2D numpy array of faces
        - number of colors found
        - 2D numpy array of face colors if found.
        or None if there are faces with different numbers of vertices.
    """
    if n_faces == 0:
        # Degenerate (1, 0) placeholder; callers return before reaching
        # here in practice because _read_faces handles n_faces == 0.
        return np.array([[]]), 0, None
    old_offset = file.tell()
    try:
        with warnings.catch_warnings():
            # Suppress numpy's UserWarning on empty input; the length
            # check below reports the problem instead.
            warnings.filterwarnings(
                "ignore", message=".* Empty input file.*", category=UserWarning
            )
            data = np.loadtxt(file, dtype=np.float32, ndmin=2, max_rows=n_faces)
    except ValueError as e:
        # np.loadtxt raises "Wrong number of columns" when polygon sizes
        # differ between lines. Rewind so the caller can retry parsing
        # the faces one at a time; any other failure is real.
        if n_faces > 1 and "Wrong number of columns" in e.args[0]:
            file.seek(old_offset)
            return None
        raise ValueError("Not enough face data.") from None

    if len(data) != n_faces:
        raise ValueError("Not enough face data.")
    # First column of each row is the polygon's vertex count.
    face_size = int(data[0, 0])
    if (data[:, 0] != face_size).any():
        msg = "A line of face data did not have the specified length."
        raise ValueError(msg)
    if face_size < 3:
        raise ValueError("Faces must have at least 3 vertices.")

    # Whatever follows the vertex indices on each line is color data.
    n_colors_found = data.shape[1] - 1 - face_size
    if n_colors is not None and n_colors_found != n_colors:
        raise ValueError("Number of colors differs between faces.")
    n_colors = n_colors_found
    if n_colors not in [0, 3, 4]:
        raise ValueError("Unexpected number of colors.")

    face_raw_data = data[:, 1 : 1 + face_size].astype("int64")
    if face_size == 3:
        face_data = face_raw_data
    else:
        # Triangulate each polygon as a fan around its first vertex.
        face_arrays = [
            face_raw_data[:, [0, i + 1, i + 2]] for i in range(face_size - 2)
        ]
        face_data = np.vstack(face_arrays)

    if n_colors == 0:
        return face_data, 0, None
    colors = data[:, 1 + face_size :]
    if face_size == 3:
        return face_data, n_colors, colors
    # Every triangle of the fan inherits its source polygon's color.
    return face_data, n_colors, np.tile(colors, (face_size - 2, 1))
+
+
def _read_faces(
    file, n_faces: int
) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
    """
    Read n_faces faces (and any per-face colors) from the file.

    First attempts a single fast parse of all faces; if the polygons have
    differing vertex counts, falls back to parsing one face at a time.

    Args:
        file: file-like object being read.
        n_faces: The known number of faces.

    Returns:
        2D numpy arrays of faces and face colors, or None for each if
        they are not present.
    """
    if n_faces == 0:
        return None, None

    # Colors with no "." on the line are integers in [0, 255] and must be
    # rescaled; float colors are used as-is.
    color_scale = 1 / 255.0 if _count_next_line_periods(file) == 0 else 1

    bulk = _read_faces_lump(file, n_faces=n_faces, n_colors=None)
    if bulk is not None:
        all_faces, _, all_colors = bulk
        if all_colors is None:
            return all_faces, None
        return all_faces, all_colors * color_scale

    # Mixed polygon sizes: parse one face per call, threading the color
    # count through so inconsistencies are detected.
    face_chunks, color_chunks = [], []
    color_count = None
    for _ in range(n_faces):
        lump = cast(
            Tuple[np.ndarray, int, Optional[np.ndarray]],
            _read_faces_lump(file, n_faces=1, n_colors=color_count),
        )
        one_face, color_count, one_color = lump
        face_chunks.append(one_face)
        color_chunks.append(one_color)
    faces = np.vstack(face_chunks)
    if color_count == 0:
        return faces, None
    return faces, np.vstack(color_chunks) * color_scale
+
+
def _read_verts(file, n_verts: int) -> Tuple[np.ndarray, Optional[np.ndarray]]:
    """
    Read n_verts vertices (and any per-vertex colors) from the file.

    Args:
        file: file-like object being read.
        n_verts: The known number of vertices.

    Returns:
        2D numpy arrays of verts and (if present) vertex colors.
    """
    # A vertex line has exactly 3 "." for its float coordinates; extra
    # periods mean the trailing colors are floats, none extra means the
    # colors (if any) are 0-255 integers needing rescaling.
    color_scale = 1 / 255.0 if _count_next_line_periods(file) == 3 else 1

    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore", message=".* Empty input file.*", category=UserWarning
        )
        data = np.loadtxt(file, dtype=np.float32, ndmin=2, max_rows=n_verts)
    n_rows, n_cols = data.shape
    if n_rows != n_verts:
        raise ValueError("Not enough vertex data.")
    # 3 columns: xyz only; 6 or 7: xyz followed by RGB or RGBA colors.
    if n_cols not in (3, 6, 7):
        raise ValueError("Bad vertex data.")

    if n_cols == 3:
        return data, None
    return data[:, :3], data[:, 3:] * color_scale
+
+
def _load_off_stream(file) -> dict:
    """
    Load the data from a stream of an .off file.

    Example .off file format:

    off
    8 6 1927  { number of vertices, faces, and (not used) edges }
    # comment  { comments with # sign }
    0 0 0 { start of vertex list }
    0 0 1
    0 1 1
    0 1 0
    1 0 0
    1 0 1
    1 1 1
    1 1 0
    4 0 1 2 3 { start of face list }
    4 7 6 5 4
    4 0 4 5 1
    4 1 5 6 2
    4 2 6 7 3
    4 3 7 4 0

    Args:
        file: A binary file-like object (with methods read, readline,
                tell and seek).

    Returns dictionary possibly containing:
        verts: (always present) FloatTensor of shape (V, 3).
        verts_colors: FloatTensor of shape (V, C) where C is 3 or 4.
        faces: LongTensor of vertex indices, split into triangles, shape (F, 3).
        faces_colors: FloatTensor of shape (F, C), where C is 3 or 4.
    """
    header = file.readline()

    # Skip blank lines and comments before the header keyword.
    while _is_line_empty(header):
        header = file.readline()

    # The counts may appear on the same line as "off" or on a later line.
    if header[:3].lower() == b"off":
        header = header[3:]

    while _is_line_empty(header):
        header = file.readline()

    items = header.split()
    if len(items) < 3:
        raise ValueError("Invalid counts line: %s" % header)

    try:
        n_verts = int(items[0])
    except ValueError:
        raise ValueError("Invalid counts line: %s" % header) from None
    try:
        n_faces = int(items[1])
    except ValueError:
        raise ValueError("Invalid counts line: %s" % header) from None

    # items[2] is the (unused) edge count. A fourth token is tolerated
    # only if it starts a comment.
    if (len(items) > 3 and not items[3].startswith(b"#")) or n_verts < 0 or n_faces < 0:
        raise ValueError("Invalid counts line: %s" % header)

    verts, verts_colors = _read_verts(file, n_verts)
    faces, faces_colors = _read_faces(file, n_faces)

    # Any non-whitespace content after the declared faces is an error.
    end = file.read().strip()
    if len(end) != 0:
        raise ValueError("Extra data at end of file: " + str(end[:20]))

    # Optional keys are simply omitted when the data is absent.
    out = {"verts": verts}
    if verts_colors is not None:
        out["verts_colors"] = verts_colors
    if faces is not None:
        out["faces"] = faces
    if faces_colors is not None:
        out["faces_colors"] = faces_colors
    return out
+
+
+def _write_off_data(
+ file,
+ verts: torch.Tensor,
+ verts_colors: Optional[torch.Tensor] = None,
+ faces: Optional[torch.LongTensor] = None,
+ faces_colors: Optional[torch.Tensor] = None,
+ decimal_places: Optional[int] = None,
+) -> None:
+ """
+ Internal implementation for saving 3D data to a .off file.
+
+ Args:
+ file: Binary file object to which the 3D data should be written.
+ verts: FloatTensor of shape (V, 3) giving vertex coordinates.
+ verts_colors: FloatTensor of shape (V, C) giving vertex colors where C is 3 or 4.
+ faces: LongTensor of shape (F, 3) giving faces.
+ faces_colors: FloatTensor of shape (V, C) giving face colors where C is 3 or 4.
+ decimal_places: Number of decimal places for saving.
+ """
+ nfaces = 0 if faces is None else faces.shape[0]
+ file.write(f"off\n{verts.shape[0]} {nfaces} 0\n".encode("ascii"))
+
+ if verts_colors is not None:
+ verts = torch.cat((verts, verts_colors), dim=1)
+ if decimal_places is None:
+ float_str = "%f"
+ else:
+ float_str = "%" + ".%df" % decimal_places
+ np.savetxt(file, verts.cpu().detach().numpy(), float_str)
+
+ if faces is not None:
+ _check_faces_indices(faces, max_index=verts.shape[0])
+
+ if faces_colors is not None:
+ face_data = torch.cat(
+ [
+ cast(torch.Tensor, faces).cpu().to(torch.float64),
+ faces_colors.detach().cpu().to(torch.float64),
+ ],
+ dim=1,
+ )
+ format = "3 %d %d %d" + " %f" * faces_colors.shape[1]
+ np.savetxt(file, face_data.numpy(), format)
+ elif faces is not None:
+ np.savetxt(file, faces.cpu().detach().numpy(), "3 %d %d %d")
+
+
+def _save_off(
+ file,
+ *,
+ verts: torch.Tensor,
+ verts_colors: Optional[torch.Tensor] = None,
+ faces: Optional[torch.LongTensor] = None,
+ faces_colors: Optional[torch.Tensor] = None,
+ decimal_places: Optional[int] = None,
+ path_manager: PathManager,
+) -> None:
+ """
+ Save a mesh to an ascii .off file.
+
+ Args:
+ file: File (or path) to which the mesh should be written.
+ verts: FloatTensor of shape (V, 3) giving vertex coordinates.
+ verts_colors: FloatTensor of shape (V, C) giving vertex colors where C is 3 or 4.
+ faces: LongTensor of shape (F, 3) giving faces.
+ faces_colors: FloatTensor of shape (V, C) giving face colors where C is 3 or 4.
+ decimal_places: Number of decimal places for saving.
+ """
+ if len(verts) and not (verts.dim() == 2 and verts.size(1) == 3):
+ message = "Argument 'verts' should either be empty or of shape (num_verts, 3)."
+ raise ValueError(message)
+
+ if verts_colors is not None and 0 == len(verts_colors):
+ verts_colors = None
+ if faces_colors is not None and 0 == len(faces_colors):
+ faces_colors = None
+ if faces is not None and 0 == len(faces):
+ faces = None
+
+ if verts_colors is not None:
+ if not (verts_colors.dim() == 2 and verts_colors.size(1) in [3, 4]):
+ message = "verts_colors should have shape (num_faces, C)."
+ raise ValueError(message)
+ if verts_colors.shape[0] != verts.shape[0]:
+ message = "verts_colors should have the same length as verts."
+ raise ValueError(message)
+
+ if faces is not None and not (faces.dim() == 2 and faces.size(1) == 3):
+ message = "Argument 'faces' if present should have shape (num_faces, 3)."
+ raise ValueError(message)
+ if faces_colors is not None and faces is None:
+ message = "Cannot have face colors without faces"
+ raise ValueError(message)
+
+ if faces_colors is not None:
+ if not (faces_colors.dim() == 2 and faces_colors.size(1) in [3, 4]):
+ message = "faces_colors should have shape (num_faces, C)."
+ raise ValueError(message)
+ if faces_colors.shape[0] != cast(torch.LongTensor, faces).shape[0]:
+ message = "faces_colors should have the same length as faces."
+ raise ValueError(message)
+
+ with _open_file(file, path_manager, "wb") as f:
+ _write_off_data(f, verts, verts_colors, faces, faces_colors, decimal_places)
+
+
class MeshOffFormat(MeshFormatInterpreter):
    """
    Loads and saves meshes in the ascii OFF format. This is a simple
    format which can only deal with the following texture types:

    - TexturesVertex, i.e. one color for each vertex
    - TexturesAtlas with R=1, i.e. one color for each face.

    There are some possible features of OFF files which we do not support
    and which appear to be rare:

    - Four dimensional data.
    - Binary data.
    - Vertex Normals.
    - Texture coordinates.
    - "COFF" header.

    Example .off file format:

    off
    8 6 1927  { number of vertices, faces, and (not used) edges }
    # comment  { comments with # sign }
    0 0 0 { start of vertex list }
    0 0 1
    0 1 1
    0 1 0
    1 0 0
    1 0 1
    1 1 1
    1 1 0
    4 0 1 2 3 { start of face list }
    4 7 6 5 4
    4 0 4 5 1
    4 1 5 6 2
    4 2 6 7 3
    4 3 7 4 0

    """

    def __init__(self) -> None:
        # Only plain ".off" paths are claimed by this interpreter.
        self.known_suffixes = (".off",)

    def read(
        self,
        path: PathOrStr,
        include_textures: bool,
        device,
        path_manager: PathManager,
        **kwargs,
    ) -> Optional[Meshes]:
        """
        Read a mesh from an .off file, returning None if the path does
        not look like an OFF file.

        NOTE(review): include_textures is accepted but not consulted
        below — colors are always loaded when present in the file.
        """
        if not endswith(path, self.known_suffixes):
            return None

        with _open_file(path, path_manager, "rb") as f:
            data = _load_off_stream(f)
        verts = torch.from_numpy(data["verts"]).to(device)
        if "faces" in data:
            faces = torch.from_numpy(data["faces"]).to(dtype=torch.int64, device=device)
        else:
            # No faces in the file: use an empty (0, 3) faces tensor.
            faces = torch.zeros((0, 3), dtype=torch.int64, device=device)

        # Vertex colors take priority over face colors when both exist.
        textures = None
        if "verts_colors" in data:
            if "faces_colors" in data:
                msg = "Faces colors ignored because vertex colors provided too."
                warnings.warn(msg)
            verts_colors = torch.from_numpy(data["verts_colors"]).to(device)
            textures = TexturesVertex([verts_colors])
        elif "faces_colors" in data:
            # One color per face becomes a 1x1 texture atlas per face.
            faces_colors = torch.from_numpy(data["faces_colors"]).to(device)
            textures = TexturesAtlas([faces_colors[:, None, None, :]])

        mesh = Meshes(
            verts=[verts.to(device)], faces=[faces.to(device)], textures=textures
        )
        return mesh

    def save(
        self,
        data: Meshes,
        path: PathOrStr,
        path_manager: PathManager,
        binary: Optional[bool],
        decimal_places: Optional[int] = None,
        **kwargs,
    ) -> bool:
        """
        Save the first mesh of `data` as ascii OFF; returns False if the
        path does not look like an OFF file (so another interpreter can
        try), True on success. `binary` is ignored: only ascii is written.
        """
        if not endswith(path, self.known_suffixes):
            return False

        verts = data.verts_list()[0]
        faces = data.faces_list()[0]
        if isinstance(data.textures, TexturesVertex):
            [verts_colors] = data.textures.verts_features_list()
        else:
            verts_colors = None

        faces_colors = None
        if isinstance(data.textures, TexturesAtlas):
            [atlas] = data.textures.atlas_list()
            F, R, _, D = atlas.shape
            # Only a 1x1 atlas maps to one color per face; a finer atlas
            # cannot be represented in OFF and its colors are dropped.
            if R == 1:
                faces_colors = atlas[:, 0, 0, :]

        _save_off(
            file=path,
            verts=verts,
            faces=faces,
            verts_colors=verts_colors,
            faces_colors=faces_colors,
            decimal_places=decimal_places,
            path_manager=path_manager,
        )
        return True
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/pluggable.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/pluggable.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e9951ac5f29296dfd059d5632b9d2a89c091013
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/pluggable.py
@@ -0,0 +1,225 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+from collections import deque
+from pathlib import Path
+from typing import Deque, Optional, Union
+
+from iopath.common.file_io import PathManager
+from pytorch3d.common.datatypes import Device
+from pytorch3d.structures import Meshes, Pointclouds
+
+from .obj_io import MeshObjFormat
+from .off_io import MeshOffFormat
+from .pluggable_formats import MeshFormatInterpreter, PointcloudFormatInterpreter
+from .ply_io import MeshPlyFormat, PointcloudPlyFormat
+
+
+"""
+This module has the master functions for loading and saving data.
+
+The main usage is via the IO object, and its methods
+`load_mesh`, `save_mesh`, `load_pointcloud` and `save_pointcloud`.
+
+For example, to load a mesh you might do::
+
+ from pytorch3d.io import IO
+
+ mesh = IO().load_mesh("mymesh.obj")
+
+and to save a point cloud you might do::
+
+ pcl = Pointclouds(...)
+ IO().save_pointcloud(pcl, "output_pointcloud.obj")
+
+"""
+
+
class IO:
    """
    This class is the interface to flexible loading and saving of meshes and point clouds.

    In simple cases the user will just initialize an instance of this class as `IO()`
    and then use its load and save functions. The arguments of the initializer are not
    usually needed.

    The user can add their own formats for saving and loading by passing their own objects
    to the register_* functions.

    Args:
        include_default_formats: If False, the built-in file formats will not be available.
            Then only user-registered formats can be used.
        path_manager: Used to customize how paths given as strings are interpreted.
    """

    def __init__(
        self,
        include_default_formats: bool = True,
        path_manager: Optional[PathManager] = None,
    ) -> None:
        if path_manager is None:
            self.path_manager = PathManager()
        else:
            self.path_manager = path_manager

        # Deques of registered interpreters, tried front-to-back.
        self.mesh_interpreters: Deque[MeshFormatInterpreter] = deque()
        self.pointcloud_interpreters: Deque[PointcloudFormatInterpreter] = deque()

        if include_default_formats:
            self.register_default_formats()

    def register_default_formats(self) -> None:
        """Register the built-in OBJ, OFF and PLY interpreters."""
        self.register_meshes_format(MeshObjFormat())
        self.register_meshes_format(MeshOffFormat())
        self.register_meshes_format(MeshPlyFormat())
        self.register_pointcloud_format(PointcloudPlyFormat())

    def register_meshes_format(self, interpreter: MeshFormatInterpreter) -> None:
        """
        Register a new interpreter for a new mesh file format.

        Args:
            interpreter: the new interpreter to use, which must be an instance
                of a class which inherits MeshFormatInterpreter.
        """
        if not isinstance(interpreter, MeshFormatInterpreter):
            raise ValueError("Invalid interpreter")
        # appendleft: the most recently registered interpreter is tried first.
        self.mesh_interpreters.appendleft(interpreter)

    def register_pointcloud_format(
        self, interpreter: PointcloudFormatInterpreter
    ) -> None:
        """
        Register a new interpreter for a new point cloud file format.

        Args:
            interpreter: the new interpreter to use, which must be an instance
                of a class which inherits PointcloudFormatInterpreter.
        """
        if not isinstance(interpreter, PointcloudFormatInterpreter):
            raise ValueError("Invalid interpreter")
        # appendleft: the most recently registered interpreter is tried first.
        self.pointcloud_interpreters.appendleft(interpreter)

    def load_mesh(
        self,
        path: Union[str, Path],
        include_textures: bool = True,
        device: Device = "cpu",
        **kwargs,
    ) -> Meshes:
        """
        Attempt to load a mesh from the given file, using a registered format.
        Materials are not returned. If you have a .obj file with materials
        you might want to load them with the load_obj function instead.

        Args:
            path: file to read
            include_textures: whether to try to load texture information
            device: device on which to leave the data.

        Returns:
            new Meshes object containing one mesh.

        Raises:
            ValueError: if no registered interpreter accepts the path.
        """
        # Each interpreter returns None to decline; the first non-None wins.
        for mesh_interpreter in self.mesh_interpreters:
            mesh = mesh_interpreter.read(
                path,
                include_textures=include_textures,
                path_manager=self.path_manager,
                device=device,
                **kwargs,
            )
            if mesh is not None:
                return mesh

        raise ValueError(f"No mesh interpreter found to read {path}.")

    def save_mesh(
        self,
        data: Meshes,
        path: Union[str, Path],
        binary: Optional[bool] = None,
        include_textures: bool = True,
        **kwargs,
    ) -> None:
        """
        Attempt to save a mesh to the given file, using a registered format.

        Args:
            data: a 1-element Meshes
            path: file to write
            binary: If there is a choice, whether to save in a binary format.
            include_textures: If textures are present, whether to try to save
                them.

        Raises:
            ValueError: if data is not a 1-element Meshes, or no registered
                interpreter accepts the path.
        """
        if not isinstance(data, Meshes):
            raise ValueError("Meshes object expected.")

        if len(data) != 1:
            raise ValueError("Can only save a single mesh.")

        # Each interpreter returns False to decline; the first True wins.
        for mesh_interpreter in self.mesh_interpreters:
            success = mesh_interpreter.save(
                data, path, path_manager=self.path_manager, binary=binary, **kwargs
            )
            if success:
                return

        raise ValueError(f"No mesh interpreter found to write to {path}.")

    def load_pointcloud(
        self, path: Union[str, Path], device: Device = "cpu", **kwargs
    ) -> Pointclouds:
        """
        Attempt to load a point cloud from the given file, using a registered format.

        Args:
            path: file to read
            device: Device (as str or torch.device) on which to load the data.

        Returns:
            new Pointclouds object containing one point cloud.

        Raises:
            ValueError: if no registered interpreter accepts the path.
        """
        for pointcloud_interpreter in self.pointcloud_interpreters:
            pointcloud = pointcloud_interpreter.read(
                path, path_manager=self.path_manager, device=device, **kwargs
            )
            if pointcloud is not None:
                return pointcloud

        raise ValueError(f"No point cloud interpreter found to read {path}.")

    def save_pointcloud(
        self,
        data: Pointclouds,
        path: Union[str, Path],
        binary: Optional[bool] = None,
        **kwargs,
    ) -> None:
        """
        Attempt to save a point cloud to the given file, using a registered format.

        Args:
            data: a 1-element Pointclouds
            path: file to write
            binary: If there is a choice, whether to save in a binary format.

        Raises:
            ValueError: if data is not a 1-element Pointclouds, or no
                registered interpreter accepts the path.
        """
        if not isinstance(data, Pointclouds):
            raise ValueError("Pointclouds object expected.")

        if len(data) != 1:
            raise ValueError("Can only save a single point cloud.")

        for pointcloud_interpreter in self.pointcloud_interpreters:
            success = pointcloud_interpreter.save(
                data, path, path_manager=self.path_manager, binary=binary, **kwargs
            )
            if success:
                return

        raise ValueError(f"No point cloud interpreter found to write to {path}.")
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/pluggable_formats.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/pluggable_formats.py
new file mode 100644
index 0000000000000000000000000000000000000000..328a124dc12ec2fbf7e0e39e589d111414f32a2a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/pluggable_formats.py
@@ -0,0 +1,142 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+import pathlib
+from typing import Optional, Tuple
+
+from iopath.common.file_io import PathManager
+from pytorch3d.common.datatypes import Device
+from pytorch3d.io.utils import PathOrStr
+from pytorch3d.structures import Meshes, Pointclouds
+
+
+"""
+This module has the base classes which must be extended to define
+an interpreter for loading and saving data in a particular format.
+These can be registered on an IO object so that they can be used in
+its load_* and save_* functions.
+"""
+
+
def endswith(path: PathOrStr, suffixes: Tuple[str, ...]) -> bool:
    """
    Return whether the path ends with one of the given suffixes.

    A non-path object (e.g. an open stream) always returns True, so
    format interpreters can reject unsuitable file names while still
    accepting arbitrary streams.
    """
    if isinstance(path, str):
        return path.lower().endswith(suffixes)
    if isinstance(path, pathlib.Path):
        return path.suffix.lower() in suffixes
    # Not a recognisable path: accept it.
    return True
+
+
class MeshFormatInterpreter:
    """
    This is a base class for an interpreter which can read or write
    a mesh in a particular format.

    Subclasses signal "not my format" by returning None from read() or
    False from save(), which lets the IO class try each registered
    interpreter in turn.
    """

    def read(
        self,
        path: PathOrStr,
        include_textures: bool,
        device: Device,
        path_manager: PathManager,
        **kwargs,
    ) -> Optional[Meshes]:
        """
        Read the data from the specified file and return it as
        a Meshes object.

        Args:
            path: path to load.
            include_textures: whether to try to load texture information.
            device: torch.device to load data on to.
            path_manager: PathManager to interpret the path.

        Returns:
            None if self is not the appropriate object to interpret the given
            path.
            Otherwise, the read Meshes object.
        """
        raise NotImplementedError()

    def save(
        self,
        data: Meshes,
        path: PathOrStr,
        path_manager: PathManager,
        binary: Optional[bool],
        **kwargs,
    ) -> bool:
        """
        Save the given Meshes object to the given path.

        Args:
            data: mesh to save
            path: path to save to, which may be overwritten.
            path_manager: PathManager to interpret the path.
            binary: If there is a choice, whether to save in a binary format.

        Returns:
            False: if self is not the appropriate object to write to the given path.
            True: on success.
        """
        raise NotImplementedError()
+
+
class PointcloudFormatInterpreter:
    """
    This is a base class for an interpreter which can read or write
    a point cloud in a particular format.

    Subclasses signal "not my format" by returning None from read() or
    False from save(), which lets the IO class try each registered
    interpreter in turn.
    """

    def read(
        self, path: PathOrStr, device: Device, path_manager: PathManager, **kwargs
    ) -> Optional[Pointclouds]:
        """
        Read the data from the specified file and return it as
        a Pointclouds object.

        Args:
            path: path to load.
            device: torch.device to load data on to.
            path_manager: PathManager to interpret the path.

        Returns:
            None if self is not the appropriate object to interpret the given
            path.
            Otherwise, the read Pointclouds object.
        """
        raise NotImplementedError()

    def save(
        self,
        data: Pointclouds,
        path: PathOrStr,
        path_manager: PathManager,
        binary: Optional[bool],
        **kwargs,
    ) -> bool:
        """
        Save the given Pointclouds object to the given path.

        Args:
            data: point cloud object to save
            path: path to save to, which may be overwritten.
            path_manager: PathManager to interpret the path.
            binary: If there is a choice, whether to save in a binary format.

        Returns:
            False: if self is not the appropriate object to write to the given path.
            True: on success.
        """
        raise NotImplementedError()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/ply_io.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/ply_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..99207e6fd1903cf63c887f211f8f824cb3da3dbe
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/ply_io.py
@@ -0,0 +1,1539 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+"""
+This module implements utility functions for loading and saving
+meshes and point clouds as PLY files.
+"""
+import itertools
+import os
+import struct
+import sys
+import warnings
+from collections import namedtuple
+from dataclasses import asdict, dataclass
+from io import BytesIO, TextIOBase
+from typing import List, Optional, Tuple
+
+import numpy as np
+import torch
+from iopath.common.file_io import PathManager
+from pytorch3d.io.utils import (
+ _check_faces_indices,
+ _make_tensor,
+ _open_file,
+ _read_image,
+ PathOrStr,
+)
+from pytorch3d.renderer import TexturesUV, TexturesVertex
+from pytorch3d.structures import Meshes, Pointclouds
+
+from .pluggable_formats import (
+ endswith,
+ MeshFormatInterpreter,
+ PointcloudFormatInterpreter,
+)
+
+
# (size in bytes, struct format character, numpy dtype) for one PLY scalar type.
_PlyTypeData = namedtuple("_PlyTypeData", "size struct_char np_type")

# Map from each spelling of a PLY scalar type — the classic names and the
# explicitly-sized names — to its binary/numpy representation.
_PLY_TYPES = {
    "char": _PlyTypeData(1, "b", np.byte),
    "uchar": _PlyTypeData(1, "B", np.ubyte),
    "short": _PlyTypeData(2, "h", np.short),
    "ushort": _PlyTypeData(2, "H", np.ushort),
    "int": _PlyTypeData(4, "i", np.int32),
    "uint": _PlyTypeData(4, "I", np.uint32),
    "float": _PlyTypeData(4, "f", np.float32),
    "double": _PlyTypeData(8, "d", np.float64),
    "int8": _PlyTypeData(1, "b", np.byte),
    "uint8": _PlyTypeData(1, "B", np.ubyte),
    "int16": _PlyTypeData(2, "h", np.short),
    "uint16": _PlyTypeData(2, "H", np.ushort),
    "int32": _PlyTypeData(4, "i", np.int32),
    "uint32": _PlyTypeData(4, "I", np.uint32),
    "float32": _PlyTypeData(4, "f", np.float32),
    "float64": _PlyTypeData(8, "d", np.float64),
}

# One declared property of an element: list_size_type is None for scalars,
# otherwise the integer type which prefixes each list with its length.
_Property = namedtuple("_Property", "name data_type list_size_type")
+
+
+class _PlyElementType:
+ """
+ Description of an element of a Ply file.
+ Members:
+ self.properties: (List[_Property]) description of all the properties.
+ Each one contains a name and data type.
+ self.count: (int) number of such elements in the file
+ self.name: (str) name of the element
+ """
+
+ def __init__(self, name: str, count: int) -> None:
+ self.name = name
+ self.count = count
+ self.properties: List[_Property] = []
+
+ def add_property(
+ self, name: str, data_type: str, list_size_type: Optional[str] = None
+ ):
+ """Adds a new property.
+
+ Args:
+ name: (str) name of the property.
+ data_type: (str) PLY data type.
+ list_size_type: (str) PLY data type of the list size, or None if not
+ a list.
+ """
+ for property in self.properties:
+ if property.name == name:
+ msg = "Cannot have two properties called %s in %s."
+ raise ValueError(msg % (name, self.name))
+ self.properties.append(_Property(name, data_type, list_size_type))
+
+ def is_fixed_size(self) -> bool:
+ """Return whether the Element has no list properties
+
+ Returns:
+ True if none of the properties are lists.
+ """
+ for property in self.properties:
+ if property.list_size_type is not None:
+ return False
+ return True
+
+ def is_constant_type_fixed_size(self) -> bool:
+ """Return whether the Element has all properties of the same non-list
+ type.
+
+ Returns:
+ True if none of the properties are lists and all the properties
+ share a type.
+ """
+ if not self.is_fixed_size():
+ return False
+ first_type = _PLY_TYPES[self.properties[0].data_type]
+ for property in self.properties:
+ if _PLY_TYPES[property.data_type] != first_type:
+ return False
+ return True
+
+ def try_constant_list(self) -> bool:
+ """Whether the element is just a single list, which might have a
+ constant size, and therefore we could try to parse quickly with numpy.
+
+ Returns:
+ True if the only property is a list.
+ """
+ if len(self.properties) != 1:
+ return False
+ if self.properties[0].list_size_type is None:
+ return False
+ return True
+
+
class _PlyHeader:
    def __init__(self, f) -> None:
        """
        Load a header of a Ply file from a file-like object.
        Members:
            self.elements: (List[_PlyElementType]) element description
            self.ascii: (bool) Whether in ascii format
            self.big_endian: (bool) (if not ascii) whether big endian
            self.obj_info: (List[str]) arbitrary extra data
            self.comments: (List[str]) comments

        Args:
            f: file-like object.

        Raises:
            ValueError: on any malformed header line, a missing format
                line, no elements, or an element with no properties.
        """
        # The magic line may be bytes (binary stream) or str (text stream).
        if f.readline() not in [b"ply\n", b"ply\r\n", "ply\n"]:
            raise ValueError("Invalid file header.")
        seen_format = False
        self.elements: List[_PlyElementType] = []
        self.comments: List[str] = []
        self.obj_info: List[str] = []
        while True:
            line = f.readline()
            if isinstance(line, bytes):
                line = line.decode("ascii")
            line = line.strip()
            if line == "end_header":
                # Validate completeness before finishing: at least one
                # element, the last element has properties, and a format
                # line was seen.
                if not self.elements:
                    raise ValueError("No elements found.")
                if not self.elements[-1].properties:
                    raise ValueError("Found an element with no properties.")
                if not seen_format:
                    raise ValueError("No format line found.")
                break
            if not seen_format:
                # Exactly one of the three known format lines must appear
                # before any other recognised content takes effect.
                if line == "format ascii 1.0":
                    seen_format = True
                    self.ascii = True
                    continue
                if line == "format binary_little_endian 1.0":
                    seen_format = True
                    self.ascii = False
                    self.big_endian = False
                    continue
                if line == "format binary_big_endian 1.0":
                    seen_format = True
                    self.ascii = False
                    self.big_endian = True
                    continue
                if line.startswith("format"):
                    raise ValueError("Invalid format line.")
            if line.startswith("comment "):
                self.comments.append(line[8:])
                continue
            # A bare "comment" (no trailing space) or blank line is ignored.
            if line.startswith("comment") or len(line) == 0:
                continue
            if line.startswith("element"):
                self._parse_element(line)
                continue
            if line.startswith("obj_info "):
                self.obj_info.append(line[9:])
                continue
            if line.startswith("property"):
                self._parse_property(line)
                continue
            raise ValueError("Invalid line: %s." % line)

    def _parse_property(self, line: str):
        """
        Decode a ply file header property line.

        Args:
            line: (str) the ply file's line.
        """
        # A property always belongs to the most recently declared element.
        if not self.elements:
            raise ValueError("Encountered property before any element.")
        items = line.split(" ")
        # Scalar: "property <type> <name>" (3 tokens);
        # list: "property list <size_type> <type> <name>" (5 tokens).
        if len(items) not in [3, 5]:
            raise ValueError("Invalid line: %s" % line)
        datatype = items[1]
        name = items[-1]
        if datatype == "list":
            datatype = items[3]
            list_size_type = items[2]
            if list_size_type not in _PLY_TYPES:
                raise ValueError("Invalid datatype: %s" % list_size_type)
        else:
            list_size_type = None
        if datatype not in _PLY_TYPES:
            raise ValueError("Invalid datatype: %s" % datatype)
        self.elements[-1].add_property(name, datatype, list_size_type)

    def _parse_element(self, line: str):
        """
        Decode a ply file header element line.

        Args:
            line: (str) the ply file's line.
        """
        # The previous element (if any) must have gained some properties
        # before a new one begins.
        if self.elements and not self.elements[-1].properties:
            raise ValueError("Found an element with no properties.")
        items = line.split(" ")
        # "element <name> <count>"
        if len(items) != 3:
            raise ValueError("Invalid line: %s" % line)
        try:
            count = int(items[2])
        except ValueError:
            msg = "Number of items for %s was not a number."
            raise ValueError(msg % items[1]) from None
        self.elements.append(_PlyElementType(items[1], count))
+
+
def _read_ply_fixed_size_element_ascii(f, definition: _PlyElementType):
    """
    Read the data for an element whose properties are all scalars sharing a
    single type, so the whole element can be parsed in one numpy call.

    For example

        element vertex 8
        property float x
        property float y
        property float z

    Args:
        f: file-like object being read.
        definition: The element object which describes what we are reading.

    Returns:
        1-element list containing a 2D numpy array corresponding to the data.
        The rows are the different values. There is one column for each property.
    """
    dtype = _PLY_TYPES[definition.properties[0].data_type].np_type
    start = f.tell()
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore", message=".* Empty input file.*", category=UserWarning
        )
        values = np.loadtxt(
            f, dtype=dtype, comments=None, ndmin=2, max_rows=definition.count
        )
    if not len(values):
        # np.loadtxt() seeks even on empty data; restore the position.
        f.seek(start)
    n_rows, n_cols = values.shape
    if n_cols != len(definition.properties):
        raise ValueError("Inconsistent data for %s." % definition.name)
    if n_rows != definition.count:
        raise ValueError("Not enough data for %s." % definition.name)
    return [values]
+
+
def _read_ply_nolist_element_ascii(f, definition: _PlyElementType):
    """
    Read the data for an element with no list properties but mixed scalar
    types. Everything is parsed as float64 first, then runs of adjacent
    same-typed columns are converted to their declared types.

    For example, given

        element vertex 8
        property float x
        property float y
        property float z
        property uchar red
        property uchar green
        property uchar blue

    the output will have two arrays, the first containing (x,y,z)
    and the second (red,green,blue).

    Args:
        f: file-like object being read.
        definition: The element object which describes what we are reading.

    Returns:
        List of 2D numpy arrays corresponding to the data.
    """
    start = f.tell()
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore", message=".* Empty input file.*", category=UserWarning
        )
        raw = np.loadtxt(
            f, dtype=np.float64, comments=None, ndmin=2, max_rows=definition.count
        )
    if not len(raw):
        # np.loadtxt() seeks even on empty data; restore the position.
        f.seek(start)
    if raw.shape[1] != len(definition.properties):
        raise ValueError("Inconsistent data for %s." % definition.name)
    if raw.shape[0] != definition.count:
        raise ValueError("Not enough data for %s." % definition.name)
    pieces = []
    start_col = 0
    # Group adjacent properties which share a type into one output array.
    for data_type, group in itertools.groupby(
        p.data_type for p in definition.properties
    ):
        stop_col = start_col + len(list(group))
        pieces.append(raw[:, start_col:stop_col].astype(_PLY_TYPES[data_type].np_type))
        start_col = stop_col
    return pieces
+
+
def _try_read_ply_constant_list_ascii(f, definition: _PlyElementType):
    """
    If definition is an element which is a single list, attempt to read the
    corresponding data assuming every value has the same length.
    If the data is ragged, return None and leave f undisturbed.

    For example, if the element is

        element face 2
        property list uchar int vertex_index

    and the data is

        4 0 1 2 3
        4 7 6 5 4

    then the function will return

        [[0, 1, 2, 3],
         [7, 6, 5, 4]]

    but if the data is

        4 0 1 2 3
        3 6 5 4

    then the function will return None.

    Args:
        f: file-like object being read.
        definition: The element object which describes what we are reading.

    Returns:
        If every element has the same size, 2D numpy array corresponding to the
        data. The rows are the different values. Otherwise None.
    """
    np_type = _PLY_TYPES[definition.properties[0].data_type].np_type
    # Remember the position so we can roll back if the data turns out ragged.
    old_offset = f.tell()
    try:
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", message=".* Empty input file.*", category=UserWarning
            )
            # np.loadtxt raises ValueError when rows have differing numbers
            # of columns — that is exactly the ragged case we probe for.
            data = np.loadtxt(
                f, dtype=np_type, comments=None, ndmin=2, max_rows=definition.count
            )
    except ValueError:
        # Ragged data: rewind so the caller can re-parse line by line.
        f.seek(old_offset)
        return None
    if not len(data):  # np.loadtxt() seeks even on empty data
        f.seek(old_offset)
    # Column 0 holds each row's declared list length; it must equal the
    # number of remaining columns for the constant-size assumption to hold.
    if (data[:, 0] != data.shape[1] - 1).any():
        msg = "A line of %s data did not have the specified length."
        raise ValueError(msg % definition.name)
    if data.shape[0] != definition.count:
        raise ValueError("Not enough data for %s." % definition.name)
    # Drop the length column; return only the list payloads.
    return data[:, 1:]
+
+
def _parse_heterogeneous_property_ascii(datum, line_iter, property: _Property):
    """
    Read one property value (scalar or list) from the word iterator of an
    ascii .ply data line and append it to datum.

    Args:
        datum: list to append the single value to. That value will be a numpy
            array if the property is a list property, otherwise an int or
            float.
        line_iter: iterator to words on the line from which we read.
        property: the property object describing the property we are reading.
    """
    first_word = next(line_iter, None)
    if first_word is None:
        raise ValueError("Too little data for an element.")
    if property.list_size_type is not None:
        # List property: first word is the element count, then that many values.
        try:
            n_values = int(first_word)
        except ValueError:
            raise ValueError("A list length was not a number.") from None
        values = np.zeros(n_values, dtype=_PLY_TYPES[property.data_type].np_type)
        for idx in range(n_values):
            word = next(line_iter, None)
            if word is None:
                raise ValueError("Too little data for an element.")
            try:
                values[idx] = float(word)
            except ValueError:
                raise ValueError("Bad numerical data.") from None
        datum.append(values)
        return
    # Scalar property: parse as float or int depending on the declared type.
    parser = float if property.data_type in ["double", "float"] else int
    try:
        datum.append(parser(first_word))
    except ValueError:
        raise ValueError("Bad numerical data.") from None
+
+
def _read_ply_element_ascii(f, definition: _PlyElementType):
    """
    Decode all instances of a single element from an ascii .ply file.

    Args:
        f: file-like object being read.
        definition: The element object which describes what we are reading.

    Returns:
        In simple cases where every element has the same size, 2D numpy array
        corresponding to the data. The rows are the different values.
        Otherwise a list of lists of values, where the outer list is
        each occurrence of the element, and the inner lists have one value per
        property.
    """
    if not definition.count:
        return []
    # Fast paths, from most to least restrictive element shape.
    if definition.is_constant_type_fixed_size():
        return _read_ply_fixed_size_element_ascii(f, definition)
    if definition.is_fixed_size():
        return _read_ply_nolist_element_ascii(f, definition)
    if definition.try_constant_list():
        data = _try_read_ply_constant_list_ascii(f, definition)
        if data is not None:
            return data

    # We failed to read the element as a lump, must process each line manually.
    data = []
    for _i in range(definition.count):
        line_string = f.readline()
        # Use truthiness rather than `== ""`: PLY files are opened in binary
        # mode (see _load_ply_raw), so EOF yields b"", which compares unequal
        # to "" and previously slipped past this check.
        if not line_string:
            raise ValueError("Not enough data for %s." % definition.name)
        datum = []
        line_iter = iter(line_string.strip().split())
        for property in definition.properties:
            _parse_heterogeneous_property_ascii(datum, line_iter, property)
        data.append(datum)
        if next(line_iter, None) is not None:
            raise ValueError("Too much data for an element.")
    return data
+
+
+def _read_raw_array(
+ f, aim: str, length: int, dtype: type = np.uint8, dtype_size: int = 1
+):
+ """
+ Read [length] elements from a file.
+
+ Args:
+ f: file object
+ aim: name of target for error message
+ length: number of elements
+ dtype: numpy type
+ dtype_size: number of bytes per element.
+
+ Returns:
+ new numpy array
+ """
+
+ if isinstance(f, BytesIO):
+ # np.fromfile is faster but won't work on a BytesIO
+ needed_bytes = length * dtype_size
+ bytes_data = bytearray(needed_bytes)
+ n_bytes_read = f.readinto(bytes_data)
+ if n_bytes_read != needed_bytes:
+ raise ValueError("Not enough data for %s." % aim)
+ data = np.frombuffer(bytes_data, dtype=dtype)
+ else:
+ data = np.fromfile(f, dtype=dtype, count=length)
+ if data.shape[0] != length:
+ raise ValueError("Not enough data for %s." % aim)
+ return data
+
+
def _read_ply_fixed_size_element_binary(
    f, definition: _PlyElementType, big_endian: bool
):
    """
    Read the data for a binary element whose properties are all scalars
    sharing a single type, so the payload is one contiguous block.

    For example

        element vertex 8
        property float x
        property float y
        property float z

    Args:
        f: file-like object being read.
        definition: The element object which describes what we are reading.
        big_endian: (bool) whether the document is encoded as big endian.

    Returns:
        1-element list containing a 2D numpy array corresponding to the data.
        The rows are the different values. There is one column for each property.
    """
    ply_type = _PLY_TYPES[definition.properties[0].data_type]
    n_columns = len(definition.properties)
    flat = _read_raw_array(
        f,
        definition.name,
        definition.count * n_columns,
        ply_type.np_type,
        ply_type.size,
    )
    if big_endian != (sys.byteorder == "big"):
        # File byte order differs from the machine's native order.
        flat = flat.byteswap()
    return [flat.reshape(definition.count, n_columns)]
+
+
def _read_ply_element_binary_nolists(f, definition: _PlyElementType, big_endian: bool):
    """
    Given an element which has no lists, read the corresponding data as tuple
    of numpy arrays, one for each set of adjacent columns with the same type.

    For example, given

        element vertex 8
        property float x
        property float y
        property float z
        property uchar red
        property uchar green
        property uchar blue

    the output will have two arrays, the first containing (x,y,z)
    and the second (red,green,blue).

    Args:
        f: file-like object being read.
        definition: The element object which describes what we are reading.
        big_endian: (bool) whether the document is encoded as big endian.

    Returns:
        List of 2D numpy arrays corresponding to the data. The rows are the different
        values.
    """
    # Read the whole element as raw bytes, one row of `size` bytes per instance.
    size = sum(_PLY_TYPES[prop.data_type].size for prop in definition.properties)
    needed_bytes = size * definition.count
    data = _read_raw_array(f, definition.name, needed_bytes).reshape(-1, size)
    offset = 0
    pieces = []
    # Adjacent properties with the same type form one output array.
    for dtype, it in itertools.groupby(p.data_type for p in definition.properties):
        count = sum(1 for _ in it)
        bytes_each = count * _PLY_TYPES[dtype].size
        end_offset = offset + bytes_each

        # what we want to do is
        # piece = data[:, offset:end_offset].view(_PLY_TYPES[dtype].np_type)
        # but it fails in the general case
        # because of https://github.com/numpy/numpy/issues/9496.
        # Instead, reinterpret the first row's byte-slice as the target type
        # and build a zero-copy strided view over all rows from it.
        piece = np.lib.stride_tricks.as_strided(
            data[:1, offset:end_offset].view(_PLY_TYPES[dtype].np_type),
            shape=(definition.count, count),
            strides=(data.strides[0], _PLY_TYPES[dtype].size),
        )

        if (sys.byteorder == "big") != big_endian:
            # byteswap() copies, which also detaches the piece from the view.
            piece = piece.byteswap()
        pieces.append(piece)
        offset = end_offset
    return pieces
+
+
def _try_read_ply_constant_list_binary(
    f, definition: _PlyElementType, big_endian: bool
):
    """
    If definition is an element which is a single list, attempt to read the
    corresponding data assuming every value has the same length.
    If the data is ragged, return None and leave f undisturbed.

    For example, if the element is

        element face 2
        property list uchar int vertex_index

    and the data is

        4 0 1 2 3
        4 7 6 5 4

    then the function will return

        [[0, 1, 2, 3],
         [7, 6, 5, 4]]

    but if the data is

        4 0 1 2 3
        3 6 5 4

    then the function will return None.

    Args:
        f: file-like object being read.
        definition: The element object which describes what we are reading.
        big_endian: (bool) whether the document is encoded as big endian.

    Returns:
        If every element has the same size, 2D numpy array corresponding to the
        data. The rows are the different values. Otherwise None.
    """
    property = definition.properties[0]
    endian_str = ">" if big_endian else "<"
    length_format = endian_str + _PLY_TYPES[property.list_size_type].struct_char
    length_struct = struct.Struct(length_format)

    def get_length():
        # Read and decode one list-length prefix from the stream.
        bytes_data = f.read(length_struct.size)
        if len(bytes_data) != length_struct.size:
            raise ValueError("Not enough data for %s." % definition.name)
        [length] = length_struct.unpack(bytes_data)
        return length

    # Remember the position so we can roll back if the data turns out ragged.
    old_offset = f.tell()

    # The first row's length fixes the assumed length for all rows.
    length = get_length()
    np_type = _PLY_TYPES[definition.properties[0].data_type].np_type
    type_size = _PLY_TYPES[definition.properties[0].data_type].size
    data_size = type_size * length

    output = np.zeros((definition.count, length), dtype=np_type)

    for i in range(definition.count):
        bytes_data = f.read(data_size)
        if len(bytes_data) != data_size:
            raise ValueError("Not enough data for %s" % definition.name)
        output[i] = np.frombuffer(bytes_data, dtype=np_type)
        if i + 1 == definition.count:
            # Last row: no further length prefix to check.
            break
        # Peek the next row's length; any mismatch means the data is ragged.
        if length != get_length():
            f.seek(old_offset)
            return None
    if (sys.byteorder == "big") != big_endian:
        output = output.byteswap()

    return output
+
+
def _read_ply_element_binary(f, definition: _PlyElementType, big_endian: bool) -> list:
    """
    Decode all instances of a single element from a binary .ply file.

    Args:
        f: file-like object being read.
        definition: The element object which describes what we are reading.
        big_endian: (bool) whether the document is encoded as big endian.

    Returns:
        In simple cases where every element has the same size, 2D numpy array
        corresponding to the data. The rows are the different values.
        Otherwise a list of lists/tuples of values, where the outer list is
        each occurrence of the element, and the inner lists have one value per
        property.
    """
    if not definition.count:
        return []

    # Fast paths, from most to least restrictive element shape.
    if definition.is_constant_type_fixed_size():
        return _read_ply_fixed_size_element_binary(f, definition, big_endian)
    if definition.is_fixed_size():
        return _read_ply_element_binary_nolists(f, definition, big_endian)
    if definition.try_constant_list():
        data = _try_read_ply_constant_list_binary(f, definition, big_endian)
        if data is not None:
            return data

    # We failed to read the element as a lump, must process each line manually.
    prefix = ">" if big_endian else "<"
    # One precompiled struct per property, decoding either the scalar value
    # or (for list properties) the length prefix.
    structs = [
        struct.Struct(
            prefix + _PLY_TYPES[prop.list_size_type or prop.data_type].struct_char
        )
        for prop in definition.properties
    ]

    needs_swap = (sys.byteorder == "big") != big_endian
    rows = []
    for _ in range(definition.count):
        row = []
        for prop, prop_struct in zip(definition.properties, structs):
            raw = f.read(prop_struct.size)
            if len(raw) != prop_struct.size:
                raise ValueError("Not enough data for %s" % definition.name)
            [first] = prop_struct.unpack(raw)
            if prop.list_size_type is None:
                row.append(first)
                continue
            # `first` is the list length; read that many fixed-size items.
            item_bytes = _PLY_TYPES[prop.data_type].size * first
            list_raw = f.read(item_bytes)
            if len(list_raw) != item_bytes:
                raise ValueError("Not enough data for %s" % definition.name)
            values = np.frombuffer(list_raw, dtype=_PLY_TYPES[prop.data_type].np_type)
            if needs_swap:
                values = values.byteswap()
            row.append(values)
        rows.append(row)
    return rows
+
+
def _load_ply_raw_stream(f) -> Tuple[_PlyHeader, dict]:
    """
    Implementation for _load_ply_raw which takes a stream.

    Args:
        f: A binary or text file-like object.

    Returns:
        header: A _PlyHeader object describing the metadata in the ply file.
        elements: A dictionary of element names to values. If an element is regular, in
            the sense of having no lists or being one uniformly-sized list, then the
            value will be a 2D numpy array. If not, it is a list of the relevant
            property values.
    """
    header = _PlyHeader(f)
    if header.ascii:
        elements = {
            element.name: _read_ply_element_ascii(f, element)
            for element in header.elements
        }
    else:
        if isinstance(f, TextIOBase):
            raise ValueError(
                "Cannot safely read a binary ply file using a Text stream."
            )
        elements = {
            element.name: _read_ply_element_binary(f, element, header.big_endian)
            for element in header.elements
        }
    # Nothing meaningful should remain after the declared elements.
    trailing = f.read().strip()
    if len(trailing) != 0:
        raise ValueError("Extra data at end of file: " + str(trailing[:20]))
    return header, elements
+
+
def _load_ply_raw(f, path_manager: PathManager) -> Tuple[_PlyHeader, dict]:
    """
    Load the data from a .ply file.

    Args:
        f: A binary or text file-like object (with methods read, readline,
            tell and seek), a pathlib path or a string containing a file name.
            If the ply file is binary, a text stream is not supported.
            It is recommended to use a binary stream.
        path_manager: PathManager for loading if f is a str.

    Returns:
        header: A _PlyHeader object describing the metadata in the ply file.
        elements: A dictionary of element names to values. If an element is
            regular, in the sense of having no lists or being one
            uniformly-sized list, then the value will be a 2D numpy array.
            If it has no lists but more than one type, it will be a list of arrays.
            If not, it is a list of the relevant property values.
    """
    with _open_file(f, path_manager, "rb") as stream:
        return _load_ply_raw_stream(stream)
+
+
@dataclass(frozen=True)
class _VertsColumnIndices:
    """
    Contains the relevant layout of the verts section of file being read.
    Members
        point_idxs: List[int] of 3 point columns.
        color_idxs: List[int] of 3 color columns if they are present,
                    otherwise None.
        color_scale: value to scale colors by.
        normal_idxs: List[int] of 3 normals columns if they are present,
                    otherwise None.
        texture_uv_idxs: List[int] of 2 texture-uv columns if they are
                    present, otherwise None.
    """

    point_idxs: List[int]
    color_idxs: Optional[List[int]]
    color_scale: float
    normal_idxs: Optional[List[int]]
    texture_uv_idxs: Optional[List[int]]
+
+
def _get_verts_column_indices(
    vertex_head: _PlyElementType,
) -> _VertsColumnIndices:
    """
    Get the columns of verts, verts_colors, verts_normals and texture uvs
    in the vertex element of a parsed ply file, together with a color scale
    factor. When the colors are in byte format, they are scaled from 0..255
    to [0,1]. Otherwise they are not scaled.

    For example, if the vertex element looks as follows:

        element vertex 892
          property double x
          property double y
          property double z
          property double nx
          property double ny
          property double nz
          property uchar red
          property uchar green
          property uchar blue
          property double texture_u
          property double texture_v

    then the return value will be a _VertsColumnIndices object with
    point_idxs=[0, 1, 2], color_idxs=[6, 7, 8], color_scale=1.0/255,
    normal_idxs=[3, 4, 5] and texture_uv_idxs=[9, 10].

    Args:
        vertex_head: as returned from load_ply_raw.

    Returns:
        _VertsColumnIndices object
    """
    point_idxs: List[Optional[int]] = [None, None, None]
    color_idxs: List[Optional[int]] = [None, None, None]
    normal_idxs: List[Optional[int]] = [None, None, None]
    texture_uv_idxs: List[Optional[int]] = [None, None]
    for i, prop in enumerate(vertex_head.properties):
        if prop.list_size_type is not None:
            raise ValueError("Invalid vertices in file: did not expect list.")
        for j, letter in enumerate(["x", "y", "z"]):
            if prop.name == letter:
                point_idxs[j] = i
        for j, name in enumerate(["red", "green", "blue"]):
            if prop.name == name:
                color_idxs[j] = i
        for j, name in enumerate(["nx", "ny", "nz"]):
            if prop.name == name:
                normal_idxs[j] = i
        for j, name in enumerate(["texture_u", "texture_v"]):
            if prop.name == name:
                texture_uv_idxs[j] = i
    if None in point_idxs:
        raise ValueError("Invalid vertices in file.")
    # Only scale colors stored as single bytes (e.g. uchar); wider types
    # are assumed to already be in the desired range.
    color_scale = 1.0
    if all(
        idx is not None and _PLY_TYPES[vertex_head.properties[idx].data_type].size == 1
        for idx in color_idxs
    ):
        color_scale = 1.0 / 255
    return _VertsColumnIndices(
        point_idxs=point_idxs,
        color_idxs=None if None in color_idxs else color_idxs,
        color_scale=color_scale,
        normal_idxs=None if None in normal_idxs else normal_idxs,
        texture_uv_idxs=None if None in texture_uv_idxs else texture_uv_idxs,
    )
+
+
@dataclass(frozen=True)
class _VertsData:
    """
    Contains the data of the verts section of file being read.
    Members:
        verts: FloatTensor of shape (V, 3).
        verts_colors: None or FloatTensor of shape (V, 3).
        verts_normals: None or FloatTensor of shape (V, 3).
        verts_texture_uvs: None or FloatTensor of shape (V, 2).
    """

    verts: torch.Tensor
    verts_colors: Optional[torch.Tensor] = None
    verts_normals: Optional[torch.Tensor] = None
    verts_texture_uvs: Optional[torch.Tensor] = None
+
+
def _get_verts(header: _PlyHeader, elements: dict) -> _VertsData:
    """
    Get the vertex locations, colors, normals and texture uvs from a
    parsed ply file.

    Args:
        header, elements: as returned from load_ply_raw.

    Returns:
        _VertsData object

    Raises:
        ValueError: if the file has no vertex element or the vertex data
            has an unexpected form.
    """

    vertex = elements.get("vertex", None)
    if vertex is None:
        raise ValueError("The ply file has no vertex element.")
    if not isinstance(vertex, list):
        raise ValueError("Invalid vertices in file.")
    vertex_head = next(head for head in header.elements if head.name == "vertex")

    column_idxs = _get_verts_column_indices(vertex_head)

    # Case of no vertices
    # NOTE(review): in this branch normals and texture uvs are not returned
    # even if their columns are declared — presumably deliberate; confirm.
    if vertex_head.count == 0:
        verts = torch.zeros((0, 3), dtype=torch.float32)
        if column_idxs.color_idxs is None:
            return _VertsData(verts=verts)
        return _VertsData(
            verts=verts, verts_colors=torch.zeros((0, 3), dtype=torch.float32)
        )

    # Simple case where the only data is the vertices themselves
    if (
        len(vertex) == 1
        and isinstance(vertex[0], np.ndarray)
        and vertex[0].ndim == 2
        and vertex[0].shape[1] == 3
    ):
        return _VertsData(verts=_make_tensor(vertex[0], cols=3, dtype=torch.float32))

    vertex_colors = None
    vertex_normals = None
    vertex_texture_uvs = None

    if len(vertex) == 1:
        # This is the case where the whole vertex element has one type,
        # so it was read as a single array and we can index straight into it.
        verts = torch.tensor(vertex[0][:, column_idxs.point_idxs], dtype=torch.float32)
        if column_idxs.color_idxs is not None:
            vertex_colors = column_idxs.color_scale * torch.tensor(
                vertex[0][:, column_idxs.color_idxs], dtype=torch.float32
            )
        if column_idxs.normal_idxs is not None:
            vertex_normals = torch.tensor(
                vertex[0][:, column_idxs.normal_idxs], dtype=torch.float32
            )
        if column_idxs.texture_uv_idxs is not None:
            vertex_texture_uvs = torch.tensor(
                vertex[0][:, column_idxs.texture_uv_idxs], dtype=torch.float32
            )
    else:
        # The vertex element is heterogeneous. It was read as several arrays,
        # part by part, where a part is a set of properties with the same type.
        # For each property (=column in the file), we store in
        # prop_to_partnum_col its partnum (i.e. the index of what part it is
        # in) and its column number (its index within its part).
        prop_to_partnum_col = [
            (partnum, col)
            for partnum, array in enumerate(vertex)
            for col in range(array.shape[1])
        ]
        verts = torch.empty(size=(vertex_head.count, 3), dtype=torch.float32)
        for axis in range(3):
            partnum, col = prop_to_partnum_col[column_idxs.point_idxs[axis]]
            verts.numpy()[:, axis] = vertex[partnum][:, col]
            # Note that in the previous line, we made the assignment
            # as numpy arrays by casting verts. If we took the (more
            # obvious) method of converting the right hand side to
            # torch, then we might have an extra data copy because
            # torch wants contiguity. The code would be like:
            #   if not vertex[partnum].flags["C_CONTIGUOUS"]:
            #       vertex[partnum] = np.ascontiguousarray(vertex[partnum])
            #   verts[:, axis] = torch.tensor((vertex[partnum][:, col]))
        if column_idxs.color_idxs is not None:
            vertex_colors = torch.empty(
                size=(vertex_head.count, 3), dtype=torch.float32
            )
            for color in range(3):
                partnum, col = prop_to_partnum_col[column_idxs.color_idxs[color]]
                vertex_colors.numpy()[:, color] = vertex[partnum][:, col]
            # Scale in place after gathering all three channels.
            vertex_colors *= column_idxs.color_scale
        if column_idxs.normal_idxs is not None:
            vertex_normals = torch.empty(
                size=(vertex_head.count, 3), dtype=torch.float32
            )
            for axis in range(3):
                partnum, col = prop_to_partnum_col[column_idxs.normal_idxs[axis]]
                vertex_normals.numpy()[:, axis] = vertex[partnum][:, col]
        if column_idxs.texture_uv_idxs is not None:
            vertex_texture_uvs = torch.empty(
                size=(vertex_head.count, 2),
                dtype=torch.float32,
            )
            for axis in range(2):
                partnum, col = prop_to_partnum_col[column_idxs.texture_uv_idxs[axis]]
                vertex_texture_uvs.numpy()[:, axis] = vertex[partnum][:, col]
    return _VertsData(
        verts=verts,
        verts_colors=vertex_colors,
        verts_normals=vertex_normals,
        verts_texture_uvs=vertex_texture_uvs,
    )
+
+
@dataclass(frozen=True)
class _PlyData:
    """
    Contains the data from a PLY file which has been read.
    Members:
        header: _PlyHeader of file metadata from the header
        verts: FloatTensor of shape (V, 3).
        faces: None or LongTensor of vertex indices, shape (F, 3).
        verts_colors: None or FloatTensor of shape (V, 3).
        verts_normals: None or FloatTensor of shape (V, 3).
        verts_texture_uvs: None or FloatTensor of shape (V, 2).
    """

    header: _PlyHeader
    verts: torch.Tensor
    faces: Optional[torch.Tensor]
    verts_colors: Optional[torch.Tensor]
    verts_normals: Optional[torch.Tensor]
    verts_texture_uvs: Optional[torch.Tensor]
+
+
def _load_ply(f, *, path_manager: PathManager) -> _PlyData:
    """
    Load the data from a .ply file.

    Args:
        f: A binary or text file-like object (with methods read, readline,
            tell and seek), a pathlib path or a string containing a file name.
            If the ply file is in the binary ply format rather than the text
            ply format, then a text stream is not supported.
            It is easiest to use a binary stream in all cases.
        path_manager: PathManager for loading if f is a str.

    Returns:
        _PlyData object
    """
    header, elements = _load_ply_raw(f, path_manager=path_manager)
    verts_data = _get_verts(header, elements)

    face = elements.get("face", None)
    if face is not None:
        face_head = next(head for head in header.elements if head.name == "face")
        if (
            len(face_head.properties) != 1
            or face_head.properties[0].list_size_type is None
        ):
            raise ValueError("Unexpected form of faces data.")
        # The single property is usually called "vertex_index" or
        # "vertex_indices" but we don't need to enforce this.

    faces = None
    if face is not None and len(face):
        if isinstance(face, np.ndarray) and face.ndim == 2:
            # Homogeneous case: every face has the same number of vertices.
            if face.shape[1] < 3:
                raise ValueError("Faces must have at least 3 vertices.")
            # Fan-triangulate each polygon about its first vertex.
            triangles = [face[:, [0, i + 1, i + 2]] for i in range(face.shape[1] - 2)]
            faces = torch.LongTensor(np.vstack(triangles).astype(np.int64))
        else:
            # Ragged case: each row is a 1-element datum holding one polygon.
            triangle_rows = []
            for (polygon,) in face:
                if polygon.ndim != 1:
                    raise ValueError("Bad face data.")
                if polygon.shape[0] < 3:
                    raise ValueError("Faces must have at least 3 vertices.")
                for i in range(polygon.shape[0] - 2):
                    triangle_rows.append([polygon[0], polygon[i + 1], polygon[i + 2]])
            faces = torch.tensor(triangle_rows, dtype=torch.int64)

    if faces is not None:
        _check_faces_indices(faces, max_index=verts_data.verts.shape[0])

    return _PlyData(**asdict(verts_data), faces=faces, header=header)
+
+
def load_ply(
    f, *, path_manager: Optional[PathManager] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Load only the verts and faces from a .ply file. The preferred way to
    load data from such a file is IO.load_mesh / IO.load_pointcloud,
    which can read more of the data.

    Example .ply file format::

        ply
        format ascii 1.0           { ascii/binary, format version number }
        comment made by Greg Turk  { comments keyword specified, like all lines }
        comment this file is a cube
        element vertex 8           { define "vertex" element, 8 of them in file }
        property float x           { vertex contains float "x" coordinate }
        property float y           { y coordinate is also a vertex property }
        property float z           { z coordinate, too }
        element face 6             { there are 6 "face" elements in the file }
        property list uchar int vertex_index { "vertex_indices" is a list of ints }
        end_header                 { delimits the end of the header }
        0 0 0                      { start of vertex list }
        0 0 1
        0 1 1
        0 1 0
        1 0 0
        1 0 1
        1 1 1
        1 1 0
        4 0 1 2 3                  { start of face list }
        4 7 6 5 4
        4 0 4 5 1
        4 1 5 6 2
        4 2 6 7 3
        4 3 7 4 0

    Args:
        f: A binary or text file-like object (with methods read, readline,
            tell and seek), a pathlib path or a string containing a file name.
            If the ply file is in the binary ply format rather than the text
            ply format, then a text stream is not supported.
            It is easiest to use a binary stream in all cases.
        path_manager: PathManager for loading if f is a str.

    Returns:
        verts: FloatTensor of shape (V, 3).
        faces: LongTensor of vertex indices, shape (F, 3).
    """
    manager = PathManager() if path_manager is None else path_manager
    data = _load_ply(f, path_manager=manager)
    if data.faces is not None:
        return data.verts, data.faces
    # A file without faces yields an empty faces tensor.
    return data.verts, torch.zeros(0, 3, dtype=torch.int64)
+
+
+def _write_ply_header(
+ f,
+ *,
+ verts: torch.Tensor,
+ faces: Optional[torch.LongTensor],
+ verts_normals: Optional[torch.Tensor],
+ verts_colors: Optional[torch.Tensor],
+ ascii: bool,
+ colors_as_uint8: bool,
+) -> None:
+ """
+ Internal implementation for writing header when saving to a .ply file.
+
+ Args:
+ f: File object to which the 3D data should be written.
+ verts: FloatTensor of shape (V, 3) giving vertex coordinates.
+ faces: LongTensor of shape (F, 3) giving faces.
+ verts_normals: FloatTensor of shape (V, 3) giving vertex normals.
+ verts_colors: FloatTensor of shape (V, 3) giving vertex colors.
+ ascii: (bool) whether to use the ascii ply format.
+ colors_as_uint8: Whether to save colors as numbers in the range
+ [0, 255] instead of float32.
+ """
+ assert not len(verts) or (verts.dim() == 2 and verts.size(1) == 3)
+ assert faces is None or not len(faces) or (faces.dim() == 2 and faces.size(1) == 3)
+ assert verts_normals is None or (
+ verts_normals.dim() == 2 and verts_normals.size(1) == 3
+ )
+ assert verts_colors is None or (
+ verts_colors.dim() == 2 and verts_colors.size(1) == 3
+ )
+
+ if ascii:
+ f.write(b"ply\nformat ascii 1.0\n")
+ elif sys.byteorder == "big":
+ f.write(b"ply\nformat binary_big_endian 1.0\n")
+ else:
+ f.write(b"ply\nformat binary_little_endian 1.0\n")
+ f.write(f"element vertex {verts.shape[0]}\n".encode("ascii"))
+ f.write(b"property float x\n")
+ f.write(b"property float y\n")
+ f.write(b"property float z\n")
+ if verts_normals is not None:
+ f.write(b"property float nx\n")
+ f.write(b"property float ny\n")
+ f.write(b"property float nz\n")
+ if verts_colors is not None:
+ color_ply_type = b"uchar" if colors_as_uint8 else b"float"
+ for color in (b"red", b"green", b"blue"):
+ f.write(b"property " + color_ply_type + b" " + color + b"\n")
+ if len(verts) and faces is not None:
+ f.write(f"element face {faces.shape[0]}\n".encode("ascii"))
+ f.write(b"property list uchar int vertex_index\n")
+ f.write(b"end_header\n")
+
+
def _save_ply(
    f,
    *,
    verts: torch.Tensor,
    faces: Optional[torch.LongTensor],
    verts_normals: Optional[torch.Tensor],
    verts_colors: Optional[torch.Tensor],
    ascii: bool,
    decimal_places: Optional[int] = None,
    colors_as_uint8: bool,
) -> None:
    """
    Internal implementation for saving 3D data to a .ply file.

    The header is delegated to _write_ply_header; this function then writes
    the vertex element (coordinates plus optional normals and colors) and,
    if faces are given, the face element, in either ascii or native-endian
    binary form.

    Args:
        f: File object to which the 3D data should be written. Must be open
            in binary mode (bytes are written even in the ascii format).
        verts: FloatTensor of shape (V, 3) giving vertex coordinates.
        faces: LongTensor of shape (F, 3) giving faces.
        verts_normals: FloatTensor of shape (V, 3) giving vertex normals.
        verts_colors: FloatTensor of shape (V, 3) giving vertex colors.
        ascii: (bool) whether to use the ascii ply format.
        decimal_places: Number of decimal places for saving if ascii=True.
        colors_as_uint8: Whether to save colors as numbers in the range
            [0, 255] instead of float32.
    """
    _write_ply_header(
        f,
        verts=verts,
        faces=faces,
        verts_normals=verts_normals,
        verts_colors=verts_colors,
        ascii=ascii,
        colors_as_uint8=colors_as_uint8,
    )

    # The header declares zero vertices in this case; nothing more to write.
    if not (len(verts)):
        warnings.warn("Empty 'verts' provided")
        return

    # Build a numpy structured dtype so all per-vertex properties can be
    # emitted from a single record array, matching the header layout.
    color_np_type = np.ubyte if colors_as_uint8 else np.float32
    verts_dtype = [("verts", np.float32, 3)]
    if verts_normals is not None:
        verts_dtype.append(("normals", np.float32, 3))
    if verts_colors is not None:
        verts_dtype.append(("colors", color_np_type, 3))

    vert_data = np.zeros(verts.shape[0], dtype=verts_dtype)
    vert_data["verts"] = verts.detach().cpu().numpy()
    if verts_normals is not None:
        vert_data["normals"] = verts_normals.detach().cpu().numpy()
    if verts_colors is not None:
        color_data = verts_colors.detach().cpu().numpy()
        if colors_as_uint8:
            # assumes colors are in [0, 1] — scaled and rounded to uint8;
            # TODO confirm against callers.
            vert_data["colors"] = np.rint(color_data * 255)
        else:
            vert_data["colors"] = color_data

    if ascii:
        if decimal_places is None:
            float_str = b"%f"
        else:
            # e.g. decimal_places=3 -> b"%.3f"
            float_str = b"%" + b".%df" % decimal_places
        float_group_str = (float_str + b" ") * 3
        formats = [float_group_str]
        if verts_normals is not None:
            formats.append(float_group_str)
        if verts_colors is not None:
            formats.append(b"%d %d %d " if colors_as_uint8 else float_group_str)
        # Replace the trailing space of the last property group with a newline.
        formats[-1] = formats[-1][:-1] + b"\n"
        for line_data in vert_data:
            for data, format in zip(line_data, formats):
                f.write(format % tuple(data))
    else:
        if isinstance(f, BytesIO):
            # tofile only works with real files, but is faster than this.
            f.write(vert_data.tobytes())
        else:
            vert_data.tofile(f)

    if faces is not None:
        faces_array = faces.detach().cpu().numpy()

        # Warns (but still writes) if any face references an invalid vertex.
        _check_faces_indices(faces, max_index=verts.shape[0])

        if len(faces_array):
            if ascii:
                # Each ascii face line is "3 i0 i1 i2": list length then indices.
                np.savetxt(f, faces_array, "3 %d %d %d")
            else:
                # Binary face element: a uint8 count (always 3) followed by
                # three uint32 indices, packed as one record per face.
                faces_recs = np.zeros(
                    len(faces_array),
                    dtype=[("count", np.uint8), ("vertex_indices", np.uint32, 3)],
                )
                faces_recs["count"] = 3
                faces_recs["vertex_indices"] = faces_array
                faces_uints = faces_recs.view(np.uint8)

                if isinstance(f, BytesIO):
                    f.write(faces_uints.tobytes())
                else:
                    faces_uints.tofile(f)
+
+
def save_ply(
    f,
    verts: torch.Tensor,
    faces: Optional[torch.LongTensor] = None,
    verts_normals: Optional[torch.Tensor] = None,
    ascii: bool = False,
    decimal_places: Optional[int] = None,
    path_manager: Optional[PathManager] = None,
) -> None:
    """
    Save a mesh to a .ply file.

    Args:
        f: File (or path) to which the mesh should be written.
        verts: FloatTensor of shape (V, 3) giving vertex coordinates.
        faces: LongTensor of shape (F, 3) giving faces.
        verts_normals: FloatTensor of shape (V, 3) giving vertex normals.
        ascii: (bool) whether to use the ascii ply format.
        decimal_places: Number of decimal places for saving if ascii=True.
        path_manager: PathManager for interpreting f if it is a str.
    """
    # Validate all tensor shapes up front so we never write a partial file.
    verts_ok = not len(verts) or (verts.dim() == 2 and verts.size(1) == 3)
    if not verts_ok:
        raise ValueError(
            "Argument 'verts' should either be empty or of shape (num_verts, 3)."
        )

    if faces is not None and len(faces):
        if faces.dim() != 2 or faces.size(1) != 3:
            raise ValueError(
                "Argument 'faces' should either be empty or of shape (num_faces, 3)."
            )

    if verts_normals is not None and len(verts_normals):
        normals_ok = (
            verts_normals.dim() == 2
            and verts_normals.size(1) == 3
            and verts_normals.size(0) == verts.size(0)
        )
        if not normals_ok:
            raise ValueError(
                "Argument 'verts_normals' should either be empty or of shape (num_verts, 3)."
            )

    path_manager = PathManager() if path_manager is None else path_manager
    with _open_file(f, path_manager, "wb") as f:
        # Colors are not exposed through this entry point.
        _save_ply(
            f,
            verts=verts,
            faces=faces,
            verts_normals=verts_normals,
            verts_colors=None,
            ascii=ascii,
            decimal_places=decimal_places,
            colors_as_uint8=False,
        )
+
+
class MeshPlyFormat(MeshFormatInterpreter):
    """
    IO interpreter for meshes stored in the .ply format.
    """

    def __init__(self) -> None:
        self.known_suffixes = (".ply",)

    def read(
        self,
        path: PathOrStr,
        include_textures: bool,
        device,
        path_manager: PathManager,
        **kwargs,
    ) -> Optional[Meshes]:
        """
        Load a mesh from a .ply file.

        Returns None (signalling "not this format") when the path does not
        end in a known suffix; otherwise a single-element Meshes batch on
        `device`.
        """
        if not endswith(path, self.known_suffixes):
            return None

        data = _load_ply(f=path, path_manager=path_manager)
        faces = data.faces
        if faces is None:
            # A ply with no face element still yields a valid, empty-faced mesh.
            faces = torch.zeros(0, 3, dtype=torch.int64)

        texture = None
        if include_textures:
            # Per-vertex colors take precedence over a UV texture map.
            if data.verts_colors is not None:
                texture = TexturesVertex([data.verts_colors.to(device)])
            elif data.verts_texture_uvs is not None:
                # The texture image name is taken from a header comment
                # containing "TextureFile"; the last such comment wins and the
                # image path is resolved relative to the ply file's directory.
                texture_file_path = None
                for comment in data.header.comments:
                    if "TextureFile" in comment:
                        given_texture_file = comment.split(" ")[-1]
                        texture_file_path = os.path.join(
                            os.path.dirname(str(path)), given_texture_file
                        )
                if texture_file_path is not None:
                    texture_map = _read_image(
                        texture_file_path, path_manager, format="RGB"
                    )
                    # Scale uint8 pixel values into [0, 1] floats.
                    texture_map = torch.tensor(texture_map, dtype=torch.float32) / 255.0
                    texture = TexturesUV(
                        [texture_map.to(device)],
                        [faces.to(device)],
                        [data.verts_texture_uvs.to(device)],
                    )

        verts_normals = None
        if data.verts_normals is not None:
            verts_normals = [data.verts_normals.to(device)]
        mesh = Meshes(
            verts=[data.verts.to(device)],
            faces=[faces.to(device)],
            textures=texture,
            verts_normals=verts_normals,
        )
        return mesh

    def save(
        self,
        data: Meshes,
        path: PathOrStr,
        path_manager: PathManager,
        binary: Optional[bool],
        decimal_places: Optional[int] = None,
        colors_as_uint8: bool = False,
        **kwargs,
    ) -> bool:
        """
        Save a mesh as .ply. Returns False when the suffix is not known (so
        another interpreter can try), True once the file has been written.

        NOTE(review): only element [0] of the batch is written — presumably
        callers pass single-mesh batches; confirm before saving larger ones.

        Extra optional args:
            colors_as_uint8: (bool) Whether to save colors as numbers in the
                range [0, 255] instead of float32.
        """
        if not endswith(path, self.known_suffixes):
            return False

        verts = data.verts_list()[0]
        faces = data.faces_list()[0]

        if data.has_verts_normals():
            verts_normals = data.verts_normals_list()[0]
        else:
            verts_normals = None

        # Only 3-channel per-vertex textures can be stored in a ply file;
        # any other feature width is dropped with a warning.
        if isinstance(data.textures, TexturesVertex):
            mesh_verts_colors = data.textures.verts_features_list()[0]
            n_colors = mesh_verts_colors.shape[1]
            if n_colors == 3:
                verts_colors = mesh_verts_colors
            else:
                warnings.warn(
                    f"Texture will not be saved as it has {n_colors} colors, not 3."
                )
                verts_colors = None
        else:
            verts_colors = None

        # binary=None defaults to binary output; only binary=False gives ascii.
        with _open_file(path, path_manager, "wb") as f:
            _save_ply(
                f=f,
                verts=verts,
                faces=faces,
                verts_colors=verts_colors,
                verts_normals=verts_normals,
                ascii=binary is False,
                decimal_places=decimal_places,
                colors_as_uint8=colors_as_uint8,
            )
        return True
+
+
class PointcloudPlyFormat(PointcloudFormatInterpreter):
    """
    IO interpreter for point clouds stored in the .ply format.
    """

    def __init__(self) -> None:
        self.known_suffixes = (".ply",)

    def read(
        self,
        path: PathOrStr,
        device,
        path_manager: PathManager,
        **kwargs,
    ) -> Optional[Pointclouds]:
        """
        Load a point cloud from a .ply file.

        Returns None when the path does not end in a known suffix; otherwise
        a single-cloud Pointclouds on `device`, with any per-vertex colors
        mapped to features and any per-vertex normals to normals.
        """
        if not endswith(path, self.known_suffixes):
            return None

        data = _load_ply(f=path, path_manager=path_manager)
        features = None
        if data.verts_colors is not None:
            features = [data.verts_colors.to(device)]
        normals = None
        if data.verts_normals is not None:
            normals = [data.verts_normals.to(device)]

        pointcloud = Pointclouds(
            points=[data.verts.to(device)], features=features, normals=normals
        )
        return pointcloud

    def save(
        self,
        data: Pointclouds,
        path: PathOrStr,
        path_manager: PathManager,
        binary: Optional[bool],
        decimal_places: Optional[int] = None,
        colors_as_uint8: bool = False,
        **kwargs,
    ) -> bool:
        """
        Save a point cloud as .ply. Returns False when the suffix is not
        known (another interpreter may handle it), True once written.

        NOTE(review): points come from points_list()[0] while features and
        normals use the packed representation — these agree only for a
        single-cloud batch; confirm callers never pass larger batches.

        Extra optional args:
            colors_as_uint8: (bool) Whether to save colors as numbers in the
                range [0, 255] instead of float32.
        """
        if not endswith(path, self.known_suffixes):
            return False

        points = data.points_list()[0]
        features = data.features_packed()
        normals = data.normals_packed()

        # binary=None defaults to binary output; only binary=False gives ascii.
        with _open_file(path, path_manager, "wb") as f:
            _save_ply(
                f=f,
                verts=points,
                verts_colors=features,
                verts_normals=normals,
                faces=None,
                ascii=binary is False,
                decimal_places=decimal_places,
                colors_as_uint8=colors_as_uint8,
            )
        return True
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..467c7c14b5f76a23090a5f3375fe5948fa23a460
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/utils.py
@@ -0,0 +1,85 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import contextlib
+import pathlib
+import warnings
+from typing import cast, ContextManager, IO, Optional, Union
+
+import numpy as np
+import torch
+from iopath.common.file_io import PathManager
+from PIL import Image
+
+from ..common.datatypes import Device
+
+
+PathOrStr = Union[pathlib.Path, str]
+
+
def _open_file(f, path_manager: PathManager, mode: str = "r") -> ContextManager[IO]:
    """
    Wrap ``f`` in a context manager that yields an open file object.

    A ``str`` is opened through ``path_manager``, a ``pathlib.Path`` is
    opened directly, and anything else is assumed to already be an open
    stream: it is yielded as-is and NOT closed on exit.
    """
    if isinstance(f, str):
        # pyre-fixme[6]: For 2nd argument expected `Union[typing_extensions.Literal['...
        return contextlib.closing(path_manager.open(f, mode))
    if isinstance(f, pathlib.Path):
        return contextlib.closing(f.open(mode))
    return contextlib.nullcontext(cast(IO, f))
+
+
def _make_tensor(
    data, cols: int, dtype: torch.dtype, device: Device = "cpu"
) -> torch.Tensor:
    """
    Convert ``data`` to a 2D tensor with ``cols`` columns and the given
    dtype/device. An empty ``data`` yields a well-formed (0, cols) tensor
    instead of the shapeless tensor ``torch.tensor([])`` would produce.
    """
    if len(data):
        return torch.tensor(data, dtype=dtype, device=device)
    return torch.zeros((0, cols), dtype=dtype, device=device)
+
+
+def _check_faces_indices(
+ faces_indices: torch.Tensor, max_index: int, pad_value: Optional[int] = None
+) -> torch.Tensor:
+ if pad_value is None:
+ mask = torch.ones(faces_indices.shape[:-1]).bool() # Keep all faces
+ else:
+ mask = faces_indices.ne(pad_value).any(dim=-1)
+ if torch.any(faces_indices[mask] >= max_index) or torch.any(
+ faces_indices[mask] < 0
+ ):
+ warnings.warn("Faces have invalid indices")
+ return faces_indices
+
+
def _read_image(file_name: str, path_manager: PathManager, format=None):
    """
    Read an image from a file using Pillow.

    Args:
        file_name: image file path.
        path_manager: PathManager for interpreting file_name.
        format: one of ["RGB", "BGR"]

    Returns:
        image: a float32 numpy array of shape (H, W, C).

    Raises:
        ValueError: if format is not "RGB" or "BGR".
    """
    if format not in ["RGB", "BGR"]:
        # Bug fix: the message previously passed the raw value as a second
        # exception argument ("...got %s", format) so the placeholder was
        # never interpolated; format it into the message instead.
        raise ValueError(f"format can only be one of [RGB, BGR]; got {format}")
    with path_manager.open(file_name, "rb") as f:
        image = Image.open(f)
        if format is not None:
            # PIL only supports RGB. First convert to RGB and flip channels
            # below for BGR.
            image = image.convert("RGB")
        # Materialize pixel data while the file is still open (PIL is lazy).
        image = np.asarray(image).astype(np.float32)
        if format == "BGR":
            image = image[:, :, ::-1]
        return image
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/graph_conv.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/graph_conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..a43eeb3ad204ff0dde46b8945383c15367e0d788
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/graph_conv.py
@@ -0,0 +1,176 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+import torch
+import torch.nn as nn
+from pytorch3d import _C
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+
+
class GraphConv(nn.Module):
    """A single graph convolution layer."""

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        init: str = "normal",
        directed: bool = False,
    ) -> None:
        """
        Args:
            input_dim: Number of input features per vertex.
            output_dim: Number of output features per vertex.
            init: Weight initialization method. Can be one of ['zero', 'normal'].
            directed: Bool indicating if edges in the graph are directed.
        """
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.directed = directed
        # w0 transforms each vertex's own features; w1 transforms the
        # features aggregated from its neighbors.
        self.w0 = nn.Linear(input_dim, output_dim)
        self.w1 = nn.Linear(input_dim, output_dim)

        if init == "normal":
            for layer in (self.w0, self.w1):
                nn.init.normal_(layer.weight, mean=0, std=0.01)
                layer.bias.data.zero_()
        elif init == "zero":
            for layer in (self.w0, self.w1):
                layer.weight.data.zero_()
        else:
            raise ValueError(f'Invalid GraphConv initialization "{init}"')

    def forward(self, verts, edges):
        """
        Args:
            verts: FloatTensor of shape (V, input_dim), one feature row per
                vertex; input_dim has to match the input_dim from __init__.
            edges: LongTensor of shape (E, 2), each row holding the indices
                of the two vertices forming an edge.

        Returns:
            FloatTensor of shape (V, output_dim) with the convolved features.
        """
        if verts.is_cuda != edges.is_cuda:
            raise ValueError("verts and edges tensors must be on the same device.")
        if verts.shape[0] == 0:
            # Empty graph: multiply by verts.sum() so verts stays part of
            # the autograd graph even though the output has no rows.
            return verts.new_zeros((0, self.output_dim)) * verts.sum()

        self_features = self.w0(verts)  # (V, output_dim)
        neighbor_features = self.w1(verts)  # (V, output_dim)

        on_gpu = torch.cuda.is_available() and verts.is_cuda and edges.is_cuda
        aggregate = gather_scatter if on_gpu else gather_scatter_python
        neighbor_sums = aggregate(neighbor_features, edges, self.directed)

        # Each vertex's output combines its own transform with the sum of
        # its neighbors' transforms.
        return self_features + neighbor_sums

    def __repr__(self):
        return "GraphConv(%d -> %d, directed=%r)" % (
            self.input_dim,
            self.output_dim,
            self.directed,
        )
+
+
def gather_scatter_python(input, edges, directed: bool = False):
    """
    Python implementation of gather_scatter for aggregating features of
    neighbor nodes in a graph.

    Given a directed graph: v0 -> v1 -> v2 the updated feature for v1 depends
    on v2 in order to be consistent with Morris et al. AAAI 2019
    (https://arxiv.org/abs/1810.02244). This only affects
    directed graphs; for undirected graphs v1 will depend on both v0 and v2,
    no matter which way the edges are physically stored.

    Args:
        input: Tensor of shape (num_vertices, input_dim).
        edges: Tensor of edge indices of shape (num_edges, 2).
        directed: bool indicating if edges are directed.

    Returns:
        output: Tensor of same shape as input.
    """
    if input.dim() != 2:
        raise ValueError("input can only have 2 dimensions.")
    if edges.dim() != 2:
        raise ValueError("edges can only have 2 dimensions.")
    if edges.shape[1] != 2:
        raise ValueError("edges must be of shape (num_edges, 2).")

    heads, tails = edges[:, 0], edges[:, 1]
    output = torch.zeros_like(input)
    # Each edge contributes the tail vertex's features to the head vertex.
    output.index_add_(0, heads, input[tails])
    if not directed:
        # Undirected: also accumulate in the opposite direction.
        output.index_add_(0, tails, input[heads])
    return output
+
+
class GatherScatter(Function):
    """
    Torch autograd Function wrapper for gather_scatter C++/CUDA implementations.
    """

    @staticmethod
    def forward(ctx, input, edges, directed=False):
        """
        Args:
            ctx: Context object used to calculate gradients.
            input: float32 Tensor of shape (num_vertices, input_dim).
            edges: Tensor of edge indices of shape (num_edges, 2).
            directed: Bool indicating if edges are directed.

        Returns:
            output: Tensor of same shape as input.
        """
        # Validate before touching the compiled op so errors are clear.
        if input.dim() != 2:
            raise ValueError("input can only have 2 dimensions.")
        if edges.dim() != 2:
            raise ValueError("edges can only have 2 dimensions.")
        if edges.shape[1] != 2:
            raise ValueError("edges must be of shape (num_edges, 2).")
        if input.dtype != torch.float32:
            raise ValueError("input has to be of type torch.float32.")

        ctx.directed = directed
        input = input.contiguous()
        edges = edges.contiguous()
        # Only edges are needed to propagate gradients in backward.
        ctx.save_for_backward(edges)
        return _C.gather_scatter(input, edges, directed, False)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (edges,) = ctx.saved_tensors
        grad_input = _C.gather_scatter(
            grad_output.contiguous(), edges, ctx.directed, True
        )
        # No gradients flow to the edge indices or the directed flag.
        return grad_input, None, None


gather_scatter = GatherScatter.apply
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/mesh_filtering.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/mesh_filtering.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8f29431c6b6843411a51f14a69fd8ceac4f165f
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/mesh_filtering.py
@@ -0,0 +1,62 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+import torch
+from pytorch3d.ops import norm_laplacian
+from pytorch3d.structures import Meshes, utils as struct_utils
+
+
+# ------------------------ Mesh Smoothing ------------------------ #
+# This file contains differentiable operators to filter meshes
+# The ops include
+# 1) Taubin Smoothing
+# TODO(gkioxari) add more! :)
+# ---------------------------------------------------------------- #
+
+
+# ----------------------- Taubin Smoothing ----------------------- #
+
+
def taubin_smoothing(
    meshes: Meshes, lambd: float = 0.53, mu: float = -0.53, num_iter: int = 10
) -> Meshes:
    """
    Taubin smoothing [1] is an iterative smoothing operator for meshes.
    Each iteration applies a shrinking Laplacian averaging step with weight
    lambd (> 0) followed by an inflating step with weight mu (< 0):
        verts := (1 - λ) * verts + λ * L * verts
        verts := (1 - μ) * verts + μ * L * verts

    Args:
        meshes: Meshes input to be smoothed.
        lambd, mu: float parameters for Taubin smoothing, lambd > 0, mu < 0.
        num_iter: number of iterations to execute smoothing.

    Returns:
        A new Meshes with smoothed vertices (the input is not modified).

    [1] Curve and Surface Smoothing without Shrinkage,
        Gabriel Taubin, ICCV 1997
    """
    verts = meshes.verts_packed()  # V x 3
    edges = meshes.edges_packed()  # E x 3

    def smooth_step(v, weight):
        # One weighted-Laplacian averaging pass, normalized per vertex by
        # the total edge weight of its row in the Laplacian.
        lap = norm_laplacian(v, edges)
        total_weight = torch.sparse.sum(lap, dim=1).to_dense().view(-1, 1)
        return (1 - weight) * v + weight * torch.mm(lap, v) / total_weight

    for _ in range(num_iter):
        verts = smooth_step(verts, lambd)
        verts = smooth_step(verts, mu)

    per_mesh_verts = struct_utils.packed_to_list(
        verts, meshes.num_verts_per_mesh().tolist()
    )
    return Meshes(verts=list(per_mesh_verts), faces=meshes.faces_list())
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_face_areas_normals.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_face_areas_normals.py
new file mode 100644
index 0000000000000000000000000000000000000000..970a4de30d761704648482d6cd48e6eb47287087
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_face_areas_normals.py
@@ -0,0 +1,47 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+from itertools import product
+
+import torch
+from fvcore.common.benchmark import benchmark
+from tests.test_face_areas_normals import TestFaceAreasNormals
+
+
def bm_face_areas_normals() -> None:
    """Benchmark the face_areas_normals op against its pure-torch version."""
    devices = ["cpu"]
    if torch.cuda.is_available():
        devices.append("cuda:0")

    kwargs_list = [
        {"num_meshes": n, "num_verts": v, "num_faces": f, "device": d}
        for n, v, f, d in product([2, 10, 32], [100, 1000], [300, 3000], devices)
    ]
    benchmark(
        TestFaceAreasNormals.face_areas_normals_with_init,
        "FACE_AREAS_NORMALS",
        kwargs_list,
        warmup_iters=1,
    )
    benchmark(
        TestFaceAreasNormals.face_areas_normals_with_init_torch,
        "FACE_AREAS_NORMALS_TORCH",
        kwargs_list,
        warmup_iters=1,
    )


if __name__ == "__main__":
    bm_face_areas_normals()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_graph_conv.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_graph_conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..4276f23d2392523b8851cef0ea09668b84712dbe
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_graph_conv.py
@@ -0,0 +1,50 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+from itertools import product
+
+import torch
+from fvcore.common.benchmark import benchmark
+from tests.test_graph_conv import TestGraphConv
+
+
def bm_graph_conv() -> None:
    """Benchmark GraphConv forward+backward across dims, batches and devices."""
    backends = ["cpu"]
    if torch.cuda.is_available():
        backends.append("cuda")

    kwargs_list = [
        {
            "gconv_dim": g,
            "num_meshes": n,
            "num_verts": v,
            "num_faces": f,
            "directed": d,
            "backend": b,
        }
        for g, n, v, f, d, b in product(
            [128, 256], [32, 64], [100], [1000], [False, True], backends
        )
    ]
    benchmark(
        TestGraphConv.graph_conv_forward_backward,
        "GRAPH CONV",
        kwargs_list,
        warmup_iters=1,
    )


if __name__ == "__main__":
    bm_graph_conv()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_iou_box3d.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_iou_box3d.py
new file mode 100644
index 0000000000000000000000000000000000000000..152e36ea78862faab7acd73968613e1e5cb0a033
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_iou_box3d.py
@@ -0,0 +1,54 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from itertools import product
+
+from fvcore.common.benchmark import benchmark
+from tests.test_iou_box3d import TestIoU3D
+
+
def bm_iou_box3d() -> None:
    """Benchmark 3D box IoU: CUDA op, C++/CUDA comparison, naive and sampling."""
    # Realistic use cases (CUDA implementation).
    realistic_kwargs = [
        {"N": n, "M": m, "device": "cuda:0"}
        for n, m in product([30, 100], [5, 10, 100])
    ]
    benchmark(TestIoU3D.iou, "3D_IOU", realistic_kwargs, warmup_iters=1)

    # Comparison of the C++ and CUDA implementations.
    sizes = [1, 4, 8, 16]
    compare_kwargs = [
        {"N": n, "M": m, "device": d}
        for n, m, d in product(sizes, sizes, ["cpu", "cuda:0"])
    ]
    benchmark(TestIoU3D.iou, "3D_IOU", compare_kwargs, warmup_iters=1)

    # Naive PyTorch implementation (kept small: it is slow).
    small = [1, 4]
    naive_kwargs = [
        {"N": n, "M": m, "device": "cuda:0"} for n, m in product(small, small)
    ]
    benchmark(TestIoU3D.iou_naive, "3D_IOU_NAIVE", naive_kwargs, warmup_iters=1)

    # Sampling based method.
    sampling_kwargs = [
        {"N": n, "M": m, "num_samples": s, "device": "cuda:0"}
        for n, m, s in product(small, small, [2000, 5000])
    ]
    benchmark(TestIoU3D.iou_sampling, "3D_IOU_SAMPLING", sampling_kwargs, warmup_iters=1)


if __name__ == "__main__":
    bm_iou_box3d()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_mesh_io.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_mesh_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..37155fae34691442d4f1acb0e47a1563f0746ba6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_mesh_io.py
@@ -0,0 +1,105 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from itertools import product
+
+from fvcore.common.benchmark import benchmark
+from tests.test_io_obj import TestMeshObjIO
+from tests.test_io_ply import TestMeshPlyIO
+
+
def bm_save_load() -> None:
    """Benchmark obj/ply mesh save & load plus texture-sampling helpers."""
    simple_kwargs_list = [
        {"V": 100, "F": 200},
        {"V": 1000, "F": 2000},
        {"V": 10000, "F": 20000},
    ]
    simple_cases = [
        (TestMeshObjIO.bm_load_simple_obj_with_init, "LOAD_SIMPLE_OBJ"),
        (TestMeshObjIO.bm_save_simple_obj_with_init, "SAVE_SIMPLE_OBJ"),
        (TestMeshPlyIO.bm_load_simple_ply_with_init, "LOAD_SIMPLE_PLY"),
        (TestMeshPlyIO.bm_save_simple_ply_with_init, "SAVE_SIMPLE_PLY"),
    ]
    for fn, label in simple_cases:
        benchmark(fn, label, simple_kwargs_list, warmup_iters=1)

    complex_kwargs_list = [{"N": 8}, {"N": 32}, {"N": 128}]
    complex_cases = [
        (TestMeshObjIO.bm_load_complex_obj, "LOAD_COMPLEX_OBJ"),
        (TestMeshObjIO.bm_save_complex_obj, "SAVE_COMPLEX_OBJ"),
        (TestMeshPlyIO.bm_load_complex_ply, "LOAD_COMPLEX_PLY"),
        (TestMeshPlyIO.bm_save_complex_ply, "SAVE_COMPLEX_PLY"),
    ]
    for fn, label in complex_cases:
        benchmark(fn, label, complex_kwargs_list, warmup_iters=1)

    # Texture loading benchmarks
    atlas_kwargs = [{"R": r} for r in (2, 4, 10, 15, 20)]
    benchmark(
        TestMeshObjIO.bm_load_texture_atlas,
        "PYTORCH3D_TEXTURE_ATLAS",
        atlas_kwargs,
        warmup_iters=1,
    )

    sampling_kwargs = [
        {"S": s, "F": f, "R": r}
        for s, f, r in product([64, 256, 1024], [100, 1000, 10000], [5, 10, 20])
    ]
    benchmark(
        TestMeshObjIO.bm_bilinear_sampling_vectorized,
        "BILINEAR_VECTORIZED",
        sampling_kwargs,
        warmup_iters=1,
    )
    benchmark(
        TestMeshObjIO.bm_bilinear_sampling_grid_sample,
        "BILINEAR_GRID_SAMPLE",
        sampling_kwargs,
        warmup_iters=1,
    )


if __name__ == "__main__":
    bm_save_load()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_mesh_laplacian_smoothing.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_mesh_laplacian_smoothing.py
new file mode 100644
index 0000000000000000000000000000000000000000..440aafbae75bc917aaea13f7e096d3a5fb73a92c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_mesh_laplacian_smoothing.py
@@ -0,0 +1,40 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+from itertools import product
+
+import torch
+from fvcore.common.benchmark import benchmark
+from tests.test_mesh_laplacian_smoothing import TestLaplacianSmoothing
+
+
def bm_mesh_laplacian_smoothing() -> None:
    """Benchmark the Laplacian smoothing loss over mesh/vert/face counts."""
    devices = ["cpu"]
    if torch.cuda.is_available():
        devices.append("cuda")

    kwargs_list = [
        {"num_meshes": n, "num_verts": v, "num_faces": f, "device": d}
        for n, v, f, d in product([2, 10, 32], [100, 1000], [300, 3000], devices)
    ]
    benchmark(
        TestLaplacianSmoothing.laplacian_smoothing_with_init,
        "MESH_LAPLACIAN_SMOOTHING",
        kwargs_list,
        warmup_iters=1,
    )


if __name__ == "__main__":
    bm_mesh_laplacian_smoothing()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_mesh_rasterizer_transform.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_mesh_rasterizer_transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..e65c265ef6e902b51acca9a8df9a258eeb85e7e4
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_mesh_rasterizer_transform.py
@@ -0,0 +1,53 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+from itertools import product
+
+import torch
+from fvcore.common.benchmark import benchmark
+from pytorch3d.renderer.cameras import FoVPerspectiveCameras, look_at_view_transform
+from pytorch3d.renderer.mesh.rasterizer import MeshRasterizer
+from pytorch3d.utils.ico_sphere import ico_sphere
+
+
def rasterize_transform_with_init(num_meshes: int, ico_level: int = 5, device="cuda"):
    """
    Build a batch of ico-spheres plus a rasterizer, and return a closure
    that benchmarks only the vertex-transform stage of rasterization.
    """
    sphere_meshes = ico_sphere(ico_level, device).extend(num_meshes)
    R, T = look_at_view_transform(1.0, 0.0, 0.0)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    rasterizer = MeshRasterizer(cameras=cameras)

    # Make sure all setup work has finished before timing starts.
    torch.cuda.synchronize()

    def raster_fn():
        rasterizer.transform(sphere_meshes)
        # Block until the kernels complete so the timing is meaningful.
        torch.cuda.synchronize()

    return raster_fn
+
+
def bm_mesh_rasterizer_transform() -> None:
    """Benchmark MeshRasterizer.transform on CUDA (no-op without a GPU)."""
    if not torch.cuda.is_available():
        return
    kwargs_list = [
        {"num_meshes": n, "ico_level": ic}
        for n, ic in product([1, 8], [0, 1, 3, 4])
    ]
    benchmark(
        rasterize_transform_with_init,
        "MESH_RASTERIZER",
        kwargs_list,
        warmup_iters=1,
    )


if __name__ == "__main__":
    bm_mesh_rasterizer_transform()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_meshes.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_meshes.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d0b63f2ba1f0b339780238e029b306fcaea6847
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_meshes.py
@@ -0,0 +1,43 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+from itertools import product
+
+import torch
+from fvcore.common.benchmark import benchmark
+from tests.test_meshes import TestMeshes
+
+
def bm_compute_packed_padded_meshes() -> None:
    """Benchmark Meshes packed/padded representation computation on CPU (and CUDA if present)."""
    devices = ["cpu"] + (["cuda"] if torch.cuda.is_available() else [])

    kwargs_list = [
        {"num_meshes": n, "max_v": v, "max_f": f, "device": d}
        for n, v, f, d in product(
            [32, 128], [100, 1000, 10000], [300, 3000, 30000], devices
        )
    ]
    # Run both representations over the identical case grid.
    for label, fn in (
        ("COMPUTE_PACKED", TestMeshes.compute_packed_with_init),
        ("COMPUTE_PADDED", TestMeshes.compute_padded_with_init),
    ):
        benchmark(fn, label, kwargs_list, warmup_iters=1)


if __name__ == "__main__":
    bm_compute_packed_padded_meshes()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_point_mesh_distance.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_point_mesh_distance.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc1da12883da43fa792ce14d561ae6af072b7a70
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_point_mesh_distance.py
@@ -0,0 +1,44 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+from itertools import product
+
+from fvcore.common.benchmark import benchmark
+from tests.test_point_mesh_distance import TestPointMeshDistance
+
+
def bm_point_mesh_distance() -> None:
    """Benchmark point-to-mesh edge and face distance kernels on CUDA."""
    kwargs_list = [
        {"N": n, "V": v, "F": f, "P": p, "device": dev}
        for n, v, f, p, dev in product(
            [4, 8, 16], [100, 1000], [300, 3000], [5000, 10000], ["cuda:0"]
        )
    ]

    for label, fn in (
        ("POINT_MESH_EDGE", TestPointMeshDistance.point_mesh_edge),
        ("POINT_MESH_FACE", TestPointMeshDistance.point_mesh_face),
    ):
        benchmark(fn, label, kwargs_list, warmup_iters=1)


if __name__ == "__main__":
    bm_point_mesh_distance()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_pointclouds.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_pointclouds.py
new file mode 100644
index 0000000000000000000000000000000000000000..4868e6bf2aa202fb5fd77ccb21f7e4699ea49db8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_pointclouds.py
@@ -0,0 +1,38 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+from itertools import product
+
+from fvcore.common.benchmark import benchmark
+from tests.test_pointclouds import TestPointclouds
+
+
def bm_compute_packed_padded_pointclouds() -> None:
    """Benchmark Pointclouds packed/padded representation computation."""
    kwargs_list = [
        {"num_clouds": n, "max_p": p, "features": f}
        for n, p, f in product([32, 128], [100, 10000], [1, 10, 300])
    ]
    # Same case grid for both representations.
    for label, fn in (
        ("COMPUTE_PACKED", TestPointclouds.compute_packed_with_init),
        ("COMPUTE_PADDED", TestPointclouds.compute_padded_with_init),
    ):
        benchmark(fn, label, kwargs_list, warmup_iters=1)


if __name__ == "__main__":
    bm_compute_packed_padded_pointclouds()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_points_alignment.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_points_alignment.py
new file mode 100644
index 0000000000000000000000000000000000000000..842773fb6390548b5cfcf28f6fdc0e90a4b67f05
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_points_alignment.py
@@ -0,0 +1,80 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from copy import deepcopy
+from itertools import product
+
+from fvcore.common.benchmark import benchmark
+from tests.test_points_alignment import TestCorrespondingPointsAlignment, TestICP
+
+
def bm_iterative_closest_point() -> None:
    """
    Benchmark ICP over a grid of batch sizes, dimensions and point counts.

    Every dim==3 case is additionally duplicated with `use_pointclouds=True`,
    since Pointclouds inputs only support 3 dimensions.
    """
    case_grid = {
        "batch_size": [1, 10],
        "dim": [3, 20],
        "n_points_X": [100, 1000],
        "n_points_Y": [100, 1000],
        "use_pointclouds": [False],
    }

    arg_names = sorted(case_grid)
    kwargs_list = [
        dict(zip(arg_names, values)) for values in product(*case_grid.values())
    ]

    # Duplicate the 3D cases with pointclouds enabled.
    kwargs_list += [
        {**entry, "use_pointclouds": True}
        for entry in kwargs_list
        if entry["dim"] == 3
    ]

    benchmark(
        TestICP.iterative_closest_point,
        "IterativeClosestPoint",
        kwargs_list,
        warmup_iters=1,
    )
+
+
def bm_corresponding_points_alignment() -> None:
    """
    Benchmark corresponding-points alignment over a parameter grid.

    Builds the cross-product of the case grid, then duplicates every
    dim==3 case with `use_pointclouds=True` (Pointclouds inputs only
    support 3 dimensions).
    """

    case_grid = {
        "allow_reflection": [True, False],
        "batch_size": [1, 10, 100],
        "dim": [3, 20],
        "estimate_scale": [True, False],
        "n_points": [100, 10000],
        "random_weights": [False, True],
        "use_pointclouds": [False],
    }

    test_args = sorted(case_grid.keys())
    test_cases = product(*case_grid.values())
    kwargs_list = [dict(zip(test_args, case)) for case in test_cases]

    # add the use_pointclouds=True test cases whenever we have dim==3
    kwargs_to_add = []
    for entry in kwargs_list:
        if entry["dim"] == 3:
            entry_add = deepcopy(entry)
            entry_add["use_pointclouds"] = True
            kwargs_to_add.append(entry_add)
    kwargs_list.extend(kwargs_to_add)

    benchmark(
        TestCorrespondingPointsAlignment.corresponding_points_alignment,
        # Label typo fixed: was "CorrespodingPointsAlignment" (missing "n"),
        # inconsistent with the class name it benchmarks.
        "CorrespondingPointsAlignment",
        kwargs_list,
        warmup_iters=1,
    )


if __name__ == "__main__":
    bm_corresponding_points_alignment()
    bm_iterative_closest_point()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_rasterize_meshes.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_rasterize_meshes.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a6531a389189ad13c63d1712ef08d499ba1584c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_rasterize_meshes.py
@@ -0,0 +1,127 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+from itertools import product
+
+import torch
+from fvcore.common.benchmark import benchmark
+from tests.test_rasterize_meshes import TestRasterizeMeshes
+
# Number of CPU threads torch uses for these benchmarks; override via the
# BM_RASTERIZE_MESHES_N_THREADS environment variable (defaults to 1 so CPU
# timings are single-threaded and comparable across machines).
BM_RASTERIZE_MESHES_N_THREADS = os.getenv("BM_RASTERIZE_MESHES_N_THREADS", 1)
torch.set_num_threads(int(BM_RASTERIZE_MESHES_N_THREADS))
+
+# ico levels:
+# 0: (12 verts, 20 faces)
+# 1: (42 verts, 80 faces)
+# 3: (642 verts, 1280 faces)
+# 4: (2562 verts, 5120 faces)
+# 5: (10242 verts, 20480 faces)
+# 6: (40962 verts, 81920 faces)
+
+
def bm_rasterize_meshes() -> None:
    """
    Benchmark mesh rasterization: the naive python reference, the C++ CPU
    implementation, the CUDA implementation, and CUDA with clipping when the
    image plane intersects the mesh.
    """

    def _grid(num_meshes, ico_level, image_size, blur, faces_per_pixel):
        # Build the kwargs cross-product shared by the python/CPU/CUDA runs.
        return [
            {
                "num_meshes": n,
                "ico_level": ic,
                "image_size": im,
                "blur_radius": b,
                "faces_per_pixel": f,
            }
            for n, ic, im, b, f in product(
                num_meshes, ico_level, image_size, blur, faces_per_pixel
            )
        ]

    # Pure python reference: tiny case only — very slow with large image size.
    benchmark(
        TestRasterizeMeshes.rasterize_meshes_python_with_init,
        "RASTERIZE_MESHES",
        [
            {
                "num_meshes": 1,
                "ico_level": 0,
                "image_size": 10,
                "blur_radius": 0.0,
                "faces_per_pixel": 3,
            }
        ],
        warmup_iters=1,
    )

    # C++ CPU implementation.
    benchmark(
        TestRasterizeMeshes.rasterize_meshes_cpu_with_init,
        "RASTERIZE_MESHES",
        _grid([1], [1], [64, 128, 512], [1e-6], [3, 50]),
        warmup_iters=1,
    )

    if not torch.cuda.is_available():
        return

    # CUDA implementation; square and non-square image sizes.
    benchmark(
        TestRasterizeMeshes.rasterize_meshes_cuda_with_init,
        "RASTERIZE_MESHES_CUDA",
        _grid(
            [8, 16],
            [4, 5, 6],
            [64, 128, 512, (512, 256), (256, 512)],
            [1e-6],
            [40],
        ),
        warmup_iters=1,
    )

    # Subset of cases with the image plane intersecting the mesh (clipping),
    # controlled by the camera distance `dist`.
    clipping_kwargs = [
        {
            "num_meshes": n,
            "ico_level": 4,
            "image_size": im,
            "blur_radius": 1e-6,
            "faces_per_pixel": 40,
            "dist": d,
        }
        for n, d, im in product(
            [8, 16], [3, 0.8, 0.5], [64, 128, 512, (512, 256), (256, 512)]
        )
    ]
    benchmark(
        TestRasterizeMeshes.bm_rasterize_meshes_with_clipping,
        "RASTERIZE_MESHES_CUDA_CLIPPING",
        clipping_kwargs,
        warmup_iters=1,
    )


if __name__ == "__main__":
    bm_rasterize_meshes()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_rasterize_points.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_rasterize_points.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcdd8fb08393aed7862488601725e1f5b686aaea
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_rasterize_points.py
@@ -0,0 +1,105 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from itertools import product
+
+import torch
+from fvcore.common.benchmark import benchmark
+from pytorch3d.renderer.points.rasterize_points import (
+ rasterize_points,
+ rasterize_points_python,
+)
+from pytorch3d.structures.pointclouds import Pointclouds
+
+
def _bm_python_with_init(N, P, img_size=32, radius=0.1, pts_per_pxl=3):
    """Return a closure that runs the pure-python point rasterizer on a fixed random cloud."""
    torch.manual_seed(231)  # deterministic input across benchmark runs
    pointclouds = Pointclouds(points=torch.randn(N, P, 3))
    return lambda: rasterize_points_python(pointclouds, img_size, radius, pts_per_pxl)
+
+
def _bm_rasterize_points_with_init(
    N, P, img_size=32, radius=0.1, pts_per_pxl=3, device="cpu", expand_radius=False
):
    """
    Return a closure benchmarking the C++/CUDA point rasterizer.

    Args:
        N, P: number of clouds and points per cloud.
        img_size: output image size (int or (H, W) tuple).
        radius: point radius; expanded to a per-point (N, P) tensor when
            `expand_radius` is True.
        pts_per_pxl: points kept per pixel.
        device: device spec string, e.g. "cpu" or "cuda".
        expand_radius: benchmark the per-point-radius code path.
    """
    torch.manual_seed(231)
    device = torch.device(device)
    points = torch.randn(N, P, 3, device=device)
    pointclouds = Pointclouds(points=points)

    if expand_radius:
        points_padded = pointclouds.points_padded()
        radius = torch.full((N, P), fill_value=radius).type_as(points_padded)

    args = (pointclouds, img_size, radius, pts_per_pxl)
    # Bug fix: the original compared the torch.device object to the string
    # "cuda" (`device == "cuda"`). That comparison is unreliable across torch
    # versions and is False for specs like "cuda:0", silently skipping
    # synchronization and making GPU timings meaningless. Compare the device
    # type instead.
    is_cuda = device.type == "cuda"
    if is_cuda:
        torch.cuda.synchronize(device)

    def fn():
        rasterize_points(*args)
        if is_cuda:
            # Include kernel completion in the measured time.
            torch.cuda.synchronize(device)

    return fn
+
+
def bm_python_vs_cpu_vs_cuda() -> None:
    """
    Benchmark point rasterization on CPU, then (if available) CUDA with the
    same cases plus larger CUDA-only workloads.
    """
    kwargs_list = []
    num_meshes = [1]
    num_points = [10000, 2000]
    image_size = [128, 256]
    radius = [1e-3, 0.01]
    pts_per_pxl = [50, 100]
    expand = [True, False]
    test_cases = product(
        num_meshes, num_points, image_size, radius, pts_per_pxl, expand
    )
    for case in test_cases:
        n, p, im, r, pts, e = case
        kwargs_list.append(
            {
                "N": n,
                "P": p,
                "img_size": im,
                "radius": r,
                "pts_per_pxl": pts,
                "device": "cpu",
                "expand_radius": e,
            }
        )

    benchmark(
        _bm_rasterize_points_with_init, "RASTERIZE_CPU", kwargs_list, warmup_iters=1
    )

    # Robustness fix: the CUDA section previously ran unconditionally and
    # crashed on CPU-only machines. Guard it like the other benchmark files.
    if not torch.cuda.is_available():
        return

    # Reuse the CPU cases and add larger workloads feasible only on GPU,
    # including non-square image sizes.
    kwargs_list += [
        {"N": 32, "P": 100000, "img_size": 128, "radius": 0.01, "pts_per_pxl": 50},
        {"N": 8, "P": 200000, "img_size": 512, "radius": 0.01, "pts_per_pxl": 50},
        {"N": 8, "P": 200000, "img_size": 256, "radius": 0.01, "pts_per_pxl": 50},
        {
            "N": 8,
            "P": 200000,
            "img_size": (512, 256),
            "radius": 0.01,
            "pts_per_pxl": 50,
        },
        {
            "N": 8,
            "P": 200000,
            "img_size": (256, 512),
            "radius": 0.01,
            "pts_per_pxl": 50,
        },
    ]
    for k in kwargs_list:
        k["device"] = "cuda"
    benchmark(
        _bm_rasterize_points_with_init, "RASTERIZE_CUDA", kwargs_list, warmup_iters=1
    )


if __name__ == "__main__":
    bm_python_vs_cpu_vs_cuda()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_raymarching.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_raymarching.py
new file mode 100644
index 0000000000000000000000000000000000000000..b37f612734b184edd8cae93dd077ec468d05d223
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_raymarching.py
@@ -0,0 +1,23 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import itertools
+
+from fvcore.common.benchmark import benchmark
+from pytorch3d.renderer import AbsorptionOnlyRaymarcher, EmissionAbsorptionRaymarcher
+from tests.test_raymarching import TestRaymarching
+
+
def bm_raymarching() -> None:
    """Benchmark EA and AO raymarchers over ray-count / points-per-ray grids."""
    case_grid = {
        "raymarcher_type": [EmissionAbsorptionRaymarcher, AbsorptionOnlyRaymarcher],
        "n_rays": [10, 1000, 10000],
        "n_pts_per_ray": [10, 1000, 10000],
    }
    arg_names = list(case_grid)
    kwargs_list = [
        dict(zip(arg_names, values))
        for values in itertools.product(*case_grid.values())
    ]

    benchmark(TestRaymarching.raymarcher, "RAYMARCHER", kwargs_list, warmup_iters=1)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_raysampling.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_raysampling.py
new file mode 100644
index 0000000000000000000000000000000000000000..09b0f1283e65a2788c43d8002822283d7b02e393
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_raysampling.py
@@ -0,0 +1,47 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import itertools
+
+from fvcore.common.benchmark import benchmark
+from pytorch3d.renderer import (
+ FoVOrthographicCameras,
+ FoVPerspectiveCameras,
+ MonteCarloRaysampler,
+ MultinomialRaysampler,
+ NDCMultinomialRaysampler,
+ OrthographicCameras,
+ PerspectiveCameras,
+)
+from tests.test_raysampling import TestRaysampling
+
+
def bm_raysampling() -> None:
    """Benchmark each raysampler type against each camera type over size grids."""
    case_grid = {
        "raysampler_type": [
            MultinomialRaysampler,
            NDCMultinomialRaysampler,
            MonteCarloRaysampler,
        ],
        "camera_type": [
            PerspectiveCameras,
            OrthographicCameras,
            FoVPerspectiveCameras,
            FoVOrthographicCameras,
        ],
        "batch_size": [1, 10],
        "n_pts_per_ray": [10, 1000, 10000],
        "image_width": [10, 300],
        "image_height": [10, 300],
    }
    arg_names = list(case_grid)
    kwargs_list = [
        dict(zip(arg_names, values))
        for values in itertools.product(*case_grid.values())
    ]

    benchmark(TestRaysampling.raysampler, "RAYSAMPLER", kwargs_list, warmup_iters=1)


if __name__ == "__main__":
    bm_raysampling()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_render_volumes.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_render_volumes.py
new file mode 100644
index 0000000000000000000000000000000000000000..abe76d93205d72f64d3eff1b057e5f1acbd8e281
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_render_volumes.py
@@ -0,0 +1,28 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import itertools
+
+from fvcore.common.benchmark import benchmark
+from pytorch3d.renderer import AbsorptionOnlyRaymarcher, EmissionAbsorptionRaymarcher
+from tests.test_render_volumes import TestRenderVolumes
+
+
def bm_render_volumes() -> None:
    """Benchmark the volume renderer over volume sizes, shapes and ray grids."""
    case_grid = {
        "volume_size": [(17, 17, 17), (129, 129, 129)],
        "batch_size": [1, 5],
        "shape": ["sphere", "cube"],
        "raymarcher_type": [EmissionAbsorptionRaymarcher, AbsorptionOnlyRaymarcher],
        "n_rays_per_image": [64**2, 256**2],
        "n_pts_per_ray": [16, 128],
    }
    arg_names = list(case_grid)
    kwargs_list = [
        dict(zip(arg_names, values))
        for values in itertools.product(*case_grid.values())
    ]

    benchmark(
        TestRenderVolumes.renderer, "VOLUME_RENDERER", kwargs_list, warmup_iters=1
    )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_sample_pdf.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_sample_pdf.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f94df8f9a1915001d929177a4c47006ccbaeefe
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_sample_pdf.py
@@ -0,0 +1,37 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from itertools import product
+
+from fvcore.common.benchmark import benchmark
+from tests.test_sample_pdf import TestSamplePDF
+
+
def bm_sample_pdf() -> None:
    """Benchmark sample_pdf across backends, batch sizes and bin counts."""
    kwargs_list = [
        {
            "backend": backend,
            "n_samples": n_samples,
            "batch_size": batch_size,
            "n_bins": n_bins,
        }
        for backend, n_samples, batch_size, n_bins in product(
            ["python_cuda", "cuda", "python_cpu", "cpu"],
            [64],
            [1024, 10240],
            [62, 600],
        )
    ]

    benchmark(TestSamplePDF.bm_fn, "SAMPLE_PDF", kwargs_list, warmup_iters=1)


if __name__ == "__main__":
    bm_sample_pdf()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_se3.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_se3.py
new file mode 100644
index 0000000000000000000000000000000000000000..62ac896730cb10ecf0e616b43986a4b9e5c4faec
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_se3.py
@@ -0,0 +1,23 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from fvcore.common.benchmark import benchmark
+from tests.test_se3 import TestSE3
+
+
def bm_se3() -> None:
    """Benchmark the SE(3) exponential and logarithm maps at several batch sizes."""
    kwargs_list = [{"batch_size": b} for b in (1, 10, 100, 1000)]
    benchmark(TestSE3.se3_expmap, "SE3_EXP", kwargs_list, warmup_iters=1)
    benchmark(TestSE3.se3_logmap, "SE3_LOG", kwargs_list, warmup_iters=1)


if __name__ == "__main__":
    bm_se3()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_vert_align.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_vert_align.py
new file mode 100644
index 0000000000000000000000000000000000000000..6670029e89203fbd5e516d1cb57f15b946c94cec
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/benchmarks/bm_vert_align.py
@@ -0,0 +1,37 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+from itertools import product
+
+import torch
+from fvcore.common.benchmark import benchmark
+from tests.test_vert_align import TestVertAlign
+
+
def bm_vert_align() -> None:
    """Benchmark vert_align on CPU (and CUDA if present) over mesh-size grids."""
    devices = ["cpu"] + (["cuda"] if torch.cuda.is_available() else [])

    kwargs_list = [
        {"num_meshes": n, "num_verts": v, "num_faces": f, "device": d}
        for n, v, f, d in product([2, 10, 32], [100, 1000], [300, 3000], devices)
    ]

    benchmark(
        TestVertAlign.vert_align_with_init, "VERT_ALIGN", kwargs_list, warmup_iters=1
    )


if __name__ == "__main__":
    bm_vert_align()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_files_obj/model.mtl b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_files_obj/model.mtl
new file mode 100644
index 0000000000000000000000000000000000000000..d6a0f4fb3c119916f2c324f846a040879e4b6e2a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_files_obj/model.mtl
@@ -0,0 +1,9 @@
+newmtl material_1
+map_Kd material_1.png
+
+# Test colors
+
+Ka 1.000 1.000 1.000 # white
+Kd 1.000 1.000 1.000 # white
+Ks 0.000 0.000 0.000 # black
+Ns 10.0
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_files_obj/model.obj b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_files_obj/model.obj
new file mode 100644
index 0000000000000000000000000000000000000000..cf411442fd4d6e5364e0e49aea0e3bbd570d6b06
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_files_obj/model.obj
@@ -0,0 +1,10 @@
+
+mtllib model.mtl
+
+v 0.1 0.2 0.3
+v 0.2 0.3 0.4
+v 0.3 0.4 0.5
+v 0.4 0.5 0.6
+usemtl material_1
+f 1 2 3
+f 1 2 4
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_files_obj/model2.obj b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_files_obj/model2.obj
new file mode 100644
index 0000000000000000000000000000000000000000..df393f9f1b360ded4b6d232039702a2c745f5529
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_files_obj/model2.obj
@@ -0,0 +1,10 @@
+
+mtllib model2.mtl
+
+v 0.1 0.2 0.3
+v 0.2 0.3 0.4
+v 0.3 0.4 0.5
+v 0.4 0.5 0.6
+usemtl material_1
+f 1 2 3
+f 1 2 4
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_usemtl/README.md b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_usemtl/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1c9b08bea9e0a4efac7770c872a7aa2b6023e307
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_usemtl/README.md
@@ -0,0 +1,7 @@
+# Acknowledgements
+
+This is a copy of docs/tutorials/data/cow_mesh with line 6159 (`usemtl material_1`) removed, to test behavior when the `usemtl material_1` declaration is absent.
+
+Thank you to Keenan Crane for allowing the cow mesh model to be used freely in the public domain.
+
+###### Source: http://www.cs.cmu.edu/~kmcrane/Projects/ModelRepository/
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_usemtl/cow.mtl b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_usemtl/cow.mtl
new file mode 100644
index 0000000000000000000000000000000000000000..c3bc054358c3db2526520c29f4c30ac812f7c370
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_usemtl/cow.mtl
@@ -0,0 +1,9 @@
+newmtl material_1
+map_Kd cow_texture.png
+
+# Test colors
+
+Ka 1.000 1.000 1.000 # white
+Kd 1.000 1.000 1.000 # white
+Ks 0.000 0.000 0.000 # black
+Ns 10.0
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_usemtl/cow.obj b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_usemtl/cow.obj
new file mode 100644
index 0000000000000000000000000000000000000000..2f349d0dac3994869486bdac7f15892f5acb2508
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/missing_usemtl/cow.obj
@@ -0,0 +1,12014 @@
+
+mtllib cow.mtl
+
+v 0.348799 -0.334989 -0.0832331
+v 0.313132 -0.399051 0.881192
+v 0.266758 0.181628 0.122726
+v 0.229555 0.0663178 0.828702
+v 0.353623 -0.0486456 0.443969
+v 0.335878 -0.384495 0.425693
+v 0.305362 0.0307983 -0.00655663
+v 0.279611 -0.0552387 0.858547
+v 0.266719 0.10578 0.46681
+v 0.149341 -0.451522 0.166423
+v 0.12606 -0.162036 -0.185668
+v 0.114009 -0.358339 -0.0759521
+v 0.101546 -0.475382 0.663157
+v 0.240757 -0.435635 0.421981
+v 0.385825 -0.174501 0.180766
+v 0.358602 -0.37797 0.17897
+v 0.345953 -0.229705 0.664126
+v 0.316102 -0.419396 0.649153
+v 0.131961 -0.201382 0.951731
+v 0.0876766 -0.391182 0.901606
+v 0.161859 0.285356 0.303098
+v 0.177264 0.282839 0.0875537
+v 0.158786 0.0751889 -0.141343
+v 0.310974 -0.0974099 -0.0697762
+v 0.138236 0.191142 0.700393
+v 0.13061 0.109881 0.873657
+v 0.287544 -0.214549 0.864938
+v 0.25445 0.101336 0.680112
+v 0.34304 -0.0541165 0.672286
+v 0.369388 -0.0242775 0.205838
+v 0.273782 0.160332 0.273861
+v 0.36907 -0.228332 0.435279
+v 0.146074 0.206063 0.488052
+v 0.15689 0.317253 -0.0179554
+v 0.184529 0.181235 -0.238574
+v 0.229877 0.446038 -0.0428095
+v 0.326584 0.303676 -0.375494
+v 0.257915 0.488969 -0.517721
+v 0.187711 0.730295 -0.437134
+v 0.190442 0.122856 -0.437696
+v 0.271501 0.201979 -0.579853
+v 0.227961 0.337016 -0.641468
+v 0.323288 -0.645898 -0.0650069
+v 0.277559 -0.660659 0.872426
+v 0.15321 -0.671553 0.116597
+v 0.153343 -0.652512 -0.0628489
+v 0.129752 -0.680413 0.714233
+v 0.310744 -0.658159 0.12474
+v 0.270197 -0.670204 0.705429
+v 0.125665 -0.664371 0.877017
+v 0.264169 0.646946 -0.369897
+v 0.180359 0.68646 -0.148279
+v 0.263204 0.560375 -0.137191
+v 0.371765 0.683869 -0.321196
+v 0.432195 0.751246 -0.233457
+v 0.424574 0.680439 -0.150855
+v 0.379346 0.604049 -0.213075
+v 0 -0.192084 -0.196407
+v 0 0.505678 -0.563016
+v 0 0.340641 -0.665066
+v 0 0.0414775 -0.241591
+v 0 0.117615 -0.305384
+v 0 0.100384 -0.450564
+v 0 0.182587 -0.616155
+v -4.33681e-19 0.765067 -0.449072
+v -4.33681e-19 0.834757 -0.307668
+v 0 -0.434774 0.0615192
+v 0 -0.354126 -0.0677048
+v 0 0.326124 0.139538
+v 0 0.352207 0.0388631
+v 0 0.499175 0.0586613
+v -4.33681e-19 0.723906 -0.0597107
+v 0 0.816061 -0.189369
+v 0 -0.435249 0.726548
+v 0 -0.533724 0.415511
+v 0 0.301194 0.312719
+v 0 0.233342 0.49566
+v 0 -0.340037 0.891691
+v 0 -0.194995 0.971379
+v 0 0.220553 0.711025
+v 0 0.130478 0.891171
+v 0.164015 0.929339 -0.232094
+v 0.205905 0.915741 -0.313374
+v 0.15338 0.935554 -0.282029
+v 0.221786 0.905461 -0.256021
+v 0.115236 0.837896 -0.222054
+v 0.086327 0.812209 -0.211465
+v 0.172231 0.819312 -0.334104
+v 0.162464 0.791078 -0.351846
+v 0.0787362 0.822102 -0.294366
+v 0.102999 0.844736 -0.288229
+v 0.219399 0.754552 -0.28348
+v 0.199751 0.790382 -0.244881
+v 0.184684 0.762009 -0.196152
+v 0.279703 0.762439 -0.233186
+v 0.120311 -0.456517 0.243094
+v 0.111282 -0.477316 0.58188
+v 0.186164 -0.487701 0.417533
+v 0 -0.462404 0.208739
+v 0 -0.483948 0.606421
+v 0.17185 -0.0410977 0.94527
+v 0.106249 -0.0142167 0.968533
+v 0.107395 -0.0653463 0.975253
+v 0 0.00228925 0.987177
+v 0 -0.0756163 0.995507
+v 0.0517509 -0.0608359 1.02473
+v 0.0517951 -0.0798093 1.0112
+v 0 -0.0688251 1.04807
+v 0 -0.0952192 1.02642
+v -0.348799 -0.334989 -0.0832331
+v -0.313132 -0.399051 0.881192
+v -0.266758 0.181628 0.122726
+v -0.229555 0.0663178 0.828702
+v -0.353623 -0.0486456 0.443969
+v -0.335878 -0.384495 0.425693
+v -0.305362 0.0307983 -0.00655663
+v -0.279611 -0.0552387 0.858547
+v -0.266719 0.10578 0.46681
+v -0.149341 -0.451522 0.166423
+v -0.12606 -0.162036 -0.185668
+v -0.114009 -0.358339 -0.0759521
+v -0.101546 -0.475382 0.663157
+v -0.240757 -0.435635 0.421981
+v -0.385825 -0.174501 0.180766
+v -0.358602 -0.37797 0.17897
+v -0.345953 -0.229705 0.664126
+v -0.316102 -0.419396 0.649153
+v -0.131961 -0.201382 0.951731
+v -0.0876766 -0.391182 0.901606
+v -0.161859 0.285356 0.303098
+v -0.177264 0.282839 0.0875537
+v -0.158786 0.0751889 -0.141343
+v -0.310974 -0.0974099 -0.0697762
+v -0.138236 0.191142 0.700393
+v -0.13061 0.109881 0.873657
+v -0.287544 -0.214549 0.864938
+v -0.25445 0.101336 0.680112
+v -0.34304 -0.0541165 0.672286
+v -0.369388 -0.0242775 0.205838
+v -0.273782 0.160332 0.273861
+v -0.36907 -0.228332 0.435279
+v -0.146074 0.206063 0.488052
+v -0.15689 0.317253 -0.0179554
+v -0.184529 0.181235 -0.238574
+v -0.229877 0.446038 -0.0428095
+v -0.326584 0.303676 -0.375494
+v -0.257915 0.488969 -0.517721
+v -0.187711 0.730295 -0.437134
+v -0.190442 0.122856 -0.437696
+v -0.271501 0.201979 -0.579853
+v -0.227961 0.337016 -0.641468
+v -0.323288 -0.645898 -0.0650069
+v -0.277559 -0.660659 0.872426
+v -0.15321 -0.671553 0.116597
+v -0.153343 -0.652512 -0.0628489
+v -0.129752 -0.680413 0.714233
+v -0.310744 -0.658159 0.12474
+v -0.270197 -0.670204 0.705429
+v -0.125665 -0.664371 0.877017
+v -0.264169 0.646946 -0.369897
+v -0.180359 0.68646 -0.148279
+v -0.263204 0.560375 -0.137191
+v -0.371765 0.683869 -0.321196
+v -0.432195 0.751246 -0.233457
+v -0.424574 0.680439 -0.150855
+v -0.379346 0.604049 -0.213075
+v -0.164015 0.929339 -0.232094
+v -0.205905 0.915741 -0.313374
+v -0.15338 0.935554 -0.282029
+v -0.221786 0.905461 -0.256021
+v -0.115236 0.837896 -0.222054
+v -0.086327 0.812209 -0.211465
+v -0.172231 0.819312 -0.334104
+v -0.162464 0.791078 -0.351846
+v -0.0787362 0.822102 -0.294366
+v -0.102999 0.844736 -0.288229
+v -0.219399 0.754552 -0.28348
+v -0.199751 0.790382 -0.244881
+v -0.184684 0.762009 -0.196152
+v -0.279703 0.762439 -0.233186
+v -0.120311 -0.456517 0.243094
+v -0.111282 -0.477316 0.58188
+v -0.186164 -0.487701 0.417533
+v -0.17185 -0.0410977 0.94527
+v -0.106249 -0.0142167 0.968533
+v -0.107395 -0.0653463 0.975253
+v -0.0517509 -0.0608359 1.02473
+v -0.0517951 -0.0798093 1.0112
+v 0.287063 -0.417912 0.42339
+v 0.223323 -0.431749 0.300839
+v 0.262599 -0.446574 0.214165
+v 0.345683 -0.367458 0.305622
+v 0.284862 -0.410239 0.304857
+v 0.320467 -0.390454 0.538527
+v 0.218649 -0.474853 0.619945
+v 0.19621 -0.447285 0.538732
+v 0.257025 -0.429064 0.536591
+v 0.240602 -0.341214 -0.117452
+v 0.12147 -0.249274 -0.145022
+v 0.221157 -0.120393 -0.146058
+v 0.328715 -0.192761 -0.0778193
+v 0.229478 -0.212011 -0.133147
+v 0.307987 -0.0331195 -0.049691
+v 0.139893 -0.0548182 -0.181103
+v 0.237075 0.0521949 -0.0759725
+v 0.227173 -0.0372356 -0.128413
+v 0.301492 -0.292885 0.870674
+v 0.213279 -0.204742 0.922518
+v 0.117499 -0.281642 0.935739
+v 0.211816 -0.394732 0.922409
+v 0.215547 -0.286415 0.919674
+v 0.364968 -0.12541 0.046235
+v 0.382458 -0.25968 0.176197
+v 0.385194 -0.351205 0.0306159
+v 0.375418 -0.216313 0.0359591
+v 0.382575 -0.209764 0.311653
+v 0.361147 -0.316624 0.430338
+v 0.374384 -0.294532 0.308248
+v 0.368668 -0.137134 0.438729
+v 0.385132 -0.101185 0.188168
+v 0.365146 -0.0406368 0.324829
+v 0.381949 -0.125584 0.315412
+v 0.350232 0.00253885 0.0908221
+v 0.361458 -0.0619596 0.0609153
+v 0.343331 -0.409171 0.778577
+v 0.341862 -0.313907 0.657949
+v 0.328019 -0.223318 0.773747
+v 0.336544 -0.303107 0.775339
+v 0.283052 -0.132631 0.863571
+v 0.351001 -0.142438 0.668828
+v 0.322487 -0.0565049 0.774019
+v 0.329 -0.140489 0.774308
+v 0.349301 -0.0521301 0.560904
+v 0.357572 -0.233122 0.551664
+v 0.360647 -0.141814 0.556564
+v 0.349272 -0.319879 0.544579
+v 0.23281 0.229543 0.110217
+v 0.170184 0.299829 0.20555
+v 0.221341 0.236481 0.293418
+v 0.272682 0.181352 0.189589
+v 0.227226 0.252859 0.204008
+v 0.271155 0.129527 0.366223
+v 0.153276 0.244216 0.39384
+v 0.20969 0.164574 0.478658
+v 0.215375 0.198113 0.382954
+v 0.24282 0.0875955 0.770725
+v 0.198521 0.154682 0.689709
+v 0.134533 0.161682 0.796578
+v 0.187262 0.086887 0.854887
+v 0.191558 0.132026 0.78245
+v 0.141545 0.196582 0.593509
+v 0.26179 0.101674 0.574769
+v 0.204392 0.157257 0.584394
+v 0.327755 0.0676272 0.239516
+v 0.317321 0.0335117 0.454173
+v 0.324726 0.0469262 0.344192
+v 0.307072 0.0296092 0.675079
+v 0.253941 0.0157408 0.848168
+v 0.289863 0.0225221 0.77107
+v 0.190901 0.188016 -0.0300482
+v 0.288174 0.116771 0.0639197
+v 0.239299 0.151564 0.0118342
+v 0.313467 0.030566 0.566851
+v 0.317033 0.0909783 0.142973
+v 0.150018 0.148066 -0.170355
+v 0.192003 0.257627 -0.114007
+v 0.150923 0.290686 0.0162651
+v 0.172619 0.221245 -0.0722104
+v 0.274031 0.23831 -0.301318
+v 0.294876 0.383503 -0.203773
+v 0.194068 0.37243 -0.0269037
+v 0.248629 0.311098 -0.153862
+v 0.196806 0.141274 -0.362924
+v 0.23911 0.14497 -0.502892
+v 0.324701 0.24108 -0.502727
+v 0.278147 0.181024 -0.423508
+v 0.263786 0.27805 -0.630217
+v 0.250979 0.388662 -0.601302
+v 0.318198 0.390558 -0.446528
+v 0.314593 0.32071 -0.563292
+v 0.338046 -0.680095 0.0309463
+v 0.228165 -0.692422 0.147407
+v 0.12828 -0.689708 0.0245746
+v 0.238161 -0.677382 -0.0918856
+v 0.229681 -0.733842 0.0266899
+v 0.20056 -0.687393 0.898863
+v 0.107919 -0.696884 0.796995
+v 0.199176 -0.699686 0.68654
+v 0.291818 -0.690195 0.788408
+v 0.198244 -0.736784 0.793448
+v 0.307855 -0.55258 0.89062
+v 0.294592 -0.571581 0.678753
+v 0.335563 -0.561971 0.78431
+v 0.0593616 -0.458076 0.795932
+v 0.110927 -0.595142 0.691599
+v 0.102733 -0.558625 0.896274
+v 0.0716769 -0.583028 0.796447
+v 0.203007 -0.591569 0.647127
+v 0.206666 -0.553636 0.930804
+v 0.339121 -0.546516 0.150832
+v 0.352933 -0.520557 -0.0885122
+v 0.383244 -0.532332 0.0297109
+v 0.132569 -0.534652 -0.0767316
+v 0.24402 -0.524662 -0.124285
+v 0.143499 -0.577512 0.141914
+v 0.241073 -0.571789 0.188527
+v 0.0964942 -0.434904 0.0342915
+v 0.0999951 -0.56397 0.0317575
+v 0.271576 0.561647 -0.416318
+v 0.282894 0.56989 -0.257283
+v 0.23851 0.520387 -0.0907886
+v 0.293195 0.482084 -0.276654
+v 0.213045 0.609177 -0.109989
+v 0.321376 0.576724 -0.185683
+v 0.422878 0.632073 -0.168661
+v 0.32688 0.680784 -0.145207
+v 0.326345 0.612341 -0.141546
+v 0.312594 0.674309 -0.341185
+v 0.379053 0.63587 -0.283895
+v 0.3154 0.606871 -0.278418
+v 0.417773 0.731266 -0.288914
+v 0.460287 0.727236 -0.177367
+v 0.454185 0.684351 -0.230461
+v 0 0.320051 0.223298
+v 0.0857686 0.301184 0.309872
+v 0.0922474 0.320203 0.127906
+v 0.0910385 0.318251 0.216664
+v 0 0.227033 0.602086
+v 0.0710686 0.213115 0.708099
+v 0.0750596 0.22755 0.493766
+v 0.0724773 0.219588 0.599773
+v 0 0.186603 0.810651
+v 0.0670456 0.125156 0.886255
+v 0.0695062 0.18012 0.806775
+v 0 -0.40432 -0.00187701
+v 0.0502685 -0.34467 -0.0665397
+v 0.0661753 -0.432919 0.0879789
+v 0.056363 -0.399912 0.0126968
+v 0 -0.395546 0.810009
+v 0.0398588 -0.442711 0.714357
+v 0.0361117 -0.34665 0.891624
+v 0.0321101 -0.404726 0.804765
+v 0 0.264692 0.401284
+v 0.0798727 0.262384 0.399618
+v 0 0.330325 0.0716557
+v 0.0874752 0.345901 0.0284499
+v 0.0858416 0.32349 0.0605597
+v 0 0.409384 0.0515977
+v 0.124296 0.487548 0.0340513
+v 0.105997 0.402715 0.0360884
+v 0 0.641579 -0.507325
+v 0.104733 0.755088 -0.444831
+v 0.214286 0.629383 -0.484754
+v 0.136392 0.50382 -0.556974
+v 0.121817 0.636846 -0.502061
+v 0 0.412943 -0.623842
+v 0.139278 0.340909 -0.661549
+v 0.138945 0.411476 -0.620153
+v 0 -0.271137 0.943676
+v 0.0622199 -0.197393 0.966789
+v 0.0501277 -0.274014 0.941915
+v 0 -0.286073 -0.138523
+v 0.0553298 -0.185801 -0.197022
+v 0.050202 -0.275405 -0.142323
+v 0 -0.0695404 -0.225378
+v 0.0766662 0.0457919 -0.217551
+v 0.0655056 -0.0670069 -0.214493
+v 0 0.101519 -0.264918
+v 0.0933959 0.130309 -0.285229
+v 0.0838463 0.11062 -0.238883
+v 0 0.106874 -0.369301
+v 0.114603 0.104637 -0.445501
+v 0.102596 0.115199 -0.359834
+v 0 0.126441 -0.539369
+v 0.153665 0.185489 -0.611191
+v 0.137484 0.129002 -0.534091
+v 0 0.259825 -0.661348
+v 0.150371 0.262453 -0.657355
+v 0.0819636 0.707011 -0.0978848
+v 0 0.613137 0.0118372
+v 0.128934 0.593549 -0.0276065
+v 0.230899 0.696193 -0.415496
+v 0.2462 0.632058 -0.451998
+v 0.2255 0.918412 -0.287909
+v 0.182289 0.937359 -0.304334
+v 0.155831 0.94592 -0.253016
+v 0.199042 0.926972 -0.236591
+v 0.198381 0.950853 -0.26973
+v 0.195536 0.768708 -0.323732
+v 0.162591 0.796641 -0.337706
+v 0.198135 0.804412 -0.300184
+v 0.204856 0.767832 -0.258627
+v 0.190344 0.780419 -0.305321
+v 0.158595 0.817014 -0.220186
+v 0.102838 0.815304 -0.21903
+v 0.12548 0.786281 -0.190531
+v 0.18859 0.771476 -0.215131
+v 0.14521 0.793415 -0.208712
+v 0.113814 0.810725 -0.339255
+v 0.0911506 0.822276 -0.28966
+v 0.134346 0.834135 -0.324328
+v 0.12321 0.811864 -0.327105
+v 0.0699464 0.823757 -0.248502
+v 0.0956349 0.846841 -0.248877
+v 0.0836698 0.824545 -0.249204
+v 0.24364 0.757848 -0.207433
+v 0.246443 0.756953 -0.257453
+v 0.21289 0.762245 -0.22883
+v 0.355751 0.762428 -0.238497
+v 0.249966 0.723503 -0.332259
+v 0.32003 0.739642 -0.304714
+v 0.181848 0.741315 -0.180043
+v 0.328516 0.741977 -0.181175
+v 0.0499681 0.812176 -0.199395
+v 0 0.785709 -0.129116
+v 0.0788354 0.774284 -0.158189
+v 0.0505691 0.828853 -0.304368
+v 0.168022 0.778755 -0.386065
+v 0 0.821114 -0.378707
+v 0.0897195 0.811796 -0.373587
+v 0 0.831803 -0.246303
+v 0.0404745 0.828353 -0.247459
+v 0.211302 0.744371 -0.365196
+v 0.193702 0.87269 -0.329392
+v 0.216553 0.856661 -0.251136
+v 0.218551 0.860755 -0.297102
+v 0.126841 0.897388 -0.285836
+v 0.157513 0.887986 -0.320358
+v 0.138938 0.890583 -0.222912
+v 0.120161 0.900194 -0.247801
+v 0.181137 0.872948 -0.224154
+v 0.106299 -0.467348 0.612794
+v 0.156751 -0.485651 0.517662
+v 0.218865 -0.458568 0.419816
+v 0.177713 -0.464461 0.527871
+v 0 -0.459742 0.666605
+v 0.0640756 -0.481225 0.60015
+v 0.0486808 -0.462533 0.655449
+v 0.129769 -0.445891 0.210992
+v 0.0674919 -0.459184 0.218582
+v 0 -0.445956 0.12884
+v 0.0638357 -0.444618 0.15006
+v 0.160168 -0.471758 0.313909
+v 0.192938 -0.45045 0.307624
+v 0 -0.518477 0.519852
+v 0.106971 -0.518553 0.416032
+v 0.0913755 -0.507594 0.518132
+v 0 -0.503509 0.307563
+v 0.0906862 -0.492986 0.310407
+v 0.148938 -0.0188178 0.950827
+v 0.109744 -0.0412763 0.976341
+v 0.152295 -0.0598695 0.957881
+v 0.133131 -0.0374762 0.965091
+v 0.129144 -0.123186 0.961976
+v 0.214922 -0.0472768 0.917853
+v 0.202946 -0.113321 0.930645
+v 0.124777 0.0448234 0.926767
+v 0.189431 0.0176502 0.907717
+v 0.0541294 -0.000582154 0.980957
+v 0 0.0636729 0.946137
+v 0.0627521 0.0592134 0.940397
+v 0.0544734 -0.0758718 0.989219
+v 0 -0.121134 0.98554
+v 0.0631528 -0.123094 0.979468
+v 0.0716307 -0.0432 1.00541
+v 0.052795 -0.0725924 1.01945
+v 0.0717681 -0.0690106 0.994894
+v 0.0809297 -0.0558501 0.997263
+v 0 -0.0400118 1.02513
+v 0.0297219 -0.0653048 1.04059
+v 0.0406958 -0.0395224 1.01839
+v 0.0297587 -0.0908237 1.02001
+v 0 -0.0782912 1.00911
+v 0.0407812 -0.0765567 1.00299
+v 0 -0.0903561 1.04431
+v 0.0264068 -0.0851806 1.03686
+v -0.345683 -0.367458 0.305622
+v -0.262599 -0.446574 0.214165
+v -0.223323 -0.431749 0.300839
+v -0.287063 -0.417912 0.42339
+v -0.284862 -0.410239 0.304857
+v -0.19621 -0.447285 0.538732
+v -0.218649 -0.474853 0.619945
+v -0.320467 -0.390454 0.538527
+v -0.257025 -0.429064 0.536591
+v -0.328715 -0.192761 -0.0778193
+v -0.221157 -0.120393 -0.146058
+v -0.12147 -0.249274 -0.145022
+v -0.240602 -0.341214 -0.117452
+v -0.229478 -0.212011 -0.133147
+v -0.237075 0.0521949 -0.0759725
+v -0.139893 -0.0548182 -0.181103
+v -0.307987 -0.0331195 -0.049691
+v -0.227173 -0.0372356 -0.128413
+v -0.211816 -0.394732 0.922409
+v -0.117499 -0.281642 0.935739
+v -0.213279 -0.204742 0.922518
+v -0.301492 -0.292885 0.870674
+v -0.215547 -0.286415 0.919674
+v -0.385194 -0.351205 0.0306159
+v -0.382458 -0.25968 0.176197
+v -0.364968 -0.12541 0.046235
+v -0.375418 -0.216313 0.0359591
+v -0.361147 -0.316624 0.430338
+v -0.382575 -0.209764 0.311653
+v -0.374384 -0.294532 0.308248
+v -0.365146 -0.0406368 0.324829
+v -0.385132 -0.101185 0.188168
+v -0.368668 -0.137134 0.438729
+v -0.381949 -0.125584 0.315412
+v -0.350232 0.00253885 0.0908221
+v -0.361458 -0.0619596 0.0609153
+v -0.328019 -0.223318 0.773747
+v -0.341862 -0.313907 0.657949
+v -0.343331 -0.409171 0.778577
+v -0.336544 -0.303107 0.775339
+v -0.322487 -0.0565049 0.774019
+v -0.351001 -0.142438 0.668828
+v -0.283052 -0.132631 0.863571
+v -0.329 -0.140489 0.774308
+v -0.357572 -0.233122 0.551664
+v -0.349301 -0.0521301 0.560904
+v -0.360647 -0.141814 0.556564
+v -0.349272 -0.319879 0.544579
+v -0.272682 0.181352 0.189589
+v -0.221341 0.236481 0.293418
+v -0.170184 0.299829 0.20555
+v -0.23281 0.229543 0.110217
+v -0.227226 0.252859 0.204008
+v -0.20969 0.164574 0.478658
+v -0.153276 0.244216 0.39384
+v -0.271155 0.129527 0.366223
+v -0.215375 0.198113 0.382954
+v -0.187262 0.086887 0.854887
+v -0.134533 0.161682 0.796578
+v -0.198521 0.154682 0.689709
+v -0.24282 0.0875955 0.770725
+v -0.191558 0.132026 0.78245
+v -0.26179 0.101674 0.574769
+v -0.141545 0.196582 0.593509
+v -0.204392 0.157257 0.584394
+v -0.317321 0.0335117 0.454173
+v -0.327755 0.0676272 0.239516
+v -0.324726 0.0469262 0.344192
+v -0.253941 0.0157408 0.848168
+v -0.307072 0.0296092 0.675079
+v -0.289863 0.0225221 0.77107
+v -0.288174 0.116771 0.0639197
+v -0.190901 0.188016 -0.0300482
+v -0.239299 0.151564 0.0118342
+v -0.313467 0.030566 0.566851
+v -0.317033 0.0909783 0.142973
+v -0.150923 0.290686 0.0162651
+v -0.192003 0.257627 -0.114007
+v -0.150018 0.148066 -0.170355
+v -0.172619 0.221245 -0.0722104
+v -0.194068 0.37243 -0.0269037
+v -0.294876 0.383503 -0.203773
+v -0.274031 0.23831 -0.301318
+v -0.248629 0.311098 -0.153862
+v -0.324701 0.24108 -0.502727
+v -0.23911 0.14497 -0.502892
+v -0.196806 0.141274 -0.362924
+v -0.278147 0.181024 -0.423508
+v -0.318198 0.390558 -0.446528
+v -0.250979 0.388662 -0.601302
+v -0.263786 0.27805 -0.630217
+v -0.314593 0.32071 -0.563292
+v -0.238161 -0.677382 -0.0918856
+v -0.12828 -0.689708 0.0245746
+v -0.228165 -0.692422 0.147407
+v -0.338046 -0.680095 0.0309463
+v -0.229681 -0.733842 0.0266899
+v -0.291818 -0.690195 0.788408
+v -0.199176 -0.699686 0.68654
+v -0.107919 -0.696884 0.796995
+v -0.20056 -0.687393 0.898863
+v -0.198244 -0.736784 0.793448
+v -0.294592 -0.571581 0.678753
+v -0.307855 -0.55258 0.89062
+v -0.335563 -0.561971 0.78431
+v -0.102733 -0.558625 0.896274
+v -0.110927 -0.595142 0.691599
+v -0.0593616 -0.458076 0.795932
+v -0.0716769 -0.583028 0.796447
+v -0.203007 -0.591569 0.647127
+v -0.206666 -0.553636 0.930804
+v -0.352933 -0.520557 -0.0885122
+v -0.339121 -0.546516 0.150832
+v -0.383244 -0.532332 0.0297109
+v -0.132569 -0.534652 -0.0767316
+v -0.24402 -0.524662 -0.124285
+v -0.143499 -0.577512 0.141914
+v -0.241073 -0.571789 0.188527
+v -0.0964942 -0.434904 0.0342915
+v -0.0999951 -0.56397 0.0317575
+v -0.23851 0.520387 -0.0907886
+v -0.282894 0.56989 -0.257283
+v -0.271576 0.561647 -0.416318
+v -0.293195 0.482084 -0.276654
+v -0.32688 0.680784 -0.145207
+v -0.422878 0.632073 -0.168661
+v -0.321376 0.576724 -0.185683
+v -0.213045 0.609177 -0.109989
+v -0.326345 0.612341 -0.141546
+v -0.379053 0.63587 -0.283895
+v -0.312594 0.674309 -0.341185
+v -0.3154 0.606871 -0.278418
+v -0.460287 0.727236 -0.177367
+v -0.417773 0.731266 -0.288914
+v -0.454185 0.684351 -0.230461
+v -0.0922474 0.320203 0.127906
+v -0.0857686 0.301184 0.309872
+v -0.0910385 0.318251 0.216664
+v -0.0750596 0.22755 0.493766
+v -0.0710686 0.213115 0.708099
+v -0.0724773 0.219588 0.599773
+v -0.0670456 0.125156 0.886255
+v -0.0695062 0.18012 0.806775
+v -0.0661753 -0.432919 0.0879789
+v -0.0502685 -0.34467 -0.0665397
+v -0.056363 -0.399912 0.0126968
+v -0.0361117 -0.34665 0.891624
+v -0.0398588 -0.442711 0.714357
+v -0.0321101 -0.404726 0.804765
+v -0.0798727 0.262384 0.399618
+v -0.0874752 0.345901 0.0284499
+v -0.0858416 0.32349 0.0605597
+v -0.124296 0.487548 0.0340513
+v -0.105997 0.402715 0.0360884
+v -0.136392 0.50382 -0.556974
+v -0.214286 0.629383 -0.484754
+v -0.104733 0.755088 -0.444831
+v -0.121817 0.636846 -0.502061
+v -0.139278 0.340909 -0.661549
+v -0.138945 0.411476 -0.620153
+v -0.0622199 -0.197393 0.966789
+v -0.0501277 -0.274014 0.941915
+v -0.0553298 -0.185801 -0.197022
+v -0.050202 -0.275405 -0.142323
+v -0.0766662 0.0457919 -0.217551
+v -0.0655056 -0.0670069 -0.214493
+v -0.0933959 0.130309 -0.285229
+v -0.0838463 0.11062 -0.238883
+v -0.114603 0.104637 -0.445501
+v -0.102596 0.115199 -0.359834
+v -0.153665 0.185489 -0.611191
+v -0.137484 0.129002 -0.534091
+v -0.150371 0.262453 -0.657355
+v -0.0819636 0.707011 -0.0978848
+v -0.128934 0.593549 -0.0276065
+v -0.230899 0.696193 -0.415496
+v -0.2462 0.632058 -0.451998
+v -0.199042 0.926972 -0.236591
+v -0.155831 0.94592 -0.253016
+v -0.182289 0.937359 -0.304334
+v -0.2255 0.918412 -0.287909
+v -0.198381 0.950853 -0.26973
+v -0.204856 0.767832 -0.258627
+v -0.198135 0.804412 -0.300184
+v -0.162591 0.796641 -0.337706
+v -0.195536 0.768708 -0.323732
+v -0.190344 0.780419 -0.305321
+v -0.18859 0.771476 -0.215131
+v -0.12548 0.786281 -0.190531
+v -0.102838 0.815304 -0.21903
+v -0.158595 0.817014 -0.220186
+v -0.14521 0.793415 -0.208712
+v -0.134346 0.834135 -0.324328
+v -0.0911506 0.822276 -0.28966
+v -0.113814 0.810725 -0.339255
+v -0.12321 0.811864 -0.327105
+v -0.0956349 0.846841 -0.248877
+v -0.0699464 0.823757 -0.248502
+v -0.0836698 0.824545 -0.249204
+v -0.246443 0.756953 -0.257453
+v -0.24364 0.757848 -0.207433
+v -0.21289 0.762245 -0.22883
+v -0.249966 0.723503 -0.332259
+v -0.355751 0.762428 -0.238497
+v -0.32003 0.739642 -0.304714
+v -0.181848 0.741315 -0.180043
+v -0.328516 0.741977 -0.181175
+v -0.0499681 0.812176 -0.199395
+v -0.0788354 0.774284 -0.158189
+v -0.168022 0.778755 -0.386065
+v -0.0505691 0.828853 -0.304368
+v -0.0897195 0.811796 -0.373587
+v -0.0404745 0.828353 -0.247459
+v -0.211302 0.744371 -0.365196
+v -0.216553 0.856661 -0.251136
+v -0.193702 0.87269 -0.329392
+v -0.218551 0.860755 -0.297102
+v -0.126841 0.897388 -0.285836
+v -0.157513 0.887986 -0.320358
+v -0.138938 0.890583 -0.222912
+v -0.120161 0.900194 -0.247801
+v -0.181137 0.872948 -0.224154
+v -0.218865 -0.458568 0.419816
+v -0.156751 -0.485651 0.517662
+v -0.106299 -0.467348 0.612794
+v -0.177713 -0.464461 0.527871
+v -0.0640756 -0.481225 0.60015
+v -0.0486808 -0.462533 0.655449
+v -0.0674919 -0.459184 0.218582
+v -0.129769 -0.445891 0.210992
+v -0.0638357 -0.444618 0.15006
+v -0.160168 -0.471758 0.313909
+v -0.192938 -0.45045 0.307624
+v -0.106971 -0.518553 0.416032
+v -0.0913755 -0.507594 0.518132
+v -0.0906862 -0.492986 0.310407
+v -0.152295 -0.0598695 0.957881
+v -0.109744 -0.0412763 0.976341
+v -0.148938 -0.0188178 0.950827
+v -0.133131 -0.0374762 0.965091
+v -0.214922 -0.0472768 0.917853
+v -0.129144 -0.123186 0.961976
+v -0.202946 -0.113321 0.930645
+v -0.124777 0.0448234 0.926767
+v -0.189431 0.0176502 0.907717
+v -0.0541294 -0.000582154 0.980957
+v -0.0627521 0.0592134 0.940397
+v -0.0544734 -0.0758718 0.989219
+v -0.0631528 -0.123094 0.979468
+v -0.0717681 -0.0690106 0.994894
+v -0.052795 -0.0725924 1.01945
+v -0.0716307 -0.0432 1.00541
+v -0.0809297 -0.0558501 0.997263
+v -0.0297219 -0.0653048 1.04059
+v -0.0406958 -0.0395224 1.01839
+v -0.0297587 -0.0908237 1.02001
+v -0.0407812 -0.0765567 1.00299
+v -0.0264068 -0.0851806 1.03686
+v 0.313121 -0.40468 0.424303
+v 0.289638 -0.411984 0.363044
+v 0.317818 -0.392423 0.305485
+v 0.341299 -0.375944 0.366232
+v 0.317288 -0.397295 0.364448
+v 0.23935 -0.43199 0.361842
+v 0.251544 -0.422435 0.302332
+v 0.261632 -0.427221 0.422701
+v 0.262332 -0.422664 0.362137
+v 0.2099 -0.457622 0.202318
+v 0.274846 -0.418754 0.252658
+v 0.194817 -0.437402 0.239358
+v 0.231211 -0.430966 0.245486
+v 0.350284 -0.364453 0.244783
+v 0.312495 -0.42258 0.208415
+v 0.316601 -0.396416 0.250199
+v 0.328162 -0.387696 0.483083
+v 0.291362 -0.413379 0.536815
+v 0.275025 -0.421949 0.482067
+v 0.303426 -0.408222 0.482265
+v 0.26979 -0.455493 0.624003
+v 0.236564 -0.444228 0.584476
+v 0.315111 -0.397679 0.592146
+v 0.280089 -0.425608 0.586435
+v 0.156119 -0.458526 0.595789
+v 0.222989 -0.439421 0.53835
+v 0.164011 -0.483033 0.631406
+v 0.191143 -0.453861 0.591118
+v 0.225399 -0.440193 0.480871
+v 0.247502 -0.431674 0.481825
+v 0.300873 -0.335808 -0.11045
+v 0.23546 -0.269794 -0.12322
+v 0.283535 -0.198083 -0.11242
+v 0.339519 -0.257182 -0.0802831
+v 0.292915 -0.260195 -0.110308
+v 0.119478 -0.295776 -0.113872
+v 0.173106 -0.230478 -0.142628
+v 0.176383 -0.34915 -0.10631
+v 0.174532 -0.282877 -0.121531
+v 0.172014 -0.141551 -0.170002
+v 0.224047 -0.163495 -0.142015
+v 0.123113 -0.206388 -0.170045
+v 0.171621 -0.184999 -0.160037
+v 0.318497 -0.140059 -0.0746981
+v 0.26898 -0.103901 -0.112945
+v 0.274851 -0.146856 -0.114171
+v 0.307808 -0.00318692 -0.0317095
+v 0.269878 -0.0318881 -0.0928838
+v 0.232462 0.00608204 -0.106821
+v 0.273281 0.0420733 -0.0438498
+v 0.272342 0.00331308 -0.0724007
+v 0.222802 -0.0788504 -0.141507
+v 0.308256 -0.0631453 -0.0619102
+v 0.26804 -0.0666021 -0.106124
+v 0.131964 -0.111136 -0.188613
+v 0.182883 -0.0457092 -0.157469
+v 0.176218 -0.0952439 -0.168768
+v 0.199052 0.0623656 -0.105989
+v 0.148915 0.00578811 -0.165362
+v 0.190658 0.0074224 -0.137241
+v 0.30851 -0.340088 0.875405
+v 0.263105 -0.289222 0.900577
+v 0.214141 -0.334681 0.919903
+v 0.26887 -0.396352 0.909752
+v 0.266995 -0.336923 0.904292
+v 0.253262 -0.209214 0.897882
+v 0.215216 -0.244729 0.920604
+v 0.293898 -0.252639 0.867139
+v 0.258195 -0.248301 0.898482
+v 0.126656 -0.240414 0.945024
+v 0.164801 -0.284207 0.93026
+v 0.172003 -0.202637 0.93974
+v 0.169955 -0.242358 0.935335
+v 0.149733 -0.3934 0.920339
+v 0.104904 -0.328487 0.922781
+v 0.15673 -0.332639 0.924128
+v 0.358457 -0.200112 -0.0267162
+v 0.380901 -0.277243 0.032671
+v 0.375987 -0.340814 -0.0337268
+v 0.367895 -0.263853 -0.0306343
+v 0.342627 -0.106257 -0.015664
+v 0.369603 -0.166291 0.0406057
+v 0.349444 -0.148083 -0.0217878
+v 0.385185 -0.214712 0.178025
+v 0.382253 -0.237542 0.105277
+v 0.379025 -0.149836 0.112764
+v 0.38088 -0.190046 0.108528
+v 0.379176 -0.364086 0.102343
+v 0.375596 -0.311145 0.175939
+v 0.381399 -0.294389 0.103665
+v 0.363729 -0.333817 0.306674
+v 0.367965 -0.307651 0.370431
+v 0.351404 -0.354331 0.427814
+v 0.35769 -0.345276 0.36828
+v 0.379525 -0.278609 0.243679
+v 0.369702 -0.323364 0.243084
+v 0.386398 -0.194374 0.247084
+v 0.380079 -0.252636 0.309953
+v 0.384425 -0.235652 0.245143
+v 0.366553 -0.273812 0.432936
+v 0.376188 -0.22098 0.374406
+v 0.373629 -0.265433 0.372527
+v 0.363531 -0.0923241 0.440867
+v 0.375379 -0.132559 0.377674
+v 0.376221 -0.0834201 0.319025
+v 0.359143 -0.0454376 0.384517
+v 0.369754 -0.0887861 0.380331
+v 0.383633 -0.167556 0.313214
+v 0.370145 -0.182621 0.437038
+v 0.377148 -0.176656 0.375911
+v 0.386429 -0.137312 0.183762
+v 0.385994 -0.115397 0.252169
+v 0.387444 -0.154662 0.24911
+v 0.369328 -0.0337485 0.265179
+v 0.380073 -0.0641595 0.195141
+v 0.380479 -0.0754649 0.257209
+v 0.331012 0.0171674 0.0389716
+v 0.357964 -0.0317511 0.0731655
+v 0.338487 -0.0443826 0.00235778
+v 0.336366 -0.0157204 0.0175237
+v 0.37698 -0.0821329 0.123634
+v 0.363023 -0.0117289 0.147082
+v 0.372551 -0.0487428 0.133083
+v 0.363023 -0.0919062 0.0524717
+v 0.378431 -0.114848 0.117331
+v 0.339774 -0.073126 -0.00816016
+v 0.336842 -0.40362 0.835552
+v 0.340911 -0.350598 0.77666
+v 0.324728 -0.29766 0.82767
+v 0.331904 -0.344898 0.831349
+v 0.334716 -0.360298 0.654176
+v 0.340676 -0.308781 0.717453
+v 0.335933 -0.414813 0.716014
+v 0.339263 -0.356428 0.715872
+v 0.338685 -0.226798 0.720092
+v 0.331546 -0.262105 0.774295
+v 0.344609 -0.271269 0.661246
+v 0.339619 -0.266826 0.718796
+v 0.311718 -0.219243 0.822794
+v 0.317327 -0.257308 0.824743
+v 0.282982 -0.0932074 0.86163
+v 0.309753 -0.137557 0.821642
+v 0.32815 -0.0982774 0.774453
+v 0.304296 -0.0566442 0.818942
+v 0.309064 -0.0966912 0.82069
+v 0.328063 -0.182474 0.773912
+v 0.284251 -0.173813 0.864211
+v 0.309915 -0.178867 0.822052
+v 0.348891 -0.186539 0.666645
+v 0.342261 -0.141944 0.72292
+v 0.340465 -0.184932 0.72153
+v 0.335097 -0.0554519 0.724795
+v 0.349859 -0.0980743 0.670701
+v 0.341372 -0.0985454 0.724072
+v 0.350887 -0.0507559 0.502911
+v 0.3576 -0.096426 0.55865
+v 0.364199 -0.140124 0.498352
+v 0.360004 -0.0947951 0.500374
+v 0.356689 -0.14249 0.613383
+v 0.34723 -0.0531297 0.617509
+v 0.354744 -0.0974434 0.615492
+v 0.35206 -0.232064 0.608144
+v 0.360088 -0.187609 0.554338
+v 0.355112 -0.187591 0.610995
+v 0.363053 -0.232129 0.494204
+v 0.364779 -0.186112 0.496433
+v 0.354812 -0.320109 0.488079
+v 0.338808 -0.358381 0.541229
+v 0.344689 -0.357576 0.485189
+v 0.354746 -0.277669 0.548235
+v 0.360332 -0.277544 0.491251
+v 0.344841 -0.317709 0.600763
+v 0.349578 -0.27523 0.604646
+v 0.335024 -0.360119 0.596759
+v 0.253104 0.203435 0.118078
+v 0.22988 0.247545 0.157829
+v 0.250535 0.219935 0.200112
+v 0.270021 0.183796 0.151405
+v 0.251096 0.217673 0.158942
+v 0.173937 0.295795 0.151795
+v 0.201028 0.279793 0.204606
+v 0.208445 0.255016 0.0997486
+v 0.204742 0.274117 0.153286
+v 0.19325 0.264928 0.298974
+v 0.224342 0.248556 0.249012
+v 0.166122 0.2964 0.255682
+v 0.197174 0.276415 0.252836
+v 0.273837 0.172969 0.230557
+v 0.247672 0.201215 0.285394
+v 0.249328 0.213762 0.242242
+v 0.269047 0.115689 0.415447
+v 0.243641 0.166107 0.375362
+v 0.21248 0.178956 0.429644
+v 0.238934 0.137098 0.472953
+v 0.241351 0.149381 0.423
+v 0.218341 0.218477 0.337805
+v 0.272811 0.14513 0.319056
+v 0.245762 0.184358 0.32963
+v 0.157504 0.266542 0.348704
+v 0.185529 0.224498 0.389084
+v 0.189327 0.246276 0.344018
+v 0.178799 0.187723 0.48375
+v 0.149393 0.222637 0.439843
+v 0.181979 0.203634 0.435289
+v 0.235856 0.0765799 0.805621
+v 0.217701 0.11212 0.775527
+v 0.188626 0.112308 0.821548
+v 0.212012 0.0755801 0.841201
+v 0.213324 0.0965225 0.811853
+v 0.226814 0.130295 0.684497
+v 0.195015 0.146226 0.738197
+v 0.249108 0.0961373 0.728207
+v 0.222367 0.12354 0.732548
+v 0.136442 0.179619 0.750387
+v 0.163968 0.1484 0.789706
+v 0.169138 0.174847 0.695193
+v 0.166543 0.164739 0.744396
+v 0.159999 0.0987976 0.865373
+v 0.13257 0.138159 0.837992
+v 0.161693 0.126272 0.830304
+v 0.207042 0.158534 0.530777
+v 0.233686 0.131486 0.579487
+v 0.264402 0.101968 0.520397
+v 0.236433 0.132176 0.525634
+v 0.143538 0.198754 0.539805
+v 0.173735 0.178964 0.589175
+v 0.176115 0.180806 0.535582
+v 0.201599 0.157166 0.637907
+v 0.139858 0.195421 0.647569
+v 0.171473 0.178284 0.642915
+v 0.258575 0.102323 0.628487
+v 0.230534 0.131902 0.632959
+v 0.34742 0.00319315 0.333602
+v 0.320929 0.0389878 0.398635
+v 0.337836 -0.00655256 0.448549
+v 0.342419 -0.00261151 0.390867
+v 0.351215 0.0204208 0.221415
+v 0.327429 0.0566863 0.291075
+v 0.350974 0.0108683 0.277027
+v 0.298744 0.0894187 0.355449
+v 0.30121 0.115034 0.257783
+v 0.300703 0.102007 0.305644
+v 0.293232 0.0711038 0.460405
+v 0.296022 0.0786559 0.407077
+v 0.328122 -0.0112347 0.673608
+v 0.30029 0.0266191 0.725054
+v 0.308985 -0.0158364 0.772679
+v 0.320738 -0.0133772 0.724898
+v 0.267341 0.0573644 0.769829
+v 0.281859 0.0674534 0.67711
+v 0.275748 0.0634772 0.725933
+v 0.24017 0.0457536 0.838346
+v 0.274888 0.0183198 0.812257
+v 0.256292 0.0501447 0.807253
+v 0.268509 -0.018423 0.854481
+v 0.291897 -0.0177918 0.816142
+v 0.181753 0.135303 -0.0848536
+v 0.213035 0.170022 -0.00953204
+v 0.239418 0.102195 -0.0351077
+v 0.20757 0.117335 -0.0616182
+v 0.236523 0.195781 0.0609139
+v 0.190293 0.237055 0.0258931
+v 0.212268 0.218116 0.0448664
+v 0.276588 0.155574 0.0981021
+v 0.265278 0.133372 0.0353559
+v 0.258648 0.173682 0.0768666
+v 0.298292 0.0729821 0.0271766
+v 0.270541 0.0874645 -0.00640695
+v 0.311113 0.0304905 0.622016
+v 0.288873 0.0678482 0.57056
+v 0.286019 0.0682635 0.624906
+v 0.334099 -0.00961306 0.563635
+v 0.332102 -0.0102295 0.619617
+v 0.315185 0.0311388 0.510574
+v 0.335537 -0.00863692 0.506376
+v 0.291063 0.0681481 0.515313
+v 0.33595 0.0447295 0.115485
+v 0.324423 0.0791082 0.189745
+v 0.346277 0.0318576 0.16704
+v 0.305188 0.103086 0.100412
+v 0.320025 0.0585873 0.0683354
+v 0.295396 0.137443 0.169109
+v 0.288286 0.145212 0.129998
+v 0.299573 0.127115 0.211987
+v 0.149034 0.119255 -0.152767
+v 0.167034 0.18223 -0.122798
+v 0.177314 0.204818 -0.0510199
+v 0.168725 0.158519 -0.103729
+v 0.194921 0.221357 -0.173014
+v 0.177164 0.238112 -0.093383
+v 0.160058 0.168229 -0.195838
+v 0.175228 0.201681 -0.146613
+v 0.149721 0.3008 -0.00470029
+v 0.167426 0.259011 -0.0241111
+v 0.17918 0.290497 -0.0609743
+v 0.168557 0.27236 -0.0447148
+v 0.159665 0.285267 0.0452783
+v 0.175009 0.248965 0.00116269
+v 0.233098 0.21084 -0.267301
+v 0.26422 0.274293 -0.228207
+v 0.21819 0.281995 -0.133552
+v 0.227725 0.245742 -0.199837
+v 0.314639 0.343639 -0.292523
+v 0.27622 0.344814 -0.176785
+v 0.306638 0.268641 -0.338166
+v 0.294985 0.306469 -0.259249
+v 0.21453 0.407775 -0.0317089
+v 0.225521 0.344757 -0.0845003
+v 0.267222 0.417414 -0.116444
+v 0.249809 0.379204 -0.0978893
+v 0.173263 0.341693 -0.0238379
+v 0.200073 0.314908 -0.0732157
+v 0.192666 0.159829 -0.308601
+v 0.240251 0.15911 -0.387649
+v 0.279797 0.207117 -0.366981
+v 0.238726 0.181983 -0.331175
+v 0.213461 0.12942 -0.465108
+v 0.265715 0.160342 -0.469212
+v 0.195451 0.128497 -0.407372
+v 0.233311 0.142158 -0.432916
+v 0.305787 0.218563 -0.547441
+v 0.307399 0.208213 -0.46356
+v 0.259412 0.1698 -0.542722
+v 0.290901 0.186126 -0.509143
+v 0.330786 0.26947 -0.445487
+v 0.31171 0.235804 -0.406418
+v 0.32675 0.280027 -0.535933
+v 0.321008 0.352655 -0.511417
+v 0.329541 0.345256 -0.410956
+v 0.333345 0.309542 -0.480189
+v 0.272516 0.239582 -0.609537
+v 0.296456 0.295786 -0.60247
+v 0.307295 0.256707 -0.578835
+v 0.240548 0.35766 -0.62796
+v 0.289278 0.358786 -0.584918
+v 0.246645 0.312827 -0.640337
+v 0.274305 0.331026 -0.617491
+v 0.295245 0.436762 -0.484187
+v 0.255976 0.429549 -0.563622
+v 0.295399 0.395443 -0.540067
+v 0.335047 -0.66747 -0.026194
+v 0.288313 -0.71953 0.0292002
+v 0.233667 -0.718383 -0.0394682
+v 0.288912 -0.666279 -0.0831981
+v 0.288523 -0.704927 -0.0329901
+v 0.276152 -0.679477 0.14116
+v 0.227442 -0.724741 0.0933852
+v 0.327044 -0.674066 0.0872825
+v 0.281161 -0.710704 0.0910758
+v 0.136631 -0.686121 0.0787634
+v 0.172796 -0.723594 0.0247169
+v 0.182766 -0.687312 0.135767
+v 0.176191 -0.716151 0.0857864
+v 0.187443 -0.669698 -0.0818819
+v 0.136552 -0.67511 -0.0280042
+v 0.179633 -0.708233 -0.0350267
+v 0.2461 -0.678205 0.889508
+v 0.198896 -0.723335 0.85193
+v 0.248642 -0.724519 0.790895
+v 0.288217 -0.679435 0.838379
+v 0.246831 -0.711912 0.845432
+v 0.113033 -0.684171 0.844965
+v 0.148661 -0.727347 0.795626
+v 0.15574 -0.680004 0.892121
+v 0.151829 -0.713932 0.849146
+v 0.157976 -0.694713 0.696986
+v 0.198408 -0.728532 0.734591
+v 0.115859 -0.693483 0.748115
+v 0.153047 -0.720555 0.741401
+v 0.283587 -0.684515 0.738785
+v 0.24088 -0.688682 0.691628
+v 0.244434 -0.716483 0.736358
+v 0.342489 -0.483419 0.78141
+v 0.32282 -0.565952 0.726198
+v 0.30391 -0.498066 0.668956
+v 0.331237 -0.488983 0.719836
+v 0.31354 -0.474577 0.887895
+v 0.331331 -0.556659 0.842236
+v 0.337524 -0.478425 0.840051
+v 0.319732 -0.633454 0.786428
+v 0.294197 -0.618974 0.884469
+v 0.315639 -0.625673 0.839496
+v 0.28283 -0.632563 0.691009
+v 0.308326 -0.632734 0.733956
+v 0.0634711 -0.431765 0.851899
+v 0.0654567 -0.515878 0.79543
+v 0.0774719 -0.570173 0.85133
+v 0.0969194 -0.478778 0.897495
+v 0.0707234 -0.496321 0.852392
+v 0.106684 -0.534636 0.682874
+v 0.0832655 -0.591144 0.740087
+v 0.069956 -0.47422 0.735292
+v 0.0773661 -0.528459 0.735056
+v 0.0840444 -0.64607 0.797118
+v 0.119063 -0.647679 0.702012
+v 0.0943475 -0.648249 0.746058
+v 0.112805 -0.624088 0.888986
+v 0.0899111 -0.634521 0.847274
+v 0.252371 -0.581833 0.652832
+v 0.208287 -0.52835 0.637223
+v 0.260651 -0.513618 0.642398
+v 0.20034 -0.651356 0.660475
+v 0.244653 -0.641994 0.666514
+v 0.15335 -0.595644 0.66033
+v 0.156436 -0.650983 0.672896
+v 0.153645 -0.534978 0.650489
+v 0.149543 -0.554429 0.922812
+v 0.209392 -0.47177 0.928314
+v 0.147798 -0.472236 0.922294
+v 0.203436 -0.628169 0.923071
+v 0.153584 -0.625261 0.914716
+v 0.262868 -0.552296 0.920108
+v 0.253551 -0.622939 0.912658
+v 0.267734 -0.472455 0.917082
+v 0.348561 -0.464054 0.158716
+v 0.369537 -0.538419 0.0961211
+v 0.387436 -0.440321 0.0296682
+v 0.376444 -0.45013 0.0999599
+v 0.368236 -0.614976 0.0306267
+v 0.325558 -0.615423 0.139592
+v 0.354786 -0.614851 0.0904298
+v 0.340742 -0.597899 -0.0798229
+v 0.378455 -0.525308 -0.0356201
+v 0.364087 -0.605614 -0.0296813
+v 0.354445 -0.427851 -0.0878125
+v 0.380964 -0.432504 -0.0361787
+v 0.243518 -0.430642 -0.121062
+v 0.182701 -0.527846 -0.109937
+v 0.12704 -0.448099 -0.0735785
+v 0.179139 -0.436248 -0.106528
+v 0.304443 -0.521287 -0.117606
+v 0.305291 -0.427537 -0.115465
+v 0.241923 -0.609859 -0.117359
+v 0.296601 -0.602967 -0.109127
+v 0.141316 -0.607217 -0.0730372
+v 0.186967 -0.607615 -0.104287
+v 0.148115 -0.512563 0.149929
+v 0.188536 -0.577394 0.174934
+v 0.25113 -0.502745 0.197094
+v 0.196177 -0.511769 0.183546
+v 0.233064 -0.638226 0.175543
+v 0.145237 -0.635178 0.13052
+v 0.185474 -0.638484 0.162245
+v 0.293784 -0.559568 0.181063
+v 0.282371 -0.626877 0.167977
+v 0.304357 -0.484216 0.189956
+v 0.105712 -0.548895 -0.0268432
+v 0.0997946 -0.492468 0.0352712
+v 0.0953701 -0.405611 -0.0214782
+v 0.102107 -0.469936 -0.0239275
+v 0.106751 -0.633108 0.0275567
+v 0.114552 -0.619641 -0.0267392
+v 0.113288 -0.573273 0.0910993
+v 0.117988 -0.635837 0.0828
+v 0.112515 -0.451919 0.0935688
+v 0.115559 -0.506404 0.0968639
+v 0.271184 0.525759 -0.460079
+v 0.285353 0.520853 -0.361551
+v 0.308658 0.43336 -0.368305
+v 0.291391 0.478507 -0.418527
+v 0.278055 0.600765 -0.320108
+v 0.282577 0.535883 -0.259681
+v 0.267 0.601931 -0.388185
+v 0.278645 0.563976 -0.33172
+v 0.247008 0.54474 -0.114114
+v 0.271214 0.498067 -0.167977
+v 0.277875 0.55689 -0.190965
+v 0.271665 0.531425 -0.180446
+v 0.298509 0.427522 -0.236282
+v 0.235335 0.485531 -0.0647615
+v 0.272045 0.45855 -0.143225
+v 0.317802 0.38717 -0.32759
+v 0.192626 0.647576 -0.125774
+v 0.267679 0.610284 -0.129549
+v 0.326452 0.645309 -0.138504
+v 0.252363 0.682167 -0.15033
+v 0.258116 0.645451 -0.13607
+v 0.289471 0.569074 -0.162469
+v 0.324346 0.587714 -0.15672
+v 0.239082 0.578957 -0.111996
+v 0.279361 0.583208 -0.137022
+v 0.401583 0.612716 -0.19176
+v 0.380321 0.61857 -0.152413
+v 0.35448 0.589213 -0.202564
+v 0.367842 0.597467 -0.17194
+v 0.388682 0.680406 -0.144893
+v 0.430349 0.658277 -0.154382
+v 0.384792 0.647908 -0.143704
+v 0.294439 0.591849 -0.266039
+v 0.318359 0.584666 -0.230094
+v 0.294318 0.572729 -0.209871
+v 0.28588 0.662049 -0.349089
+v 0.312606 0.638668 -0.319124
+v 0.290372 0.624108 -0.317788
+v 0.375881 0.662505 -0.310821
+v 0.343967 0.620068 -0.286887
+v 0.345079 0.681327 -0.333156
+v 0.34177 0.649107 -0.318477
+v 0.380591 0.614582 -0.245535
+v 0.347363 0.598425 -0.244565
+v 0.393743 0.706765 -0.3123
+v 0.444197 0.711394 -0.263256
+v 0.419572 0.658706 -0.261892
+v 0.412958 0.685943 -0.291128
+v 0.452589 0.744444 -0.206816
+v 0.471552 0.708579 -0.199184
+v 0.43241 0.746455 -0.258303
+v 0.460792 0.730831 -0.231063
+v 0.447289 0.655956 -0.196803
+v 0.447397 0.702392 -0.15815
+v 0.460337 0.681448 -0.17325
+v 0.417303 0.633713 -0.225256
+v 0 0.324094 0.180165
+v 0.0462436 0.320164 0.221337
+v 0.0925539 0.320139 0.170699
+v 0.0467382 0.325284 0.136739
+v 0.0468991 0.323707 0.17766
+v 0.0434892 0.30185 0.311934
+v 0.0886491 0.312396 0.263435
+v 0 0.312812 0.267846
+v 0.0450231 0.313323 0.266515
+v 0.132936 0.312061 0.210485
+v 0.125628 0.296564 0.306829
+v 0.129481 0.307575 0.259519
+v 0.135299 0.30832 0.112598
+v 0.13572 0.310986 0.159838
+v 0 0.228164 0.547798
+v 0.0364517 0.2252 0.601479
+v 0.0735327 0.221293 0.545706
+v 0.0377827 0.232065 0.495188
+v 0.0369868 0.226526 0.54726
+v 0.0357781 0.218679 0.710249
+v 0.0717153 0.218255 0.65442
+v 0 0.22586 0.657001
+v 0.0360809 0.22396 0.656319
+v 0.107651 0.210086 0.597079
+v 0.105384 0.203917 0.704755
+v 0.106456 0.208743 0.651454
+v 0.111325 0.219111 0.491389
+v 0.109197 0.212143 0.543201
+v 0 0.20702 0.762635
+v 0.035033 0.18494 0.809623
+v 0.0703592 0.199987 0.759265
+v 0.0354469 0.205232 0.761742
+v 0.0337403 0.129119 0.889852
+v 0.0684286 0.154761 0.849486
+v 0 0.160642 0.853891
+v 0.0344822 0.159128 0.852718
+v 0.10286 0.172311 0.802347
+v 0.0994805 0.118705 0.880737
+v 0.101303 0.147713 0.844481
+v 0.104202 0.191409 0.755412
+v 0 -0.422236 0.0297518
+v 0.0290027 -0.401336 0.00260959
+v 0.0619855 -0.419454 0.0516405
+v 0.032544 -0.433336 0.0684456
+v 0.0312142 -0.420036 0.03584
+v 0.024587 -0.350255 -0.0669927
+v 0.0524989 -0.374486 -0.0268658
+v 0 -0.38147 -0.0341137
+v 0.0265726 -0.377866 -0.0315624
+v 0.0804386 -0.407577 0.025012
+v 0.0781392 -0.343402 -0.0680291
+v 0.0771327 -0.378765 -0.022981
+v 0.101981 -0.43742 0.118873
+v 0.0918712 -0.426509 0.0746889
+v 0 -0.369655 0.853165
+v 0.0162142 -0.396858 0.808469
+v 0.032518 -0.377955 0.851217
+v 0.0173499 -0.340897 0.891348
+v 0.0161103 -0.370733 0.852319
+v 0.0190571 -0.436735 0.723869
+v 0.0352263 -0.426311 0.757202
+v 0 -0.417485 0.766409
+v 0.0173127 -0.418962 0.764189
+v 0.0473693 -0.423079 0.799815
+v 0.0641499 -0.454693 0.696538
+v 0.0543417 -0.44245 0.745122
+v 0.0576974 -0.360469 0.893822
+v 0.0495204 -0.395304 0.851292
+v 0 0.284012 0.356829
+v 0.0403407 0.264584 0.4009
+v 0.08278 0.283223 0.354838
+v 0.0418935 0.284411 0.356338
+v 0.0772361 0.242537 0.445498
+v 0 0.24666 0.447191
+v 0.0389355 0.245943 0.446792
+v 0.117787 0.25622 0.397308
+v 0.114267 0.235186 0.443214
+v 0.121652 0.278062 0.352302
+v 0 0.337744 0.0495788
+v 0.0441574 0.329119 0.0693112
+v 0.084777 0.331134 0.0392602
+v 0.0448664 0.351036 0.0368917
+v 0.0436281 0.336529 0.047541
+v 0.0891709 0.320583 0.0906503
+v 0 0.327327 0.102505
+v 0.0455092 0.326229 0.0998335
+v 0.122579 0.311429 0.0436831
+v 0.129137 0.308034 0.0737864
+v 0.125569 0.33518 0.0110101
+v 0.120968 0.319807 0.0225689
+v 0 0.450581 0.0596803
+v 0.0541098 0.40816 0.0483714
+v 0.116503 0.442256 0.0399308
+v 0.0632284 0.496693 0.0522852
+v 0.0593708 0.448955 0.0551193
+v 0.0954346 0.370179 0.029827
+v 0 0.37634 0.0420973
+v 0.0488177 0.375217 0.039732
+v 0.153438 0.391277 0.0121446
+v 0.13765 0.35956 0.0095734
+v 0.18104 0.470039 0.00485434
+v 0.169159 0.42866 0.0126096
+v 0 0.570891 -0.534672
+v 0.0626485 0.640189 -0.506046
+v 0.130136 0.568004 -0.528764
+v 0.0692761 0.505582 -0.562042
+v 0.0664738 0.570216 -0.533525
+v 0.0536794 0.762368 -0.447664
+v 0.113201 0.701726 -0.474505
+v 0 0.709164 -0.479338
+v 0.0582379 0.707055 -0.477971
+v 0.199905 0.685497 -0.462496
+v 0.174025 0.632377 -0.495223
+v 0.150533 0.744043 -0.44196
+v 0.161614 0.694175 -0.469576
+v 0.199187 0.49892 -0.545668
+v 0.232047 0.565216 -0.501918
+v 0.188173 0.564066 -0.51907
+v 0 0.376475 -0.648788
+v 0.0711811 0.413275 -0.623292
+v 0.138282 0.375681 -0.645424
+v 0.0726958 0.340957 -0.664799
+v 0.0716465 0.376815 -0.648416
+v 0.138821 0.452917 -0.589054
+v 0 0.45452 -0.593995
+v 0.0706181 0.454733 -0.593233
+v 0.199873 0.404748 -0.612935
+v 0.202195 0.446614 -0.579567
+v 0.193634 0.339499 -0.652866
+v 0.194895 0.370921 -0.637938
+v 0 -0.233549 0.959877
+v 0.0240468 -0.271835 0.943264
+v 0.057123 -0.235922 0.956621
+v 0.0306399 -0.195728 0.970285
+v 0.0277858 -0.234252 0.959139
+v 0.0425527 -0.311463 0.921053
+v 0 -0.306916 0.921405
+v 0.0202817 -0.307657 0.921165
+v 0.0802769 -0.277764 0.939515
+v 0.0688023 -0.319918 0.921674
+v 0.0956803 -0.199455 0.960676
+v 0.0895627 -0.238122 0.952018
+v 0 -0.322733 -0.103397
+v 0.0238367 -0.283103 -0.139843
+v 0.049547 -0.311956 -0.105817
+v 0.023709 -0.319134 -0.103998
+v 0.0267024 -0.190718 -0.197111
+v 0.0521008 -0.23408 -0.173683
+v 0 -0.242929 -0.170415
+v 0.0248684 -0.240765 -0.171792
+v 0.0816243 -0.264189 -0.144485
+v 0.0878074 -0.176513 -0.193941
+v 0.0840611 -0.222685 -0.173847
+v 0.0796433 -0.304819 -0.108871
+v 0 -0.132321 -0.213832
+v 0.0321871 -0.0691596 -0.222731
+v 0.0599751 -0.128388 -0.209467
+v 0.0292367 -0.131565 -0.213064
+v 0.0381981 0.0419961 -0.235526
+v 0.0713902 -0.00682142 -0.215572
+v 0 -0.00964083 -0.233734
+v 0.035269 -0.00935459 -0.229192
+v 0.101087 -0.0620719 -0.200364
+v 0.115674 0.055105 -0.187883
+v 0.109216 -0.00036684 -0.192884
+v 0.0937169 -0.121882 -0.201748
+v 0 0.0779149 -0.251636
+v 0.0428926 0.103386 -0.257977
+v 0.0803711 0.0844567 -0.225271
+v 0.0406901 0.0790388 -0.244814
+v 0.0470212 0.120443 -0.299626
+v 0.0884331 0.12573 -0.258533
+v 0 0.114136 -0.282485
+v 0.0449537 0.11665 -0.27594
+v 0.120922 0.124855 -0.209363
+v 0.138478 0.148595 -0.265069
+v 0.128964 0.142918 -0.232488
+v 0.118034 0.096215 -0.193935
+v 0 0.113801 -0.334664
+v 0.0518947 0.108853 -0.365753
+v 0.097999 0.124878 -0.319976
+v 0.0492429 0.116377 -0.329956
+v 0.0595919 0.101024 -0.449026
+v 0.107541 0.107031 -0.401861
+v 0 0.101009 -0.408276
+v 0.0552525 0.102268 -0.405836
+v 0.150911 0.126322 -0.356271
+v 0.16045 0.112915 -0.441079
+v 0.153901 0.116281 -0.399697
+v 0.145782 0.140078 -0.308863
+v 0 0.109177 -0.495145
+v 0.0709982 0.126709 -0.538642
+v 0.125551 0.112279 -0.490196
+v 0.065189 0.10952 -0.494143
+v 0.0790718 0.182988 -0.615599
+v 0.1475 0.153854 -0.575332
+v 0 0.151226 -0.580589
+v 0.0759743 0.151547 -0.579976
+v 0.194944 0.134806 -0.523346
+v 0.219301 0.191387 -0.600189
+v 0.210129 0.15949 -0.56385
+v 0.176258 0.119186 -0.482359
+v 0 0.219576 -0.64342
+v 0.0775425 0.260206 -0.661002
+v 0.154044 0.222564 -0.638941
+v 0.0792455 0.219987 -0.642965
+v 0.144378 0.302534 -0.665208
+v 0 0.300969 -0.668909
+v 0.0750102 0.301309 -0.668637
+v 0.213772 0.26767 -0.647803
+v 0.202463 0.304851 -0.656009
+v 0.21995 0.228648 -0.628685
+v 0.126715 0.537337 0.0111468
+v 0.0606884 0.60785 0.00104436
+v 0 0.554408 0.0408567
+v 0.0643107 0.550463 0.0321785
+v 0.191637 0.550233 -0.0477524
+v 0.185308 0.513743 -0.0172354
+v 0.171165 0.605799 -0.0764289
+v 0.209701 0.569968 -0.0807895
+v 0.126096 0.69557 -0.127071
+v 0.0987466 0.656786 -0.062447
+v 0.142824 0.650926 -0.0969906
+v 0 0.672219 -0.0228265
+v 0.0403517 0.7183 -0.0719016
+v 0.0504326 0.666442 -0.034872
+v 0.237614 0.630468 -0.469873
+v 0.258514 0.602197 -0.445199
+v 0.253217 0.578269 -0.474783
+v 0.212896 0.714905 -0.428435
+v 0.240943 0.662224 -0.43894
+v 0.224501 0.676789 -0.452578
+v 0.246529 0.67248 -0.397952
+v 0.254771 0.635976 -0.4207
+v 0.226436 0.912723 -0.26973
+v 0.21544 0.938681 -0.279423
+v 0.201014 0.943338 -0.25145
+v 0.214224 0.916672 -0.246046
+v 0.216965 0.931825 -0.261167
+v 0.196917 0.927676 -0.311546
+v 0.191876 0.948989 -0.288378
+v 0.216862 0.918936 -0.303965
+v 0.207634 0.937732 -0.29671
+v 0.1537 0.943252 -0.269054
+v 0.17745 0.953646 -0.260405
+v 0.165912 0.939303 -0.292739
+v 0.173109 0.951035 -0.277835
+v 0.179801 0.930945 -0.23207
+v 0.159856 0.939686 -0.239651
+v 0.180783 0.946411 -0.244635
+v 0.207814 0.759869 -0.30299
+v 0.191438 0.774734 -0.312022
+v 0.199084 0.77337 -0.281405
+v 0.209996 0.761876 -0.268358
+v 0.201734 0.767127 -0.28955
+v 0.161743 0.793141 -0.342758
+v 0.178221 0.788374 -0.325575
+v 0.180956 0.7796 -0.34144
+v 0.178357 0.783728 -0.331262
+v 0.18742 0.811743 -0.321532
+v 0.192387 0.789092 -0.301842
+v 0.165819 0.804418 -0.335288
+v 0.181068 0.796668 -0.322844
+v 0.202175 0.775482 -0.251997
+v 0.203233 0.797472 -0.273932
+v 0.199221 0.78193 -0.2764
+v 0.181336 0.804317 -0.228606
+v 0.151415 0.801648 -0.215482
+v 0.168503 0.781447 -0.210541
+v 0.192516 0.777938 -0.227759
+v 0.174268 0.789156 -0.220565
+v 0.108345 0.822919 -0.221075
+v 0.12229 0.805309 -0.211386
+v 0.135327 0.828572 -0.217894
+v 0.128262 0.813355 -0.215464
+v 0.103071 0.800257 -0.198013
+v 0.137434 0.789176 -0.20042
+v 0.0963023 0.812563 -0.21585
+v 0.114773 0.801816 -0.205592
+v 0.186175 0.766882 -0.204739
+v 0.153638 0.772539 -0.191376
+v 0.162292 0.776737 -0.200861
+v 0.13845 0.801677 -0.350685
+v 0.119159 0.809804 -0.331351
+v 0.143329 0.804627 -0.337252
+v 0.140844 0.802002 -0.341998
+v 0.086003 0.820458 -0.291304
+v 0.10501 0.817882 -0.310157
+v 0.0934554 0.817549 -0.318853
+v 0.100185 0.816042 -0.313185
+v 0.116614 0.840248 -0.308051
+v 0.127696 0.819206 -0.325202
+v 0.0961567 0.829666 -0.288835
+v 0.109848 0.825231 -0.308777
+v 0.153713 0.826966 -0.334024
+v 0.147191 0.8121 -0.335056
+v 0.0710179 0.824335 -0.270682
+v 0.0780919 0.822576 -0.248928
+v 0.084054 0.824638 -0.268864
+v 0.0786961 0.822795 -0.269513
+v 0.0899479 0.821575 -0.232115
+v 0.0751675 0.819879 -0.228529
+v 0.084018 0.819324 -0.230668
+v 0.102029 0.843892 -0.232989
+v 0.0888081 0.831892 -0.249223
+v 0.095203 0.828981 -0.232883
+v 0.0959818 0.847028 -0.267898
+v 0.0891348 0.832003 -0.268429
+v 0.208076 0.764415 -0.241789
+v 0.226075 0.759994 -0.240858
+v 0.231902 0.754223 -0.26947
+v 0.216972 0.759952 -0.252956
+v 0.20189 0.765574 -0.22074
+v 0.201854 0.770051 -0.234112
+v 0.215759 0.757673 -0.200462
+v 0.223444 0.760451 -0.217689
+v 0.205912 0.761992 -0.209879
+v 0.264144 0.760816 -0.243923
+v 0.265103 0.760852 -0.220189
+v 0.240714 0.760641 -0.229576
+v 0.278995 0.751255 -0.279238
+v 0.277858 0.727498 -0.319523
+v 0.234872 0.742799 -0.306286
+v 0.256224 0.744786 -0.293184
+v 0.311167 0.763685 -0.236173
+v 0.343965 0.756617 -0.269721
+v 0.300161 0.759231 -0.259113
+v 0.37536 0.738148 -0.30116
+v 0.402685 0.758643 -0.236201
+v 0.390758 0.753705 -0.27032
+v 0.317986 0.708047 -0.333577
+v 0.358205 0.712515 -0.324745
+v 0.258236 0.693552 -0.356883
+v 0.284726 0.698216 -0.343913
+v 0.183529 0.754714 -0.188935
+v 0.248651 0.737773 -0.18066
+v 0.277376 0.753251 -0.197126
+v 0.23327 0.750984 -0.191422
+v 0.329845 0.712932 -0.159268
+v 0.178779 0.718135 -0.16643
+v 0.252112 0.713887 -0.166078
+v 0.409003 0.736167 -0.174314
+v 0.39942 0.710994 -0.155057
+v 0.352617 0.756796 -0.208565
+v 0.409429 0.752424 -0.202781
+v 0.303456 0.759588 -0.213776
+v 0.0705006 0.811753 -0.205807
+v 0.0602661 0.797425 -0.177797
+v 0.106741 0.782082 -0.178969
+v 0.0845451 0.798015 -0.188581
+v 0 0.80352 -0.16013
+v 0.0359638 0.781745 -0.13792
+v 0.0258744 0.814456 -0.19275
+v 0.0313039 0.800824 -0.16616
+v 0.0771688 0.744134 -0.128582
+v 0 0.760454 -0.0953915
+v 0.0372542 0.755486 -0.106376
+v 0.128352 0.75368 -0.16956
+v 0.122404 0.729951 -0.151266
+v 0.140544 0.766364 -0.181936
+v 0.0269807 0.832763 -0.306956
+v 0.0647564 0.824008 -0.337021
+v 0.0438017 0.818098 -0.377412
+v 0 0.831312 -0.342444
+v 0.0352238 0.828719 -0.341387
+v 0.105446 0.812324 -0.352131
+v 0.0673729 0.8251 -0.299446
+v 0.0829065 0.820245 -0.328154
+v 0.163948 0.787613 -0.366372
+v 0.134148 0.794627 -0.38492
+v 0.134862 0.801108 -0.364702
+v 0.0968568 0.789376 -0.411772
+v 0.176513 0.760515 -0.410655
+v 0.140375 0.776566 -0.412207
+v 0 0.800708 -0.414889
+v 0.0494105 0.797681 -0.413491
+v 0.0431316 0.822381 -0.222498
+v 0.0209585 0.83068 -0.246717
+v 0 0.825505 -0.217769
+v 0.0222752 0.824379 -0.219341
+v 0.0571055 0.825864 -0.248034
+v 0.0611506 0.820889 -0.225684
+v 0.0427143 0.830406 -0.274631
+v 0.0589761 0.827121 -0.27268
+v 0 0.834904 -0.275945
+v 0.0223763 0.833445 -0.275661
+v 0.192333 0.761236 -0.378584
+v 0.220893 0.723503 -0.390265
+v 0.20182 0.742487 -0.402976
+v 0.202503 0.759013 -0.342238
+v 0.185497 0.772855 -0.357643
+v 0.229147 0.73046 -0.348619
+v 0.217964 0.748268 -0.323878
+v 0.238328 0.704586 -0.373617
+v 0.182638 0.844163 -0.332751
+v 0.20855 0.866089 -0.317157
+v 0.208154 0.830042 -0.298948
+v 0.197796 0.836731 -0.320106
+v 0.225435 0.891324 -0.293612
+v 0.202088 0.898609 -0.32219
+v 0.216036 0.894273 -0.311279
+v 0.221713 0.885753 -0.253374
+v 0.22169 0.856724 -0.273536
+v 0.227222 0.886884 -0.273083
+v 0.207473 0.823979 -0.249259
+v 0.212209 0.824662 -0.273398
+v 0.113654 0.869597 -0.287244
+v 0.140188 0.893495 -0.304854
+v 0.144889 0.85895 -0.323167
+v 0.127225 0.865094 -0.306986
+v 0.170407 0.915574 -0.314537
+v 0.141278 0.921251 -0.283964
+v 0.154011 0.919177 -0.300925
+v 0.17602 0.88052 -0.329499
+v 0.186655 0.907214 -0.322451
+v 0.164181 0.851772 -0.332764
+v 0.125921 0.862724 -0.222037
+v 0.126235 0.896792 -0.232757
+v 0.106278 0.87162 -0.248059
+v 0.112672 0.868658 -0.232446
+v 0.136706 0.926532 -0.249134
+v 0.152825 0.914652 -0.226568
+v 0.141818 0.921807 -0.235425
+v 0.120264 0.900007 -0.266157
+v 0.136018 0.925025 -0.266007
+v 0.106638 0.871852 -0.266963
+v 0.201725 0.863227 -0.234578
+v 0.169297 0.842653 -0.222278
+v 0.191458 0.831381 -0.232334
+v 0.191735 0.902282 -0.228209
+v 0.209229 0.892731 -0.238166
+v 0.158724 0.882351 -0.220155
+v 0.171199 0.908866 -0.224253
+v 0.146124 0.853579 -0.218747
+v 0.183641 -0.455489 0.534379
+v 0.20511 -0.461837 0.475502
+v 0.228365 -0.446181 0.420977
+v 0.21326 -0.450303 0.478539
+v 0.102981 -0.465723 0.632974
+v 0.142559 -0.466669 0.57469
+v 0.143787 -0.462243 0.587914
+v 0.132033 -0.481167 0.556781
+v 0.17086 -0.474631 0.521789
+v 0.109552 -0.472057 0.595485
+v 0.140849 -0.473458 0.563316
+v 0.206663 -0.472505 0.418626
+v 0.176881 -0.48852 0.469991
+v 0.194975 -0.474631 0.472428
+v 0.0439995 -0.453929 0.682063
+v 0.0755892 -0.465387 0.639018
+v 0.0705061 -0.461083 0.663389
+v 0 -0.448613 0.69461
+v 0.0238268 -0.46053 0.664171
+v 0.0210984 -0.44987 0.691901
+v 0.0329053 -0.483094 0.604968
+v 0.0549351 -0.471091 0.629642
+v 0 -0.470799 0.638539
+v 0.0276324 -0.470883 0.636565
+v 0.0917758 -0.479034 0.591505
+v 0.0815781 -0.471375 0.616771
+v 0.065741 -0.440111 0.119726
+v 0.0317646 -0.445352 0.134276
+v 0 -0.441491 0.0941719
+v 0.0323291 -0.440606 0.100743
+v 0.138015 -0.443895 0.192281
+v 0.0965198 -0.444834 0.175662
+v 0.101319 -0.442167 0.150389
+v 0.0981278 -0.457279 0.230677
+v 0.0636123 -0.450028 0.182161
+v 0.124102 -0.450719 0.228684
+v 0.0942201 -0.449501 0.201118
+v 0 -0.451738 0.166652
+v 0.034368 -0.461296 0.211229
+v 0.0320458 -0.45108 0.17059
+v 0.205661 -0.440923 0.30331
+v 0.163536 -0.447685 0.255959
+v 0.174785 -0.442414 0.24495
+v 0.213095 -0.454425 0.36305
+v 0.224677 -0.442616 0.362277
+v 0.178344 -0.481123 0.364052
+v 0.179537 -0.460826 0.31166
+v 0.199389 -0.4673 0.363772
+v 0.138385 -0.462583 0.271465
+v 0.153491 -0.454897 0.266081
+v 0 -0.501354 0.566258
+v 0.0472884 -0.515301 0.519274
+v 0.0774159 -0.49456 0.56248
+v 0.0400356 -0.499329 0.565268
+v 0.0552219 -0.529321 0.415644
+v 0.102374 -0.516827 0.468605
+v 0 -0.53078 0.469003
+v 0.0529288 -0.526727 0.468796
+v 0.12906 -0.497175 0.517019
+v 0.151776 -0.503865 0.416665
+v 0.144853 -0.503338 0.468859
+v 0.109485 -0.488354 0.558077
+v 0 -0.522771 0.361177
+v 0.046641 -0.500432 0.308431
+v 0.101727 -0.509066 0.362546
+v 0.0524328 -0.518796 0.361628
+v 0.0778956 -0.474931 0.261875
+v 0 -0.481524 0.256229
+v 0.0399262 -0.479506 0.257737
+v 0.12954 -0.482953 0.312862
+v 0.111951 -0.469275 0.268257
+v 0.144743 -0.495777 0.363499
+v 0.16267 -0.0297463 0.947597
+v 0.137897 -0.0298265 0.961061
+v 0.139042 -0.0451633 0.963659
+v 0.164631 -0.0504489 0.951176
+v 0.150007 -0.038161 0.957034
+v 0.109331 -0.0304408 0.974293
+v 0.122462 -0.037914 0.969963
+v 0.130968 -0.0135459 0.956851
+v 0.124172 -0.0273895 0.966259
+v 0.133714 -0.0642499 0.965416
+v 0.109851 -0.0516794 0.976283
+v 0.125245 -0.0487781 0.969619
+v 0.172782 -0.0812671 0.947651
+v 0.213918 -0.078022 0.922841
+v 0.18873 -0.0434748 0.935764
+v 0.185903 -0.064746 0.940486
+v 0.119758 -0.0911109 0.968067
+v 0.163882 -0.120989 0.949271
+v 0.149065 -0.0885975 0.957545
+v 0.209867 -0.161573 0.925244
+v 0.133001 -0.161124 0.956956
+v 0.170754 -0.160591 0.943861
+v 0.24743 -0.125194 0.898745
+v 0.2493 -0.167014 0.898651
+v 0.247517 -0.0516572 0.891815
+v 0.248772 -0.0872467 0.896118
+v 0.206641 -0.0168625 0.913183
+v 0.165953 -0.00373407 0.934179
+v 0.181563 -0.0218941 0.9332
+v 0.225597 0.0165132 0.878354
+v 0.237857 -0.0169093 0.886885
+v 0.188508 0.055577 0.881861
+v 0.216542 0.0481267 0.863247
+v 0.128713 0.0776757 0.9026
+v 0.155037 0.0342159 0.918365
+v 0.159167 0.0664178 0.894094
+v 0.116698 0.0146029 0.948102
+v 0.143447 0.00727528 0.940085
+v 0.0272441 0.00174637 0.985368
+v 0.0591053 0.0273947 0.96148
+v 0.0314129 0.062618 0.944549
+v 0 0.031249 0.967483
+v 0.0296124 0.0304081 0.965777
+v 0.0939443 0.0532189 0.93429
+v 0.0802969 -0.0053962 0.974961
+v 0.0883591 0.0217178 0.955411
+v 0.0652762 0.0925539 0.91594
+v 0.0973395 0.0863776 0.910036
+v 0 0.0974522 0.921309
+v 0.032753 0.0962353 0.919852
+v 0.0640997 -0.158632 0.974036
+v 0.0313918 -0.121887 0.983992
+v 0 -0.156323 0.979553
+v 0.0317497 -0.157085 0.978196
+v 0.0956523 -0.123707 0.972085
+v 0.0976501 -0.160225 0.966985
+v 0.0810019 -0.0733797 0.982855
+v 0.05977 -0.0942344 0.98419
+v 0.0898671 -0.0931811 0.977114
+v 0 -0.0930316 0.990478
+v 0.0273784 -0.0760818 0.993721
+v 0.0298425 -0.0936976 0.988793
+v 0.0955999 -0.0476973 0.985969
+v 0.0789269 -0.06284 0.994959
+v 0.0864529 -0.0640166 0.98579
+v 0.0940951 -0.0565412 0.98502
+v 0.0861068 -0.0306433 0.989153
+v 0.0788425 -0.0488271 1.00064
+v 0.0938769 -0.0385712 0.986828
+v 0.0525009 -0.0662129 1.0231
+v 0.0666347 -0.0644079 1.00864
+v 0.0598411 -0.0536198 1.01768
+v 0.0652608 -0.0582549 1.01251
+v 0.0598989 -0.0753391 1.00401
+v 0.0525276 -0.0775374 1.01483
+v 0.0652946 -0.0700523 1.00523
+v 0.0476184 -0.0223971 1.0006
+v 0.0582598 -0.0403976 1.0119
+v 0.0694706 -0.025394 0.994502
+v 0 -0.0210975 1.00705
+v 0.0209047 -0.039677 1.02317
+v 0.0242006 -0.0212264 1.00515
+v 0.0152462 -0.0676801 1.04602
+v 0.0344846 -0.0534302 1.03232
+v 0 -0.0555541 1.03946
+v 0.0177789 -0.0547726 1.03745
+v 0.0426567 -0.0627588 1.03251
+v 0.0490439 -0.0525287 1.02501
+v 0.0477856 -0.0718253 0.995571
+v 0.0209476 -0.0779737 1.00716
+v 0 -0.0724905 1.00177
+v 0.0242757 -0.0726037 0.999895
+v 0.0583871 -0.073576 0.998233
+v 0.0697639 -0.0690371 0.990088
+v 0.0427012 -0.0854758 1.01505
+v 0.0345351 -0.0845283 1.01121
+v 0.0491057 -0.0804193 1.00671
+v 0 -0.0876552 1.01738
+v 0.0152667 -0.0940994 1.02438
+v 0.0178069 -0.0869021 1.01538
+v 0.0271444 -0.0763309 1.04139
+v 0.0394909 -0.0796254 1.02906
+v 0.0399318 -0.0720589 1.03309
+v 0 -0.0809251 1.049
+v 0.0132272 -0.0889316 1.04223
+v 0.0137291 -0.0795664 1.04692
+v 0.0271637 -0.0905263 1.02914
+v 0 -0.0956202 1.03609
+v 0.01374 -0.0942759 1.03402
+v 0.0399551 -0.0846544 1.02277
+v -0.341299 -0.375944 0.366232
+v -0.317818 -0.392423 0.305485
+v -0.289638 -0.411984 0.363044
+v -0.313121 -0.40468 0.424303
+v -0.317288 -0.397295 0.364448
+v -0.312495 -0.42258 0.208415
+v -0.274846 -0.418754 0.252658
+v -0.350284 -0.364453 0.244783
+v -0.316601 -0.396416 0.250199
+v -0.194817 -0.437402 0.239358
+v -0.251544 -0.422435 0.302332
+v -0.2099 -0.457622 0.202318
+v -0.231211 -0.430966 0.245486
+v -0.261632 -0.427221 0.422701
+v -0.23935 -0.43199 0.361842
+v -0.262332 -0.422664 0.362137
+v -0.275025 -0.421949 0.482067
+v -0.291362 -0.413379 0.536815
+v -0.328162 -0.387696 0.483083
+v -0.303426 -0.408222 0.482265
+v -0.225399 -0.440193 0.480871
+v -0.222989 -0.439421 0.53835
+v -0.247502 -0.431674 0.481825
+v -0.164011 -0.483033 0.631406
+v -0.236564 -0.444228 0.584476
+v -0.156119 -0.458526 0.595789
+v -0.191143 -0.453861 0.591118
+v -0.315111 -0.397679 0.592146
+v -0.26979 -0.455493 0.624003
+v -0.280089 -0.425608 0.586435
+v -0.339519 -0.257182 -0.0802831
+v -0.283535 -0.198083 -0.11242
+v -0.23546 -0.269794 -0.12322
+v -0.300873 -0.335808 -0.11045
+v -0.292915 -0.260195 -0.110308
+v -0.26898 -0.103901 -0.112945
+v -0.224047 -0.163495 -0.142015
+v -0.318497 -0.140059 -0.0746981
+v -0.274851 -0.146856 -0.114171
+v -0.123113 -0.206388 -0.170045
+v -0.173106 -0.230478 -0.142628
+v -0.172014 -0.141551 -0.170002
+v -0.171621 -0.184999 -0.160037
+v -0.176383 -0.34915 -0.10631
+v -0.119478 -0.295776 -0.113872
+v -0.174532 -0.282877 -0.121531
+v -0.273281 0.0420733 -0.0438498
+v -0.232462 0.00608204 -0.106821
+v -0.269878 -0.0318881 -0.0928838
+v -0.307808 -0.00318692 -0.0317095
+v -0.272342 0.00331308 -0.0724007
+v -0.148915 0.00578811 -0.165362
+v -0.182883 -0.0457092 -0.157469
+v -0.199052 0.0623656 -0.105989
+v -0.190658 0.0074224 -0.137241
+v -0.222802 -0.0788504 -0.141507
+v -0.131964 -0.111136 -0.188613
+v -0.176218 -0.0952439 -0.168768
+v -0.308256 -0.0631453 -0.0619102
+v -0.26804 -0.0666021 -0.106124
+v -0.26887 -0.396352 0.909752
+v -0.214141 -0.334681 0.919903
+v -0.263105 -0.289222 0.900577
+v -0.30851 -0.340088 0.875405
+v -0.266995 -0.336923 0.904292
+v -0.104904 -0.328487 0.922781
+v -0.164801 -0.284207 0.93026
+v -0.149733 -0.3934 0.920339
+v -0.15673 -0.332639 0.924128
+v -0.172003 -0.202637 0.93974
+v -0.215216 -0.244729 0.920604
+v -0.126656 -0.240414 0.945024
+v -0.169955 -0.242358 0.935335
+v -0.293898 -0.252639 0.867139
+v -0.253262 -0.209214 0.897882
+v -0.258195 -0.248301 0.898482
+v -0.375987 -0.340814 -0.0337268
+v -0.380901 -0.277243 0.032671
+v -0.358457 -0.200112 -0.0267162
+v -0.367895 -0.263853 -0.0306343
+v -0.375596 -0.311145 0.175939
+v -0.382253 -0.237542 0.105277
+v -0.379176 -0.364086 0.102343
+v -0.381399 -0.294389 0.103665
+v -0.379025 -0.149836 0.112764
+v -0.369603 -0.166291 0.0406057
+v -0.385185 -0.214712 0.178025
+v -0.38088 -0.190046 0.108528
+v -0.342627 -0.106257 -0.015664
+v -0.349444 -0.148083 -0.0217878
+v -0.351404 -0.354331 0.427814
+v -0.367965 -0.307651 0.370431
+v -0.363729 -0.333817 0.306674
+v -0.35769 -0.345276 0.36828
+v -0.376188 -0.22098 0.374406
+v -0.380079 -0.252636 0.309953
+v -0.366553 -0.273812 0.432936
+v -0.373629 -0.265433 0.372527
+v -0.379525 -0.278609 0.243679
+v -0.386398 -0.194374 0.247084
+v -0.384425 -0.235652 0.245143
+v -0.369702 -0.323364 0.243084
+v -0.359143 -0.0454376 0.384517
+v -0.376221 -0.0834201 0.319025
+v -0.375379 -0.132559 0.377674
+v -0.363531 -0.0923241 0.440867
+v -0.369754 -0.0887861 0.380331
+v -0.380073 -0.0641595 0.195141
+v -0.385994 -0.115397 0.252169
+v -0.369328 -0.0337485 0.265179
+v -0.380479 -0.0754649 0.257209
+v -0.383633 -0.167556 0.313214
+v -0.386429 -0.137312 0.183762
+v -0.387444 -0.154662 0.24911
+v -0.370145 -0.182621 0.437038
+v -0.377148 -0.176656 0.375911
+v -0.338487 -0.0443826 0.00235778
+v -0.357964 -0.0317511 0.0731655
+v -0.331012 0.0171674 0.0389716
+v -0.336366 -0.0157204 0.0175237
+v -0.363023 -0.0919062 0.0524717
+v -0.339774 -0.073126 -0.00816016
+v -0.37698 -0.0821329 0.123634
+v -0.378431 -0.114848 0.117331
+v -0.363023 -0.0117289 0.147082
+v -0.372551 -0.0487428 0.133083
+v -0.324728 -0.29766 0.82767
+v -0.340911 -0.350598 0.77666
+v -0.336842 -0.40362 0.835552
+v -0.331904 -0.344898 0.831349
+v -0.311718 -0.219243 0.822794
+v -0.331546 -0.262105 0.774295
+v -0.317327 -0.257308 0.824743
+v -0.344609 -0.271269 0.661246
+v -0.340676 -0.308781 0.717453
+v -0.338685 -0.226798 0.720092
+v -0.339619 -0.266826 0.718796
+v -0.335933 -0.414813 0.716014
+v -0.334716 -0.360298 0.654176
+v -0.339263 -0.356428 0.715872
+v -0.304296 -0.0566442 0.818942
+v -0.32815 -0.0982774 0.774453
+v -0.309753 -0.137557 0.821642
+v -0.282982 -0.0932074 0.86163
+v -0.309064 -0.0966912 0.82069
+v -0.349859 -0.0980743 0.670701
+v -0.342261 -0.141944 0.72292
+v -0.335097 -0.0554519 0.724795
+v -0.341372 -0.0985454 0.724072
+v -0.328063 -0.182474 0.773912
+v -0.348891 -0.186539 0.666645
+v -0.340465 -0.184932 0.72153
+v -0.284251 -0.173813 0.864211
+v -0.309915 -0.178867 0.822052
+v -0.364199 -0.140124 0.498352
+v -0.3576 -0.096426 0.55865
+v -0.350887 -0.0507559 0.502911
+v -0.360004 -0.0947951 0.500374
+v -0.363053 -0.232129 0.494204
+v -0.360088 -0.187609 0.554338
+v -0.364779 -0.186112 0.496433
+v -0.356689 -0.14249 0.613383
+v -0.35206 -0.232064 0.608144
+v -0.355112 -0.187591 0.610995
+v -0.34723 -0.0531297 0.617509
+v -0.354744 -0.0974434 0.615492
+v -0.338808 -0.358381 0.541229
+v -0.354812 -0.320109 0.488079
+v -0.344689 -0.357576 0.485189
+v -0.344841 -0.317709 0.600763
+v -0.335024 -0.360119 0.596759
+v -0.354746 -0.277669 0.548235
+v -0.349578 -0.27523 0.604646
+v -0.360332 -0.277544 0.491251
+v -0.270021 0.183796 0.151405
+v -0.250535 0.219935 0.200112
+v -0.22988 0.247545 0.157829
+v -0.253104 0.203435 0.118078
+v -0.251096 0.217673 0.158942
+v -0.247672 0.201215 0.285394
+v -0.224342 0.248556 0.249012
+v -0.273837 0.172969 0.230557
+v -0.249328 0.213762 0.242242
+v -0.166122 0.2964 0.255682
+v -0.201028 0.279793 0.204606
+v -0.19325 0.264928 0.298974
+v -0.197174 0.276415 0.252836
+v -0.208445 0.255016 0.0997486
+v -0.173937 0.295795 0.151795
+v -0.204742 0.274117 0.153286
+v -0.238934 0.137098 0.472953
+v -0.21248 0.178956 0.429644
+v -0.243641 0.166107 0.375362
+v -0.269047 0.115689 0.415447
+v -0.241351 0.149381 0.423
+v -0.149393 0.222637 0.439843
+v -0.185529 0.224498 0.389084
+v -0.178799 0.187723 0.48375
+v -0.181979 0.203634 0.435289
+v -0.218341 0.218477 0.337805
+v -0.157504 0.266542 0.348704
+v -0.189327 0.246276 0.344018
+v -0.272811 0.14513 0.319056
+v -0.245762 0.184358 0.32963
+v -0.212012 0.0755801 0.841201
+v -0.188626 0.112308 0.821548
+v -0.217701 0.11212 0.775527
+v -0.235856 0.0765799 0.805621
+v -0.213324 0.0965225 0.811853
+v -0.13257 0.138159 0.837992
+v -0.163968 0.1484 0.789706
+v -0.159999 0.0987976 0.865373
+v -0.161693 0.126272 0.830304
+v -0.169138 0.174847 0.695193
+v -0.195015 0.146226 0.738197
+v -0.136442 0.179619 0.750387
+v -0.166543 0.164739 0.744396
+v -0.249108 0.0961373 0.728207
+v -0.226814 0.130295 0.684497
+v -0.222367 0.12354 0.732548
+v -0.264402 0.101968 0.520397
+v -0.233686 0.131486 0.579487
+v -0.207042 0.158534 0.530777
+v -0.236433 0.132176 0.525634
+v -0.201599 0.157166 0.637907
+v -0.258575 0.102323 0.628487
+v -0.230534 0.131902 0.632959
+v -0.139858 0.195421 0.647569
+v -0.173735 0.178964 0.589175
+v -0.171473 0.178284 0.642915
+v -0.143538 0.198754 0.539805
+v -0.176115 0.180806 0.535582
+v -0.337836 -0.00655256 0.448549
+v -0.320929 0.0389878 0.398635
+v -0.34742 0.00319315 0.333602
+v -0.342419 -0.00261151 0.390867
+v -0.298744 0.0894187 0.355449
+v -0.293232 0.0711038 0.460405
+v -0.296022 0.0786559 0.407077
+v -0.30121 0.115034 0.257783
+v -0.327429 0.0566863 0.291075
+v -0.300703 0.102007 0.305644
+v -0.351215 0.0204208 0.221415
+v -0.350974 0.0108683 0.277027
+v -0.308985 -0.0158364 0.772679
+v -0.30029 0.0266191 0.725054
+v -0.328122 -0.0112347 0.673608
+v -0.320738 -0.0133772 0.724898
+v -0.268509 -0.018423 0.854481
+v -0.274888 0.0183198 0.812257
+v -0.291897 -0.0177918 0.816142
+v -0.267341 0.0573644 0.769829
+v -0.24017 0.0457536 0.838346
+v -0.256292 0.0501447 0.807253
+v -0.281859 0.0674534 0.67711
+v -0.275748 0.0634772 0.725933
+v -0.239418 0.102195 -0.0351077
+v -0.213035 0.170022 -0.00953204
+v -0.181753 0.135303 -0.0848536
+v -0.20757 0.117335 -0.0616182
+v -0.298292 0.0729821 0.0271766
+v -0.265278 0.133372 0.0353559
+v -0.270541 0.0874645 -0.00640695
+v -0.236523 0.195781 0.0609139
+v -0.276588 0.155574 0.0981021
+v -0.258648 0.173682 0.0768666
+v -0.190293 0.237055 0.0258931
+v -0.212268 0.218116 0.0448664
+v -0.288873 0.0678482 0.57056
+v -0.311113 0.0304905 0.622016
+v -0.286019 0.0682635 0.624906
+v -0.315185 0.0311388 0.510574
+v -0.291063 0.0681481 0.515313
+v -0.334099 -0.00961306 0.563635
+v -0.335537 -0.00863692 0.506376
+v -0.332102 -0.0102295 0.619617
+v -0.324423 0.0791082 0.189745
+v -0.33595 0.0447295 0.115485
+v -0.346277 0.0318576 0.16704
+v -0.295396 0.137443 0.169109
+v -0.299573 0.127115 0.211987
+v -0.305188 0.103086 0.100412
+v -0.288286 0.145212 0.129998
+v -0.320025 0.0585873 0.0683354
+v -0.177314 0.204818 -0.0510199
+v -0.167034 0.18223 -0.122798
+v -0.149034 0.119255 -0.152767
+v -0.168725 0.158519 -0.103729
+v -0.159665 0.285267 0.0452783
+v -0.167426 0.259011 -0.0241111
+v -0.175009 0.248965 0.00116269
+v -0.17918 0.290497 -0.0609743
+v -0.177164 0.238112 -0.093383
+v -0.149721 0.3008 -0.00470029
+v -0.168557 0.27236 -0.0447148
+v -0.160058 0.168229 -0.195838
+v -0.194921 0.221357 -0.173014
+v -0.175228 0.201681 -0.146613
+v -0.21819 0.281995 -0.133552
+v -0.26422 0.274293 -0.228207
+v -0.233098 0.21084 -0.267301
+v -0.227725 0.245742 -0.199837
+v -0.173263 0.341693 -0.0238379
+v -0.225521 0.344757 -0.0845003
+v -0.200073 0.314908 -0.0732157
+v -0.267222 0.417414 -0.116444
+v -0.27622 0.344814 -0.176785
+v -0.21453 0.407775 -0.0317089
+v -0.249809 0.379204 -0.0978893
+v -0.306638 0.268641 -0.338166
+v -0.314639 0.343639 -0.292523
+v -0.294985 0.306469 -0.259249
+v -0.279797 0.207117 -0.366981
+v -0.240251 0.15911 -0.387649
+v -0.192666 0.159829 -0.308601
+v -0.238726 0.181983 -0.331175
+v -0.330786 0.26947 -0.445487
+v -0.307399 0.208213 -0.46356
+v -0.31171 0.235804 -0.406418
+v -0.259412 0.1698 -0.542722
+v -0.265715 0.160342 -0.469212
+v -0.305787 0.218563 -0.547441
+v -0.290901 0.186126 -0.509143
+v -0.195451 0.128497 -0.407372
+v -0.213461 0.12942 -0.465108
+v -0.233311 0.142158 -0.432916
+v -0.329541 0.345256 -0.410956
+v -0.321008 0.352655 -0.511417
+v -0.32675 0.280027 -0.535933
+v -0.333345 0.309542 -0.480189
+v -0.255976 0.429549 -0.563622
+v -0.289278 0.358786 -0.584918
+v -0.295245 0.436762 -0.484187
+v -0.295399 0.395443 -0.540067
+v -0.246645 0.312827 -0.640337
+v -0.296456 0.295786 -0.60247
+v -0.240548 0.35766 -0.62796
+v -0.274305 0.331026 -0.617491
+v -0.272516 0.239582 -0.609537
+v -0.307295 0.256707 -0.578835
+v -0.288912 -0.666279 -0.0831981
+v -0.233667 -0.718383 -0.0394682
+v -0.288313 -0.71953 0.0292002
+v -0.335047 -0.66747 -0.026194
+v -0.288523 -0.704927 -0.0329901
+v -0.136552 -0.67511 -0.0280042
+v -0.172796 -0.723594 0.0247169
+v -0.187443 -0.669698 -0.0818819
+v -0.179633 -0.708233 -0.0350267
+v -0.182766 -0.687312 0.135767
+v -0.227442 -0.724741 0.0933852
+v -0.136631 -0.686121 0.0787634
+v -0.176191 -0.716151 0.0857864
+v -0.327044 -0.674066 0.0872825
+v -0.276152 -0.679477 0.14116
+v -0.281161 -0.710704 0.0910758
+v -0.288217 -0.679435 0.838379
+v -0.248642 -0.724519 0.790895
+v -0.198896 -0.723335 0.85193
+v -0.2461 -0.678205 0.889508
+v -0.246831 -0.711912 0.845432
+v -0.24088 -0.688682 0.691628
+v -0.198408 -0.728532 0.734591
+v -0.283587 -0.684515 0.738785
+v -0.244434 -0.716483 0.736358
+v -0.115859 -0.693483 0.748115
+v -0.148661 -0.727347 0.795626
+v -0.157976 -0.694713 0.696986
+v -0.153047 -0.720555 0.741401
+v -0.15574 -0.680004 0.892121
+v -0.113033 -0.684171 0.844965
+v -0.151829 -0.713932 0.849146
+v -0.30391 -0.498066 0.668956
+v -0.32282 -0.565952 0.726198
+v -0.342489 -0.483419 0.78141
+v -0.331237 -0.488983 0.719836
+v -0.319732 -0.633454 0.786428
+v -0.28283 -0.632563 0.691009
+v -0.308326 -0.632734 0.733956
+v -0.294197 -0.618974 0.884469
+v -0.331331 -0.556659 0.842236
+v -0.315639 -0.625673 0.839496
+v -0.31354 -0.474577 0.887895
+v -0.337524 -0.478425 0.840051
+v -0.0969194 -0.478778 0.897495
+v -0.0774719 -0.570173 0.85133
+v -0.0654567 -0.515878 0.79543
+v -0.0634711 -0.431765 0.851899
+v -0.0707234 -0.496321 0.852392
+v -0.0840444 -0.64607 0.797118
+v -0.112805 -0.624088 0.888986
+v -0.0899111 -0.634521 0.847274
+v -0.119063 -0.647679 0.702012
+v -0.0832655 -0.591144 0.740087
+v -0.0943475 -0.648249 0.746058
+v -0.069956 -0.47422 0.735292
+v -0.106684 -0.534636 0.682874
+v -0.0773661 -0.528459 0.735056
+v -0.208287 -0.52835 0.637223
+v -0.252371 -0.581833 0.652832
+v -0.260651 -0.513618 0.642398
+v -0.15335 -0.595644 0.66033
+v -0.153645 -0.534978 0.650489
+v -0.20034 -0.651356 0.660475
+v -0.156436 -0.650983 0.672896
+v -0.244653 -0.641994 0.666514
+v -0.209392 -0.47177 0.928314
+v -0.149543 -0.554429 0.922812
+v -0.147798 -0.472236 0.922294
+v -0.262868 -0.552296 0.920108
+v -0.267734 -0.472455 0.917082
+v -0.203436 -0.628169 0.923071
+v -0.253551 -0.622939 0.912658
+v -0.153584 -0.625261 0.914716
+v -0.387436 -0.440321 0.0296682
+v -0.369537 -0.538419 0.0961211
+v -0.348561 -0.464054 0.158716
+v -0.376444 -0.45013 0.0999599
+v -0.354445 -0.427851 -0.0878125
+v -0.378455 -0.525308 -0.0356201
+v -0.380964 -0.432504 -0.0361787
+v -0.368236 -0.614976 0.0306267
+v -0.340742 -0.597899 -0.0798229
+v -0.364087 -0.605614 -0.0296813
+v -0.325558 -0.615423 0.139592
+v -0.354786 -0.614851 0.0904298
+v -0.12704 -0.448099 -0.0735785
+v -0.182701 -0.527846 -0.109937
+v -0.243518 -0.430642 -0.121062
+v -0.179139 -0.436248 -0.106528
+v -0.241923 -0.609859 -0.117359
+v -0.141316 -0.607217 -0.0730372
+v -0.186967 -0.607615 -0.104287
+v -0.304443 -0.521287 -0.117606
+v -0.296601 -0.602967 -0.109127
+v -0.305291 -0.427537 -0.115465
+v -0.25113 -0.502745 0.197094
+v -0.188536 -0.577394 0.174934
+v -0.148115 -0.512563 0.149929
+v -0.196177 -0.511769 0.183546
+v -0.293784 -0.559568 0.181063
+v -0.304357 -0.484216 0.189956
+v -0.233064 -0.638226 0.175543
+v -0.282371 -0.626877 0.167977
+v -0.145237 -0.635178 0.13052
+v -0.185474 -0.638484 0.162245
+v -0.0953701 -0.405611 -0.0214782
+v -0.0997946 -0.492468 0.0352712
+v -0.105712 -0.548895 -0.0268432
+v -0.102107 -0.469936 -0.0239275
+v -0.113288 -0.573273 0.0910993
+v -0.112515 -0.451919 0.0935688
+v -0.115559 -0.506404 0.0968639
+v -0.106751 -0.633108 0.0275567
+v -0.117988 -0.635837 0.0828
+v -0.114552 -0.619641 -0.0267392
+v -0.308658 0.43336 -0.368305
+v -0.285353 0.520853 -0.361551
+v -0.271184 0.525759 -0.460079
+v -0.291391 0.478507 -0.418527
+v -0.298509 0.427522 -0.236282
+v -0.317802 0.38717 -0.32759
+v -0.235335 0.485531 -0.0647615
+v -0.271214 0.498067 -0.167977
+v -0.272045 0.45855 -0.143225
+v -0.277875 0.55689 -0.190965
+v -0.282577 0.535883 -0.259681
+v -0.247008 0.54474 -0.114114
+v -0.271665 0.531425 -0.180446
+v -0.267 0.601931 -0.388185
+v -0.278055 0.600765 -0.320108
+v -0.278645 0.563976 -0.33172
+v -0.252363 0.682167 -0.15033
+v -0.326452 0.645309 -0.138504
+v -0.267679 0.610284 -0.129549
+v -0.192626 0.647576 -0.125774
+v -0.258116 0.645451 -0.13607
+v -0.430349 0.658277 -0.154382
+v -0.380321 0.61857 -0.152413
+v -0.388682 0.680406 -0.144893
+v -0.384792 0.647908 -0.143704
+v -0.35448 0.589213 -0.202564
+v -0.324346 0.587714 -0.15672
+v -0.401583 0.612716 -0.19176
+v -0.367842 0.597467 -0.17194
+v -0.239082 0.578957 -0.111996
+v -0.289471 0.569074 -0.162469
+v -0.279361 0.583208 -0.137022
+v -0.318359 0.584666 -0.230094
+v -0.294439 0.591849 -0.266039
+v -0.294318 0.572729 -0.209871
+v -0.380591 0.614582 -0.245535
+v -0.343967 0.620068 -0.286887
+v -0.347363 0.598425 -0.244565
+v -0.345079 0.681327 -0.333156
+v -0.312606 0.638668 -0.319124
+v -0.375881 0.662505 -0.310821
+v -0.34177 0.649107 -0.318477
+v -0.28588 0.662049 -0.349089
+v -0.290372 0.624108 -0.317788
+v -0.419572 0.658706 -0.261892
+v -0.444197 0.711394 -0.263256
+v -0.393743 0.706765 -0.3123
+v -0.412958 0.685943 -0.291128
+v -0.447289 0.655956 -0.196803
+v -0.417303 0.633713 -0.225256
+v -0.447397 0.702392 -0.15815
+v -0.471552 0.708579 -0.199184
+v -0.460337 0.681448 -0.17325
+v -0.43241 0.746455 -0.258303
+v -0.452589 0.744444 -0.206816
+v -0.460792 0.730831 -0.231063
+v -0.0467382 0.325284 0.136739
+v -0.0925539 0.320139 0.170699
+v -0.0462436 0.320164 0.221337
+v -0.0468991 0.323707 0.17766
+v -0.132936 0.312061 0.210485
+v -0.135299 0.30832 0.112598
+v -0.13572 0.310986 0.159838
+v -0.125628 0.296564 0.306829
+v -0.0886491 0.312396 0.263435
+v -0.129481 0.307575 0.259519
+v -0.0434892 0.30185 0.311934
+v -0.0450231 0.313323 0.266515
+v -0.0377827 0.232065 0.495188
+v -0.0735327 0.221293 0.545706
+v -0.0364517 0.2252 0.601479
+v -0.0369868 0.226526 0.54726
+v -0.107651 0.210086 0.597079
+v -0.111325 0.219111 0.491389
+v -0.109197 0.212143 0.543201
+v -0.105384 0.203917 0.704755
+v -0.0717153 0.218255 0.65442
+v -0.106456 0.208743 0.651454
+v -0.0357781 0.218679 0.710249
+v -0.0360809 0.22396 0.656319
+v -0.0703592 0.199987 0.759265
+v -0.035033 0.18494 0.809623
+v -0.0354469 0.205232 0.761742
+v -0.10286 0.172311 0.802347
+v -0.104202 0.191409 0.755412
+v -0.0994805 0.118705 0.880737
+v -0.0684286 0.154761 0.849486
+v -0.101303 0.147713 0.844481
+v -0.0337403 0.129119 0.889852
+v -0.0344822 0.159128 0.852718
+v -0.032544 -0.433336 0.0684456
+v -0.0619855 -0.419454 0.0516405
+v -0.0290027 -0.401336 0.00260959
+v -0.0312142 -0.420036 0.03584
+v -0.0804386 -0.407577 0.025012
+v -0.101981 -0.43742 0.118873
+v -0.0918712 -0.426509 0.0746889
+v -0.0781392 -0.343402 -0.0680291
+v -0.0524989 -0.374486 -0.0268658
+v -0.0771327 -0.378765 -0.022981
+v -0.024587 -0.350255 -0.0669927
+v -0.0265726 -0.377866 -0.0315624
+v -0.0173499 -0.340897 0.891348
+v -0.032518 -0.377955 0.851217
+v -0.0162142 -0.396858 0.808469
+v -0.0161103 -0.370733 0.852319
+v -0.0473693 -0.423079 0.799815
+v -0.0576974 -0.360469 0.893822
+v -0.0495204 -0.395304 0.851292
+v -0.0641499 -0.454693 0.696538
+v -0.0352263 -0.426311 0.757202
+v -0.0543417 -0.44245 0.745122
+v -0.0190571 -0.436735 0.723869
+v -0.0173127 -0.418962 0.764189
+v -0.08278 0.283223 0.354838
+v -0.0403407 0.264584 0.4009
+v -0.0418935 0.284411 0.356338
+v -0.117787 0.25622 0.397308
+v -0.121652 0.278062 0.352302
+v -0.0772361 0.242537 0.445498
+v -0.114267 0.235186 0.443214
+v -0.0389355 0.245943 0.446792
+v -0.0448664 0.351036 0.0368917
+v -0.084777 0.331134 0.0392602
+v -0.0441574 0.329119 0.0693112
+v -0.0436281 0.336529 0.047541
+v -0.122579 0.311429 0.0436831
+v -0.125569 0.33518 0.0110101
+v -0.120968 0.319807 0.0225689
+v -0.0891709 0.320583 0.0906503
+v -0.129137 0.308034 0.0737864
+v -0.0455092 0.326229 0.0998335
+v -0.0632284 0.496693 0.0522852
+v -0.116503 0.442256 0.0399308
+v -0.0541098 0.40816 0.0483714
+v -0.0593708 0.448955 0.0551193
+v -0.153438 0.391277 0.0121446
+v -0.18104 0.470039 0.00485434
+v -0.169159 0.42866 0.0126096
+v -0.0954346 0.370179 0.029827
+v -0.13765 0.35956 0.0095734
+v -0.0488177 0.375217 0.039732
+v -0.0692761 0.505582 -0.562042
+v -0.130136 0.568004 -0.528764
+v -0.0626485 0.640189 -0.506046
+v -0.0664738 0.570216 -0.533525
+v -0.232047 0.565216 -0.501918
+v -0.174025 0.632377 -0.495223
+v -0.199187 0.49892 -0.545668
+v -0.188173 0.564066 -0.51907
+v -0.150533 0.744043 -0.44196
+v -0.113201 0.701726 -0.474505
+v -0.199905 0.685497 -0.462496
+v -0.161614 0.694175 -0.469576
+v -0.0536794 0.762368 -0.447664
+v -0.0582379 0.707055 -0.477971
+v -0.0726958 0.340957 -0.664799
+v -0.138282 0.375681 -0.645424
+v -0.0711811 0.413275 -0.623292
+v -0.0716465 0.376815 -0.648416
+v -0.199873 0.404748 -0.612935
+v -0.193634 0.339499 -0.652866
+v -0.194895 0.370921 -0.637938
+v -0.138821 0.452917 -0.589054
+v -0.202195 0.446614 -0.579567
+v -0.0706181 0.454733 -0.593233
+v -0.0306399 -0.195728 0.970285
+v -0.057123 -0.235922 0.956621
+v -0.0240468 -0.271835 0.943264
+v -0.0277858 -0.234252 0.959139
+v -0.0802769 -0.277764 0.939515
+v -0.0956803 -0.199455 0.960676
+v -0.0895627 -0.238122 0.952018
+v -0.0425527 -0.311463 0.921053
+v -0.0688023 -0.319918 0.921674
+v -0.0202817 -0.307657 0.921165
+v -0.049547 -0.311956 -0.105817
+v -0.0238367 -0.283103 -0.139843
+v -0.023709 -0.319134 -0.103998
+v -0.0816243 -0.264189 -0.144485
+v -0.0796433 -0.304819 -0.108871
+v -0.0878074 -0.176513 -0.193941
+v -0.0521008 -0.23408 -0.173683
+v -0.0840611 -0.222685 -0.173847
+v -0.0267024 -0.190718 -0.197111
+v -0.0248684 -0.240765 -0.171792
+v -0.0599751 -0.128388 -0.209467
+v -0.0321871 -0.0691596 -0.222731
+v -0.0292367 -0.131565 -0.213064
+v -0.101087 -0.0620719 -0.200364
+v -0.0937169 -0.121882 -0.201748
+v -0.115674 0.055105 -0.187883
+v -0.0713902 -0.00682142 -0.215572
+v -0.109216 -0.00036684 -0.192884
+v -0.0381981 0.0419961 -0.235526
+v -0.035269 -0.00935459 -0.229192
+v -0.0803711 0.0844567 -0.225271
+v -0.0428926 0.103386 -0.257977
+v -0.0406901 0.0790388 -0.244814
+v -0.120922 0.124855 -0.209363
+v -0.118034 0.096215 -0.193935
+v -0.138478 0.148595 -0.265069
+v -0.0884331 0.12573 -0.258533
+v -0.128964 0.142918 -0.232488
+v -0.0470212 0.120443 -0.299626
+v -0.0449537 0.11665 -0.27594
+v -0.097999 0.124878 -0.319976
+v -0.0518947 0.108853 -0.365753
+v -0.0492429 0.116377 -0.329956
+v -0.150911 0.126322 -0.356271
+v -0.145782 0.140078 -0.308863
+v -0.16045 0.112915 -0.441079
+v -0.107541 0.107031 -0.401861
+v -0.153901 0.116281 -0.399697
+v -0.0595919 0.101024 -0.449026
+v -0.0552525 0.102268 -0.405836
+v -0.125551 0.112279 -0.490196
+v -0.0709982 0.126709 -0.538642
+v -0.065189 0.10952 -0.494143
+v -0.194944 0.134806 -0.523346
+v -0.176258 0.119186 -0.482359
+v -0.219301 0.191387 -0.600189
+v -0.1475 0.153854 -0.575332
+v -0.210129 0.15949 -0.56385
+v -0.0790718 0.182988 -0.615599
+v -0.0759743 0.151547 -0.579976
+v -0.154044 0.222564 -0.638941
+v -0.0775425 0.260206 -0.661002
+v -0.0792455 0.219987 -0.642965
+v -0.213772 0.26767 -0.647803
+v -0.21995 0.228648 -0.628685
+v -0.144378 0.302534 -0.665208
+v -0.202463 0.304851 -0.656009
+v -0.0750102 0.301309 -0.668637
+v -0.0606884 0.60785 0.00104436
+v -0.126715 0.537337 0.0111468
+v -0.0643107 0.550463 0.0321785
+v -0.0403517 0.7183 -0.0719016
+v -0.0987466 0.656786 -0.062447
+v -0.0504326 0.666442 -0.034872
+v -0.171165 0.605799 -0.0764289
+v -0.126096 0.69557 -0.127071
+v -0.142824 0.650926 -0.0969906
+v -0.191637 0.550233 -0.0477524
+v -0.209701 0.569968 -0.0807895
+v -0.185308 0.513743 -0.0172354
+v -0.258514 0.602197 -0.445199
+v -0.237614 0.630468 -0.469873
+v -0.253217 0.578269 -0.474783
+v -0.246529 0.67248 -0.397952
+v -0.240943 0.662224 -0.43894
+v -0.254771 0.635976 -0.4207
+v -0.212896 0.714905 -0.428435
+v -0.224501 0.676789 -0.452578
+v -0.214224 0.916672 -0.246046
+v -0.201014 0.943338 -0.25145
+v -0.21544 0.938681 -0.279423
+v -0.226436 0.912723 -0.26973
+v -0.216965 0.931825 -0.261167
+v -0.159856 0.939686 -0.239651
+v -0.17745 0.953646 -0.260405
+v -0.179801 0.930945 -0.23207
+v -0.180783 0.946411 -0.244635
+v -0.165912 0.939303 -0.292739
+v -0.191876 0.948989 -0.288378
+v -0.1537 0.943252 -0.269054
+v -0.173109 0.951035 -0.277835
+v -0.216862 0.918936 -0.303965
+v -0.196917 0.927676 -0.311546
+v -0.207634 0.937732 -0.29671
+v -0.209996 0.761876 -0.268358
+v -0.199084 0.77337 -0.281405
+v -0.191438 0.774734 -0.312022
+v -0.207814 0.759869 -0.30299
+v -0.201734 0.767127 -0.28955
+v -0.203233 0.797472 -0.273932
+v -0.192387 0.789092 -0.301842
+v -0.202175 0.775482 -0.251997
+v -0.199221 0.78193 -0.2764
+v -0.165819 0.804418 -0.335288
+v -0.178221 0.788374 -0.325575
+v -0.18742 0.811743 -0.321532
+v -0.181068 0.796668 -0.322844
+v -0.180956 0.7796 -0.34144
+v -0.161743 0.793141 -0.342758
+v -0.178357 0.783728 -0.331262
+v -0.192516 0.777938 -0.227759
+v -0.168503 0.781447 -0.210541
+v -0.151415 0.801648 -0.215482
+v -0.181336 0.804317 -0.228606
+v -0.174268 0.789156 -0.220565
+v -0.153638 0.772539 -0.191376
+v -0.137434 0.789176 -0.20042
+v -0.186175 0.766882 -0.204739
+v -0.162292 0.776737 -0.200861
+v -0.0963023 0.812563 -0.21585
+v -0.12229 0.805309 -0.211386
+v -0.103071 0.800257 -0.198013
+v -0.114773 0.801816 -0.205592
+v -0.135327 0.828572 -0.217894
+v -0.108345 0.822919 -0.221075
+v -0.128262 0.813355 -0.215464
+v -0.143329 0.804627 -0.337252
+v -0.119159 0.809804 -0.331351
+v -0.13845 0.801677 -0.350685
+v -0.140844 0.802002 -0.341998
+v -0.153713 0.826966 -0.334024
+v -0.127696 0.819206 -0.325202
+v -0.147191 0.8121 -0.335056
+v -0.0961567 0.829666 -0.288835
+v -0.10501 0.817882 -0.310157
+v -0.116614 0.840248 -0.308051
+v -0.109848 0.825231 -0.308777
+v -0.0934554 0.817549 -0.318853
+v -0.086003 0.820458 -0.291304
+v -0.100185 0.816042 -0.313185
+v -0.084054 0.824638 -0.268864
+v -0.0780919 0.822576 -0.248928
+v -0.0710179 0.824335 -0.270682
+v -0.0786961 0.822795 -0.269513
+v -0.0959818 0.847028 -0.267898
+v -0.0888081 0.831892 -0.249223
+v -0.0891348 0.832003 -0.268429
+v -0.0899479 0.821575 -0.232115
+v -0.102029 0.843892 -0.232989
+v -0.095203 0.828981 -0.232883
+v -0.0751675 0.819879 -0.228529
+v -0.084018 0.819324 -0.230668
+v -0.231902 0.754223 -0.26947
+v -0.226075 0.759994 -0.240858
+v -0.208076 0.764415 -0.241789
+v -0.216972 0.759952 -0.252956
+v -0.265103 0.760852 -0.220189
+v -0.223444 0.760451 -0.217689
+v -0.264144 0.760816 -0.243923
+v -0.240714 0.760641 -0.229576
+v -0.20189 0.765574 -0.22074
+v -0.215759 0.757673 -0.200462
+v -0.205912 0.761992 -0.209879
+v -0.201854 0.770051 -0.234112
+v -0.234872 0.742799 -0.306286
+v -0.277858 0.727498 -0.319523
+v -0.278995 0.751255 -0.279238
+v -0.256224 0.744786 -0.293184
+v -0.317986 0.708047 -0.333577
+v -0.258236 0.693552 -0.356883
+v -0.284726 0.698216 -0.343913
+v -0.37536 0.738148 -0.30116
+v -0.358205 0.712515 -0.324745
+v -0.402685 0.758643 -0.236201
+v -0.343965 0.756617 -0.269721
+v -0.390758 0.753705 -0.27032
+v -0.311167 0.763685 -0.236173
+v -0.300161 0.759231 -0.259113
+v -0.277376 0.753251 -0.197126
+v -0.248651 0.737773 -0.18066
+v -0.183529 0.754714 -0.188935
+v -0.23327 0.750984 -0.191422
+v -0.352617 0.756796 -0.208565
+v -0.303456 0.759588 -0.213776
+v -0.409003 0.736167 -0.174314
+v -0.409429 0.752424 -0.202781
+v -0.329845 0.712932 -0.159268
+v -0.39942 0.710994 -0.155057
+v -0.178779 0.718135 -0.16643
+v -0.252112 0.713887 -0.166078
+v -0.106741 0.782082 -0.178969
+v -0.0602661 0.797425 -0.177797
+v -0.0705006 0.811753 -0.205807
+v -0.0845451 0.798015 -0.188581
+v -0.128352 0.75368 -0.16956
+v -0.140544 0.766364 -0.181936
+v -0.0771688 0.744134 -0.128582
+v -0.122404 0.729951 -0.151266
+v -0.0359638 0.781745 -0.13792
+v -0.0372542 0.755486 -0.106376
+v -0.0258744 0.814456 -0.19275
+v -0.0313039 0.800824 -0.16616
+v -0.0438017 0.818098 -0.377412
+v -0.0647564 0.824008 -0.337021
+v -0.0269807 0.832763 -0.306956
+v -0.0352238 0.828719 -0.341387
+v -0.0968568 0.789376 -0.411772
+v -0.0494105 0.797681 -0.413491
+v -0.176513 0.760515 -0.410655
+v -0.134148 0.794627 -0.38492
+v -0.140375 0.776566 -0.412207
+v -0.105446 0.812324 -0.352131
+v -0.163948 0.787613 -0.366372
+v -0.134862 0.801108 -0.364702
+v -0.0673729 0.8251 -0.299446
+v -0.0829065 0.820245 -0.328154
+v -0.0209585 0.83068 -0.246717
+v -0.0431316 0.822381 -0.222498
+v -0.0222752 0.824379 -0.219341
+v -0.0427143 0.830406 -0.274631
+v -0.0223763 0.833445 -0.275661
+v -0.0571055 0.825864 -0.248034
+v -0.0589761 0.827121 -0.27268
+v -0.0611506 0.820889 -0.225684
+v -0.220893 0.723503 -0.390265
+v -0.192333 0.761236 -0.378584
+v -0.20182 0.742487 -0.402976
+v -0.229147 0.73046 -0.348619
+v -0.238328 0.704586 -0.373617
+v -0.202503 0.759013 -0.342238
+v -0.217964 0.748268 -0.323878
+v -0.185497 0.772855 -0.357643
+v -0.208154 0.830042 -0.298948
+v -0.20855 0.866089 -0.317157
+v -0.182638 0.844163 -0.332751
+v -0.197796 0.836731 -0.320106
+v -0.207473 0.823979 -0.249259
+v -0.22169 0.856724 -0.273536
+v -0.212209 0.824662 -0.273398
+v -0.225435 0.891324 -0.293612
+v -0.221713 0.885753 -0.253374
+v -0.227222 0.886884 -0.273083
+v -0.202088 0.898609 -0.32219
+v -0.216036 0.894273 -0.311279
+v -0.144889 0.85895 -0.323167
+v -0.140188 0.893495 -0.304854
+v -0.113654 0.869597 -0.287244
+v -0.127225 0.865094 -0.306986
+v -0.17602 0.88052 -0.329499
+v -0.164181 0.851772 -0.332764
+v -0.170407 0.915574 -0.314537
+v -0.186655 0.907214 -0.322451
+v -0.141278 0.921251 -0.283964
+v -0.154011 0.919177 -0.300925
+v -0.106278 0.87162 -0.248059
+v -0.126235 0.896792 -0.232757
+v -0.125921 0.862724 -0.222037
+v -0.112672 0.868658 -0.232446
+v -0.120264 0.900007 -0.266157
+v -0.106638 0.871852 -0.266963
+v -0.136706 0.926532 -0.249134
+v -0.136018 0.925025 -0.266007
+v -0.152825 0.914652 -0.226568
+v -0.141818 0.921807 -0.235425
+v -0.169297 0.842653 -0.222278
+v -0.201725 0.863227 -0.234578
+v -0.191458 0.831381 -0.232334
+v -0.158724 0.882351 -0.220155
+v -0.146124 0.853579 -0.218747
+v -0.191735 0.902282 -0.228209
+v -0.171199 0.908866 -0.224253
+v -0.209229 0.892731 -0.238166
+v -0.228365 -0.446181 0.420977
+v -0.20511 -0.461837 0.475502
+v -0.183641 -0.455489 0.534379
+v -0.21326 -0.450303 0.478539
+v -0.176881 -0.48852 0.469991
+v -0.17086 -0.474631 0.521789
+v -0.206663 -0.472505 0.418626
+v -0.194975 -0.474631 0.472428
+v -0.109552 -0.472057 0.595485
+v -0.142559 -0.466669 0.57469
+v -0.132033 -0.481167 0.556781
+v -0.140849 -0.473458 0.563316
+v -0.102981 -0.465723 0.632974
+v -0.143787 -0.462243 0.587914
+v -0.0755892 -0.465387 0.639018
+v -0.0439995 -0.453929 0.682063
+v -0.0705061 -0.461083 0.663389
+v -0.0917758 -0.479034 0.591505
+v -0.0549351 -0.471091 0.629642
+v -0.0815781 -0.471375 0.616771
+v -0.0238268 -0.46053 0.664171
+v -0.0329053 -0.483094 0.604968
+v -0.0276324 -0.470883 0.636565
+v -0.0210984 -0.44987 0.691901
+v -0.0317646 -0.445352 0.134276
+v -0.065741 -0.440111 0.119726
+v -0.0323291 -0.440606 0.100743
+v -0.034368 -0.461296 0.211229
+v -0.0636123 -0.450028 0.182161
+v -0.0320458 -0.45108 0.17059
+v -0.124102 -0.450719 0.228684
+v -0.0965198 -0.444834 0.175662
+v -0.0981278 -0.457279 0.230677
+v -0.0942201 -0.449501 0.201118
+v -0.138015 -0.443895 0.192281
+v -0.101319 -0.442167 0.150389
+v -0.163536 -0.447685 0.255959
+v -0.205661 -0.440923 0.30331
+v -0.174785 -0.442414 0.24495
+v -0.138385 -0.462583 0.271465
+v -0.179537 -0.460826 0.31166
+v -0.153491 -0.454897 0.266081
+v -0.213095 -0.454425 0.36305
+v -0.178344 -0.481123 0.364052
+v -0.199389 -0.4673 0.363772
+v -0.224677 -0.442616 0.362277
+v -0.0774159 -0.49456 0.56248
+v -0.0472884 -0.515301 0.519274
+v -0.0400356 -0.499329 0.565268
+v -0.12906 -0.497175 0.517019
+v -0.109485 -0.488354 0.558077
+v -0.151776 -0.503865 0.416665
+v -0.102374 -0.516827 0.468605
+v -0.144853 -0.503338 0.468859
+v -0.0552219 -0.529321 0.415644
+v -0.0529288 -0.526727 0.468796
+v -0.101727 -0.509066 0.362546
+v -0.046641 -0.500432 0.308431
+v -0.0524328 -0.518796 0.361628
+v -0.12954 -0.482953 0.312862
+v -0.144743 -0.495777 0.363499
+v -0.0778956 -0.474931 0.261875
+v -0.111951 -0.469275 0.268257
+v -0.0399262 -0.479506 0.257737
+v -0.164631 -0.0504489 0.951176
+v -0.139042 -0.0451633 0.963659
+v -0.137897 -0.0298265 0.961061
+v -0.16267 -0.0297463 0.947597
+v -0.150007 -0.038161 0.957034
+v -0.109851 -0.0516794 0.976283
+v -0.122462 -0.037914 0.969963
+v -0.133714 -0.0642499 0.965416
+v -0.125245 -0.0487781 0.969619
+v -0.130968 -0.0135459 0.956851
+v -0.109331 -0.0304408 0.974293
+v -0.124172 -0.0273895 0.966259
+v -0.18873 -0.0434748 0.935764
+v -0.213918 -0.078022 0.922841
+v -0.172782 -0.0812671 0.947651
+v -0.185903 -0.064746 0.940486
+v -0.24743 -0.125194 0.898745
+v -0.247517 -0.0516572 0.891815
+v -0.248772 -0.0872467 0.896118
+v -0.209867 -0.161573 0.925244
+v -0.2493 -0.167014 0.898651
+v -0.133001 -0.161124 0.956956
+v -0.163882 -0.120989 0.949271
+v -0.170754 -0.160591 0.943861
+v -0.119758 -0.0911109 0.968067
+v -0.149065 -0.0885975 0.957545
+v -0.165953 -0.00373407 0.934179
+v -0.206641 -0.0168625 0.913183
+v -0.181563 -0.0218941 0.9332
+v -0.116698 0.0146029 0.948102
+v -0.155037 0.0342159 0.918365
+v -0.143447 0.00727528 0.940085
+v -0.188508 0.055577 0.881861
+v -0.128713 0.0776757 0.9026
+v -0.159167 0.0664178 0.894094
+v -0.225597 0.0165132 0.878354
+v -0.216542 0.0481267 0.863247
+v -0.237857 -0.0169093 0.886885
+v -0.0314129 0.062618 0.944549
+v -0.0591053 0.0273947 0.96148
+v -0.0272441 0.00174637 0.985368
+v -0.0296124 0.0304081 0.965777
+v -0.0652762 0.0925539 0.91594
+v -0.032753 0.0962353 0.919852
+v -0.0939443 0.0532189 0.93429
+v -0.0973395 0.0863776 0.910036
+v -0.0802969 -0.0053962 0.974961
+v -0.0883591 0.0217178 0.955411
+v -0.0313918 -0.121887 0.983992
+v -0.0640997 -0.158632 0.974036
+v -0.0317497 -0.157085 0.978196
+v -0.0273784 -0.0760818 0.993721
+v -0.05977 -0.0942344 0.98419
+v -0.0298425 -0.0936976 0.988793
+v -0.0956523 -0.123707 0.972085
+v -0.0810019 -0.0733797 0.982855
+v -0.0898671 -0.0931811 0.977114
+v -0.0976501 -0.160225 0.966985
+v -0.0864529 -0.0640166 0.98579
+v -0.0789269 -0.06284 0.994959
+v -0.0955999 -0.0476973 0.985969
+v -0.0940951 -0.0565412 0.98502
+v -0.0525276 -0.0775374 1.01483
+v -0.0666347 -0.0644079 1.00864
+v -0.0598989 -0.0753391 1.00401
+v -0.0652946 -0.0700523 1.00523
+v -0.0598411 -0.0536198 1.01768
+v -0.0788425 -0.0488271 1.00064
+v -0.0525009 -0.0662129 1.0231
+v -0.0652608 -0.0582549 1.01251
+v -0.0861068 -0.0306433 0.989153
+v -0.0938769 -0.0385712 0.986828
+v -0.0582598 -0.0403976 1.0119
+v -0.0476184 -0.0223971 1.0006
+v -0.0694706 -0.025394 0.994502
+v -0.0426567 -0.0627588 1.03251
+v -0.0344846 -0.0534302 1.03232
+v -0.0490439 -0.0525287 1.02501
+v -0.0209047 -0.039677 1.02317
+v -0.0152462 -0.0676801 1.04602
+v -0.0177789 -0.0547726 1.03745
+v -0.0242006 -0.0212264 1.00515
+v -0.0209476 -0.0779737 1.00716
+v -0.0477856 -0.0718253 0.995571
+v -0.0242757 -0.0726037 0.999895
+v -0.0152667 -0.0940994 1.02438
+v -0.0345351 -0.0845283 1.01121
+v -0.0178069 -0.0869021 1.01538
+v -0.0583871 -0.073576 0.998233
+v -0.0427012 -0.0854758 1.01505
+v -0.0491057 -0.0804193 1.00671
+v -0.0697639 -0.0690371 0.990088
+v -0.0394909 -0.0796254 1.02906
+v -0.0271444 -0.0763309 1.04139
+v -0.0399318 -0.0720589 1.03309
+v -0.0271637 -0.0905263 1.02914
+v -0.0399551 -0.0846544 1.02277
+v -0.0132272 -0.0889316 1.04223
+v -0.01374 -0.0942759 1.03402
+v -0.0137291 -0.0795664 1.04692
+vt 0.800375 0.667457
+vt 0.789584 0.668215
+vt 0.799923 0.663933
+vt 0.789057 0.664897
+vt 0.811842 0.670848
+vt 0.80103 0.671718
+vt 0.811217 0.666381
+vt 0.810346 0.655189
+vt 0.810548 0.658683
+vt 0.799442 0.658809
+vt 0.799641 0.661178
+vt 0.788653 0.662549
+vt 0.821717 0.660492
+vt 0.821466 0.655998
+vt 0.832622 0.658487
+vt 0.832389 0.653218
+vt 0.821251 0.651569
+vt 0.822539 0.669852
+vt 0.822041 0.665116
+vt 0.832855 0.663756
+vt 0.768389 0.668485
+vt 0.778915 0.668524
+vt 0.769427 0.673433
+vt 0.779732 0.673035
+vt 0.778165 0.664791
+vt 0.74577 0.661865
+vt 0.756532 0.663039
+vt 0.747747 0.667793
+vt 0.758027 0.6682
+vt 0.759367 0.673605
+vt 0.753496 0.653114
+vt 0.765177 0.656219
+vt 0.755019 0.658037
+vt 0.76625 0.660042
+vt 0.743793 0.655937
+vt 0.777492 0.661858
+vt 0.776857 0.659324
+vt 0.860743 0.66105
+vt 0.85437 0.658994
+vt 0.861885 0.655612
+vt 0.85471 0.654338
+vt 0.858253 0.665619
+vt 0.86356 0.668573
+vt 0.867109 0.663417
+vt 0.870052 0.650753
+vt 0.862482 0.650015
+vt 0.870421 0.644717
+vt 0.862589 0.644612
+vt 0.85505 0.649682
+vt 0.883125 0.662497
+vt 0.876218 0.659429
+vt 0.885992 0.655087
+vt 0.877897 0.652301
+vt 0.878895 0.645534
+vt 0.869196 0.672347
+vt 0.87346 0.666407
+vt 0.879453 0.669777
+vt 0.889305 0.677736
+vt 0.893116 0.683944
+vt 0.881403 0.682863
+vt 0.884277 0.688027
+vt 0.896144 0.692562
+vt 0.90593 0.689794
+vt 0.901798 0.679591
+vt 0.889995 0.666526
+vt 0.884742 0.673283
+vt 0.878268 0.679148
+vt 0.902596 0.665971
+vt 0.89431 0.659538
+vt 0.908133 0.659549
+vt 0.89794 0.652608
+vt 0.916609 0.686107
+vt 0.910596 0.674701
+vt 0.918591 0.668435
+vt 0.713434 0.678889
+vt 0.718232 0.673865
+vt 0.719971 0.68226
+vt 0.723135 0.677582
+vt 0.720227 0.663887
+vt 0.722594 0.669482
+vt 0.714094 0.668877
+vt 0.690854 0.689174
+vt 0.699881 0.681004
+vt 0.701094 0.693026
+vt 0.707762 0.685198
+vt 0.716169 0.688193
+vt 0.697269 0.662673
+vt 0.702107 0.668949
+vt 0.686646 0.668347
+vt 0.692886 0.67536
+vt 0.6822 0.683174
+vt 0.710252 0.663404
+vt 0.717861 0.658291
+vt 0.706788 0.657767
+vt 0.858365 0.672357
+vt 0.853611 0.668611
+vt 0.843559 0.666338
+vt 0.848795 0.664994
+vt 0.847566 0.670615
+vt 0.855227 0.679121
+vt 0.862995 0.676364
+vt 0.858343 0.683022
+vt 0.867102 0.680206
+vt 0.834415 0.678265
+vt 0.843484 0.67676
+vt 0.836434 0.683129
+vt 0.846261 0.68128
+vt 0.848914 0.685681
+vt 0.840865 0.672225
+vt 0.838324 0.667682
+vt 0.832962 0.673545
+vt 0.812703 0.675707
+vt 0.801925 0.676679
+vt 0.791163 0.677558
+vt 0.792268 0.683394
+vt 0.803066 0.682261
+vt 0.82446 0.679693
+vt 0.823312 0.674685
+vt 0.815369 0.686723
+vt 0.826081 0.684957
+vt 0.817113 0.692704
+vt 0.827981 0.690382
+vt 0.79352 0.689825
+vt 0.804461 0.688386
+vt 0.806115 0.694921
+vt 0.798864 0.71184
+vt 0.810186 0.708981
+vt 0.801046 0.72017
+vt 0.812549 0.716812
+vt 0.823667 0.712859
+vt 0.826337 0.721153
+vt 0.81509 0.725328
+vt 0.819076 0.698906
+vt 0.808032 0.701735
+vt 0.796857 0.704106
+vt 0.829963 0.69587
+vt 0.840668 0.692798
+vt 0.832092 0.701818
+vt 0.842717 0.698135
+vt 0.834432 0.708622
+vt 0.837107 0.716573
+vt 0.844937 0.704332
+vt 0.867471 0.703655
+vt 0.86533 0.696176
+vt 0.877095 0.699305
+vt 0.875007 0.692109
+vt 0.872828 0.687102
+vt 0.853212 0.694487
+vt 0.855272 0.700217
+vt 0.857607 0.707609
+vt 0.851162 0.689863
+vt 0.861137 0.686828
+vt 0.870286 0.68346
+vt 0.737342 0.674399
+vt 0.736161 0.679693
+vt 0.731151 0.674738
+vt 0.729268 0.679242
+vt 0.727195 0.684244
+vt 0.743049 0.685744
+vt 0.743345 0.679784
+vt 0.751789 0.685777
+vt 0.751276 0.679628
+vt 0.743533 0.67406
+vt 0.73204 0.698591
+vt 0.733516 0.691513
+vt 0.741945 0.699272
+vt 0.742539 0.692176
+vt 0.751932 0.692359
+vt 0.72474 0.690242
+vt 0.722029 0.697328
+vt 0.716708 0.714828
+vt 0.715092 0.72481
+vt 0.702617 0.714132
+vt 0.700538 0.724407
+vt 0.728191 0.725265
+vt 0.727285 0.735883
+vt 0.713954 0.735509
+vt 0.730575 0.706575
+vt 0.719186 0.705595
+vt 0.70688 0.704276
+vt 0.740902 0.715992
+vt 0.741399 0.707225
+vt 0.75229 0.716217
+vt 0.752093 0.707508
+vt 0.740453 0.725533
+vt 0.740092 0.735897
+vt 0.752425 0.725529
+vt 0.789126 0.722623
+vt 0.790991 0.731868
+vt 0.776943 0.72426
+vt 0.778298 0.733889
+vt 0.787299 0.713921
+vt 0.76386 0.71596
+vt 0.764649 0.725167
+vt 0.765442 0.735096
+vt 0.773153 0.698992
+vt 0.774288 0.706826
+vt 0.762394 0.699466
+vt 0.763091 0.707391
+vt 0.785576 0.705767
+vt 0.784024 0.698116
+vt 0.781583 0.684283
+vt 0.780606 0.678305
+vt 0.77033 0.678903
+vt 0.772112 0.691699
+vt 0.782707 0.69092
+vt 0.761214 0.685501
+vt 0.761821 0.692189
+vt 0.760412 0.679339
+vt 0.899727 0.753469
+vt 0.893091 0.747999
+vt 0.905106 0.743418
+vt 0.897762 0.739947
+vt 0.887783 0.741206
+vt 0.880731 0.745814
+vt 0.886161 0.754214
+vt 0.904716 0.775474
+vt 0.898651 0.768079
+vt 0.914323 0.765617
+vt 0.907202 0.758849
+vt 0.913707 0.74723
+vt 0.877978 0.781668
+vt 0.872582 0.772759
+vt 0.888929 0.775502
+vt 0.883177 0.767576
+vt 0.894526 0.783756
+vt 0.872228 0.750111
+vt 0.877492 0.759308
+vt 0.867387 0.763696
+vt 0.843778 0.771729
+vt 0.83028 0.775596
+vt 0.839713 0.76143
+vt 0.826842 0.765165
+vt 0.83386 0.786091
+vt 0.815809 0.779609
+vt 0.818643 0.790465
+vt 0.860927 0.77752
+vt 0.856148 0.767799
+vt 0.851602 0.757722
+vt 0.852231 0.792074
+vt 0.865736 0.787127
+vt 0.856409 0.802551
+vt 0.870501 0.796949
+vt 0.837491 0.796705
+vt 0.821654 0.801351
+vt 0.841215 0.807639
+vt 0.724356 0.780932
+vt 0.721897 0.791331
+vt 0.712133 0.77893
+vt 0.708432 0.787628
+vt 0.688991 0.788336
+vt 0.696924 0.781352
+vt 0.7032 0.796344
+vt 0.734947 0.804179
+vt 0.736823 0.793158
+vt 0.751835 0.804371
+vt 0.752608 0.793134
+vt 0.738282 0.782037
+vt 0.732844 0.815397
+vt 0.730608 0.827229
+vt 0.715066 0.812163
+vt 0.711339 0.823597
+vt 0.751007 0.816099
+vt 0.681689 0.797702
+vt 0.697958 0.806263
+vt 0.692594 0.817566
+vt 0.802671 0.795049
+vt 0.800517 0.783977
+vt 0.798447 0.773092
+vt 0.783398 0.776846
+vt 0.784699 0.788076
+vt 0.788691 0.822864
+vt 0.787323 0.810943
+vt 0.807157 0.818048
+vt 0.804858 0.806345
+vt 0.768947 0.80275
+vt 0.769269 0.814405
+vt 0.769629 0.82649
+vt 0.768262 0.779889
+vt 0.768648 0.791284
+vt 0.829319 0.730383
+vt 0.817782 0.734631
+vt 0.807922 0.748352
+vt 0.80562 0.738467
+vt 0.820629 0.744508
+vt 0.843731 0.736281
+vt 0.840238 0.725959
+vt 0.854152 0.732184
+vt 0.850604 0.721518
+vt 0.847491 0.747042
+vt 0.836031 0.750919
+vt 0.858049 0.743181
+vt 0.823637 0.754744
+vt 0.810425 0.758547
+vt 0.739591 0.758816
+vt 0.739858 0.747133
+vt 0.753132 0.758368
+vt 0.752889 0.746823
+vt 0.726681 0.747058
+vt 0.739135 0.77052
+vt 0.725558 0.770057
+vt 0.753262 0.7701
+vt 0.69786 0.758579
+vt 0.712375 0.758296
+vt 0.700367 0.76912
+vt 0.712782 0.769064
+vt 0.712907 0.746891
+vt 0.698005 0.747078
+vt 0.929639 0.699595
+vt 0.928443 0.716654
+vt 0.918934 0.700571
+vt 0.918764 0.716584
+vt 0.908376 0.702434
+vt 0.917291 0.732639
+vt 0.908091 0.730817
+vt 0.926839 0.733706
+vt 0.899583 0.717133
+vt 0.899655 0.729345
+vt 0.890467 0.718513
+vt 0.891789 0.729325
+vt 0.898356 0.704249
+vt 0.888732 0.70646
+vt 0.766981 0.757058
+vt 0.767683 0.768445
+vt 0.782175 0.765786
+vt 0.779622 0.744124
+vt 0.766227 0.745834
+vt 0.794659 0.751851
+vt 0.792827 0.741649
+vt 0.796513 0.762357
+vt 0.870023 0.713177
+vt 0.860485 0.717218
+vt 0.863872 0.728122
+vt 0.881776 0.720957
+vt 0.879363 0.709514
+vt 0.876505 0.735646
+vt 0.884297 0.731857
+vt 0.867737 0.739401
+vt 0.942885 0.685053
+vt 0.953367 0.687709
+vt 0.940102 0.70044
+vt 0.950147 0.702062
+vt 0.938165 0.716742
+vt 0.968754 0.704629
+vt 0.964644 0.717556
+vt 0.959599 0.703413
+vt 0.956139 0.717201
+vt 0.963128 0.689798
+vt 0.953145 0.730828
+vt 0.951463 0.743182
+vt 0.945243 0.731521
+vt 0.944286 0.744982
+vt 0.960533 0.730482
+vt 0.936315 0.732906
+vt 0.935707 0.747908
+vt 0.168776 0.186925
+vt 0.153188 0.186399
+vt 0.169355 0.173108
+vt 0.15372 0.172537
+vt 0.137511 0.174005
+vt 0.154886 0.15836
+vt 0.139434 0.160894
+vt 0.135878 0.200202
+vt 0.153921 0.199631
+vt 0.13741 0.213278
+vt 0.156324 0.212569
+vt 0.169774 0.199674
+vt 0.115769 0.200908
+vt 0.0946273 0.201684
+vt 0.11798 0.188318
+vt 0.0992616 0.189776
+vt 0.115489 0.212947
+vt 0.120823 0.175844
+vt 0.123982 0.163427
+vt 0.103896 0.177868
+vt 0.197705 0.18287
+vt 0.184318 0.177386
+vt 0.204286 0.174321
+vt 0.191177 0.166862
+vt 0.182759 0.189357
+vt 0.218051 0.196712
+vt 0.206672 0.195261
+vt 0.218754 0.189245
+vt 0.208611 0.187062
+vt 0.213838 0.179979
+vt 0.195322 0.2024
+vt 0.206572 0.20383
+vt 0.197221 0.212029
+vt 0.207879 0.212331
+vt 0.217851 0.204542
+vt 0.183316 0.200806
+vt 0.185616 0.211745
+vt 0.200177 0.221191
+vt 0.189289 0.222187
+vt 0.176497 0.223558
+vt 0.183072 0.235218
+vt 0.194814 0.232099
+vt 0.222696 0.225794
+vt 0.213531 0.227462
+vt 0.220244 0.219432
+vt 0.210159 0.220329
+vt 0.219048 0.242324
+vt 0.210793 0.236729
+vt 0.224444 0.237674
+vt 0.218111 0.23337
+vt 0.225701 0.231231
+vt 0.192704 0.246651
+vt 0.20267 0.241446
+vt 0.21291 0.248905
+vt 0.147552 0.942756
+vt 0.13896 0.937482
+vt 0.15442 0.936036
+vt 0.145004 0.930904
+vt 0.131991 0.943023
+vt 0.122815 0.936756
+vt 0.130641 0.931218
+vt 0.148962 0.952945
+vt 0.155644 0.947301
+vt 0.157149 0.955791
+vt 0.164222 0.950166
+vt 0.162965 0.941085
+vt 0.125551 0.956525
+vt 0.133402 0.953212
+vt 0.134656 0.9604
+vt 0.141677 0.957166
+vt 0.14902 0.960085
+vt 0.115375 0.942445
+vt 0.124894 0.947972
+vt 0.116632 0.951525
+vt 0.871579 0.942636
+vt 0.878596 0.946725
+vt 0.86416 0.948207
+vt 0.872055 0.95214
+vt 0.886187 0.942108
+vt 0.885478 0.951547
+vt 0.893652 0.946846
+vt 0.872605 0.934139
+vt 0.865133 0.939046
+vt 0.865671 0.931509
+vt 0.858017 0.93669
+vt 0.857275 0.944342
+vt 0.895145 0.930139
+vt 0.887213 0.93361
+vt 0.887365 0.926738
+vt 0.880339 0.930202
+vt 0.873942 0.927331
+vt 0.901581 0.942011
+vt 0.893918 0.937827
+vt 0.902324 0.934359
+vt 0.937778 0.969427
+vt 0.924742 0.963135
+vt 0.963417 0.953814
+vt 0.94277 0.951002
+vt 0.929636 0.938971
+vt 0.950985 0.939207
+vt 0.924587 0.948175
+vt 0.897606 0.964725
+vt 0.904637 0.974329
+vt 0.880983 0.968761
+vt 0.883219 0.980673
+vt 0.91214 0.985041
+vt 0.902437 0.951808
+vt 0.891517 0.95734
+vt 0.87954 0.960235
+vt 0.911331 0.94532
+vt 0.915423 0.938551
+vt 0.837802 0.927994
+vt 0.819804 0.940196
+vt 0.826204 0.927352
+vt 0.800609 0.943494
+vt 0.811605 0.952025
+vt 0.831685 0.94681
+vt 0.836697 0.937574
+vt 0.857877 0.916732
+vt 0.87928 0.910354
+vt 0.863614 0.921492
+vt 0.880267 0.917735
+vt 0.851798 0.911211
+vt 0.857703 0.929735
+vt 0.868668 0.924726
+vt 0.880724 0.922242
+vt 0.845039 0.94326
+vt 0.848987 0.936302
+vt 0.924475 0.930625
+vt 0.941648 0.929031
+vt 0.96114 0.927417
+vt 0.933224 0.916635
+vt 0.922496 0.92029
+vt 0.903495 0.926861
+vt 0.911938 0.93218
+vt 0.897103 0.91889
+vt 0.892712 0.923251
+vt 0.905308 0.905852
+vt 0.901263 0.912802
+vt 0.820926 0.962167
+vt 0.836809 0.955124
+vt 0.840048 0.970839
+vt 0.802886 0.96989
+vt 0.830758 0.980145
+vt 0.856645 0.954682
+vt 0.848379 0.949442
+vt 0.864116 0.967327
+vt 0.867473 0.958814
+vt 0.86125 0.978259
+vt 0.85863 0.990399
+vt 0.191842 0.938528
+vt 0.18483 0.928669
+vt 0.214023 0.932819
+vt 0.202519 0.920832
+vt 0.190621 0.899112
+vt 0.222723 0.912315
+vt 0.180131 0.909732
+vt 0.161958 0.928403
+vt 0.17217 0.935147
+vt 0.177196 0.942628
+vt 0.133862 0.910183
+vt 0.152639 0.912726
+vt 0.135915 0.920575
+vt 0.149326 0.922843
+vt 0.155653 0.899975
+vt 0.158518 0.885909
+vt 0.131 0.896051
+vt 0.0727151 0.905945
+vt 0.0872616 0.914579
+vt 0.045865 0.922565
+vt 0.0688679 0.927864
+vt 0.0891699 0.932953
+vt 0.0611106 0.940896
+vt 0.0850445 0.943365
+vt 0.116168 0.914322
+vt 0.108088 0.902447
+vt 0.0995652 0.889326
+vt 0.112481 0.9301
+vt 0.123364 0.923705
+vt 0.10407 0.937626
+vt 0.101008 0.945485
+vt 0.141749 0.967835
+vt 0.159735 0.964585
+vt 0.141615 0.973986
+vt 0.164947 0.968203
+vt 0.198452 0.956803
+vt 0.170264 0.972446
+vt 0.186612 0.957297
+vt 0.165935 0.957311
+vt 0.154732 0.962219
+vt 0.141924 0.964387
+vt 0.187555 0.948567
+vt 0.17427 0.950399
+vt 0.205846 0.945238
+vt 0.226639 0.94116
+vt 0.0918948 0.952851
+vt 0.072194 0.95227
+vt 0.0497805 0.951411
+vt 0.080546 0.963637
+vt 0.0937428 0.962144
+vt 0.116457 0.959008
+vt 0.10617 0.952878
+vt 0.123264 0.966181
+vt 0.12877 0.963081
+vt 0.111312 0.975863
+vt 0.117382 0.970674
+vt 0.168188 0.257701
+vt 0.179853 0.251475
+vt 0.181861 0.270696
+vt 0.19145 0.264485
+vt 0.168322 0.238524
+vt 0.157989 0.278114
+vt 0.139623 0.26474
+vt 0.161775 0.267111
+vt 0.143271 0.254169
+vt 0.176773 0.279775
+vt 0.122129 0.24267
+vt 0.0996677 0.231893
+vt 0.121641 0.233595
+vt 0.0964428 0.225792
+vt 0.121258 0.251367
+vt 0.141637 0.226246
+vt 0.118437 0.223765
+vt 0.0932179 0.219692
+vt 0.160802 0.225544
+vt 0.0419152 0.53618
+vt 0.0472563 0.523503
+vt 0.0332681 0.539408
+vt 0.038828 0.522754
+vt 0.0547796 0.5128
+vt 0.0629257 0.517019
+vt 0.0550589 0.52441
+vt 0.0478604 0.554178
+vt 0.0476885 0.543737
+vt 0.0376346 0.561691
+vt 0.0380519 0.549348
+vt 0.0277081 0.556063
+vt 0.0662616 0.533766
+vt 0.0581 0.532485
+vt 0.0632249 0.540131
+vt 0.0559107 0.540335
+vt 0.055828 0.548702
+vt 0.0616102 0.525629
+vt 0.0679204 0.520553
+vt 0.0687181 0.527585
+vt 0.0509533 0.580146
+vt 0.0426311 0.57189
+vt 0.0442882 0.588789
+vt 0.0332182 0.580753
+vt 0.0513392 0.563763
+vt 0.0757862 0.577556
+vt 0.0655669 0.57722
+vt 0.0718228 0.588438
+vt 0.060513 0.586655
+vt 0.0553583 0.596825
+vt 0.0637107 0.564356
+vt 0.0704192 0.569255
+vt 0.070071 0.55722
+vt 0.0760258 0.562179
+vt 0.0789464 0.569999
+vt 0.0586375 0.557107
+vt 0.0648092 0.550898
+vt 0.0904211 0.548013
+vt 0.0833422 0.555408
+vt 0.0957205 0.55254
+vt 0.0881221 0.560358
+vt 0.0767944 0.549978
+vt 0.090315 0.526886
+vt 0.0877993 0.53464
+vt 0.0982366 0.53011
+vt 0.0953147 0.539062
+vt 0.100791 0.542278
+vt 0.0746039 0.537227
+vt 0.0781908 0.530701
+vt 0.0798442 0.524364
+vt 0.0701373 0.544116
+vt 0.925171 0.815649
+vt 0.920243 0.805051
+vt 0.934775 0.80669
+vt 0.929739 0.796503
+vt 0.93322 0.778201
+vt 0.938087 0.788562
+vt 0.924761 0.786096
+vt 0.904862 0.803051
+vt 0.910168 0.813683
+vt 0.893079 0.810992
+vt 0.898686 0.821929
+vt 0.915568 0.824608
+vt 0.910263 0.784583
+vt 0.899742 0.793007
+vt 0.888113 0.80057
+vt 0.919899 0.77525
+vt 0.928896 0.766724
+vt 0.815787 0.855921
+vt 0.819336 0.868912
+vt 0.793818 0.862058
+vt 0.795798 0.875795
+vt 0.831679 0.83698
+vt 0.836161 0.849417
+vt 0.812478 0.843031
+vt 0.770591 0.852857
+vt 0.771369 0.867508
+vt 0.748599 0.854921
+vt 0.748212 0.870509
+vt 0.772261 0.882678
+vt 0.790259 0.835472
+vt 0.77004 0.839246
+vt 0.749297 0.841006
+vt 0.80965 0.830342
+vt 0.827993 0.824626
+vt 0.724119 0.869298
+vt 0.722193 0.885121
+vt 0.69961 0.865356
+vt 0.695663 0.88068
+vt 0.726136 0.854086
+vt 0.681189 0.844692
+vt 0.675206 0.860163
+vt 0.659402 0.836491
+vt 0.651382 0.852648
+vt 0.669133 0.87624
+vt 0.686995 0.830434
+vt 0.707465 0.836555
+vt 0.667117 0.821916
+vt 0.728335 0.840094
+vt 0.844791 0.630193
+vt 0.844724 0.634228
+vt 0.843982 0.630547
+vt 0.842478 0.634715
+vt 0.837664 0.639425
+vt 0.840419 0.635163
+vt 0.840973 0.638882
+vt 0.848717 0.637567
+vt 0.847346 0.633664
+vt 0.854083 0.636646
+vt 0.851791 0.632782
+vt 0.845599 0.629839
+vt 0.849337 0.641623
+vt 0.849581 0.645756
+vt 0.844215 0.642375
+vt 0.843773 0.646487
+vt 0.855223 0.640762
+vt 0.839469 0.64305
+vt 0.83491 0.643687
+vt 0.837964 0.647218
+vt 0.725656 0.643083
+vt 0.719643 0.64232
+vt 0.724939 0.640251
+vt 0.71996 0.638782
+vt 0.711679 0.641543
+vt 0.711659 0.645946
+vt 0.719749 0.645744
+vt 0.738356 0.646598
+vt 0.732577 0.6462
+vt 0.736627 0.644893
+vt 0.731247 0.643959
+vt 0.729918 0.641719
+vt 0.727515 0.648634
+vt 0.733906 0.64844
+vt 0.728655 0.651352
+vt 0.735235 0.65068
+vt 0.740086 0.648303
+vt 0.712958 0.649959
+vt 0.7207 0.648941
+vt 0.722074 0.652024
+vt 0.884966 0.82932
+vt 0.890192 0.840682
+vt 0.86976 0.836188
+vt 0.87442 0.847798
+vt 0.879899 0.818138
+vt 0.849222 0.830896
+vt 0.853822 0.842863
+vt 0.858647 0.854913
+vt 0.860816 0.813432
+vt 0.845073 0.819095
+vt 0.87515 0.807317
+vt 0.953522 0.784398
+vt 0.950371 0.775653
+vt 0.958094 0.777732
+vt 0.955796 0.769832
+vt 0.959544 0.757237
+vt 0.961105 0.764151
+vt 0.953683 0.761713
+vt 0.940794 0.771842
+vt 0.944716 0.781755
+vt 0.94895 0.791065
+vt 0.945308 0.756461
+vt 0.937496 0.760723
+vt 0.957984 0.750323
+vt 0.951941 0.753153
+vt 0.237985 0.738297
+vt 0.268705 0.737229
+vt 0.241633 0.762787
+vt 0.268705 0.761697
+vt 0.200475 0.715676
+vt 0.234693 0.711537
+vt 0.207029 0.741466
+vt 0.222333 0.788147
+vt 0.245519 0.78554
+vt 0.230341 0.810102
+vt 0.249523 0.807426
+vt 0.268705 0.784364
+vt 0.159946 0.773803
+vt 0.187254 0.769363
+vt 0.175961 0.794629
+vt 0.199147 0.791261
+vt 0.211159 0.812779
+vt 0.165843 0.72393
+vt 0.175598 0.746705
+vt 0.14393 0.752977
+vt 0.253345 0.280308
+vt 0.268706 0.280765
+vt 0.253464 0.295744
+vt 0.268576 0.296249
+vt 0.238317 0.265747
+vt 0.253558 0.266767
+vt 0.238168 0.279086
+vt 0.239577 0.31028
+vt 0.253859 0.311911
+vt 0.240382 0.325349
+vt 0.254334 0.327593
+vt 0.268615 0.312525
+vt 0.21323 0.291052
+vt 0.225134 0.292637
+vt 0.214544 0.305067
+vt 0.22632 0.307825
+vt 0.227134 0.322083
+vt 0.223441 0.277203
+vt 0.223004 0.263413
+vt 0.210121 0.276206
+vt 0.268934 0.241592
+vt 0.268768 0.248379
+vt 0.255927 0.241503
+vt 0.254998 0.248112
+vt 0.243677 0.241038
+vt 0.256594 0.23557
+vt 0.244966 0.235422
+vt 0.239676 0.25526
+vt 0.254158 0.256284
+vt 0.268799 0.256538
+vt 0.225586 0.253155
+vt 0.229504 0.24562
+vt 0.233074 0.239996
+vt 0.235164 0.235206
+vt 0.672084 0.639787
+vt 0.66845 0.628475
+vt 0.686608 0.639919
+vt 0.68396 0.631421
+vt 0.660815 0.654442
+vt 0.655577 0.64149
+vt 0.676137 0.650509
+vt 0.700237 0.640623
+vt 0.701506 0.646666
+vt 0.69947 0.634368
+vt 0.693146 0.655714
+vt 0.703781 0.652284
+vt 0.681027 0.660046
+vt 0.66725 0.665722
+vt 0.869213 0.629853
+vt 0.859507 0.631337
+vt 0.868187 0.625508
+vt 0.857298 0.627496
+vt 0.861301 0.635363
+vt 0.891113 0.635857
+vt 0.879896 0.634217
+vt 0.89156 0.629458
+vt 0.879626 0.628853
+vt 0.879077 0.62352
+vt 0.870406 0.639191
+vt 0.879607 0.639641
+vt 0.889853 0.641715
+vt 0.862262 0.639758
+vt 0.92121 0.637592
+vt 0.905828 0.63279
+vt 0.925118 0.630408
+vt 0.907542 0.62597
+vt 0.903798 0.639508
+vt 0.942567 0.656273
+vt 0.930472 0.650822
+vt 0.949711 0.646823
+vt 0.936482 0.642606
+vt 0.942694 0.634846
+vt 0.912967 0.652217
+vt 0.924862 0.659954
+vt 0.936428 0.667127
+vt 0.901136 0.646022
+vt 0.966001 0.650564
+vt 0.958961 0.649232
+vt 0.970644 0.639264
+vt 0.965457 0.639274
+vt 0.952791 0.65981
+vt 0.969418 0.663982
+vt 0.972601 0.651548
+vt 0.976942 0.665473
+vt 0.97898 0.652359
+vt 0.975831 0.639254
+vt 0.957281 0.67448
+vt 0.966331 0.676693
+vt 0.974903 0.678588
+vt 0.947274 0.67163
+vt 0.269766 0.158525
+vt 0.250658 0.159766
+vt 0.270029 0.149052
+vt 0.248065 0.150831
+vt 0.220307 0.146351
+vt 0.245291 0.141613
+vt 0.226699 0.154829
+vt 0.24059 0.178843
+vt 0.237237 0.170956
+vt 0.254532 0.176342
+vt 0.25289 0.168134
+vt 0.269586 0.166899
+vt 0.216438 0.167843
+vt 0.223799 0.174757
+vt 0.228354 0.181409
+vt 0.20652 0.159758
+vt 0.195322 0.151089
+vt 0.255674 0.193252
+vt 0.255353 0.184799
+vt 0.26904 0.192481
+vt 0.269203 0.183716
+vt 0.242017 0.186776
+vt 0.242969 0.210229
+vt 0.242663 0.202753
+vt 0.255922 0.209205
+vt 0.255817 0.201447
+vt 0.269103 0.200716
+vt 0.229862 0.196333
+vt 0.229944 0.203984
+vt 0.230377 0.211402
+vt 0.229853 0.188704
+vt 0.256133 0.216346
+vt 0.269011 0.215922
+vt 0.256393 0.223033
+vt 0.26887 0.222737
+vt 0.243486 0.217235
+vt 0.244951 0.229727
+vt 0.256648 0.229427
+vt 0.268973 0.2293
+vt 0.232926 0.224635
+vt 0.234642 0.230172
+vt 0.231438 0.218332
+vt 0.231875 0.681974
+vt 0.195405 0.687835
+vt 0.268705 0.681041
+vt 0.268707 0.649795
+vt 0.231298 0.650222
+vt 0.159651 0.701684
+vt 0.124255 0.719528
+vt 0.152896 0.681504
+vt 0.120595 0.706905
+vt 0.162996 0.634435
+vt 0.141452 0.664926
+vt 0.142052 0.61864
+vt 0.127664 0.650149
+vt 0.116935 0.694282
+vt 0.211524 0.580428
+vt 0.203702 0.612749
+vt 0.187831 0.570739
+vt 0.178593 0.601672
+vt 0.15644 0.58713
+vt 0.268705 0.617234
+vt 0.234725 0.616894
+vt 0.239135 0.585332
+vt 0.199167 0.27764
+vt 0.203439 0.290476
+vt 0.191338 0.282164
+vt 0.195938 0.298294
+vt 0.204702 0.30253
+vt 0.194745 0.309006
+vt 0.204397 0.314025
+vt 0.187394 0.290439
+vt 0.185378 0.30188
+vt 0.158413 0.418134
+vt 0.159054 0.422514
+vt 0.151701 0.418126
+vt 0.153357 0.422511
+vt 0.148767 0.427196
+vt 0.147324 0.422254
+vt 0.154331 0.42702
+vt 0.164378 0.426932
+vt 0.164461 0.422716
+vt 0.169254 0.42687
+vt 0.169733 0.422548
+vt 0.164807 0.418553
+vt 0.159617 0.435925
+vt 0.159655 0.431437
+vt 0.163693 0.43525
+vt 0.1639 0.431123
+vt 0.16807 0.431122
+vt 0.15511 0.43158
+vt 0.150273 0.432153
+vt 0.155198 0.436148
+vt 0.137897 0.391939
+vt 0.139469 0.389504
+vt 0.17063 0.392906
+vt 0.177579 0.390684
+vt 0.108525 0.396264
+vt 0.10962 0.392167
+vt 0.136029 0.394644
+vt 0.189135 0.398093
+vt 0.201386 0.395105
+vt 0.205359 0.404085
+vt 0.221392 0.400547
+vt 0.215689 0.391863
+vt 0.159402 0.397834
+vt 0.180988 0.400577
+vt 0.156251 0.401361
+vt 0.175531 0.40322
+vt 0.1956 0.40625
+vt 0.133566 0.39789
+vt 0.106177 0.400376
+vt 0.13333 0.401583
+vt 0.105778 0.419582
+vt 0.112909 0.431692
+vt 0.0857539 0.42372
+vt 0.0978159 0.435407
+vt 0.0792543 0.412786
+vt 0.0493589 0.420043
+vt 0.0604704 0.429958
+vt 0.113824 0.445537
+vt 0.130697 0.452864
+vt 0.104492 0.449389
+vt 0.127195 0.455721
+vt 0.123421 0.442151
+vt 0.0620736 0.445754
+vt 0.0967788 0.453971
+vt 0.042503 0.451596
+vt 0.0898755 0.458919
+vt 0.12693 0.459685
+vt 0.0109524 0.427572
+vt 0.0292777 0.436932
+vt -0.00486957 0.444274
+vt 0.218498 0.42425
+vt 0.211205 0.424626
+vt 0.236149 0.408646
+vt 0.221871 0.411246
+vt 0.209393 0.413479
+vt 0.194343 0.436667
+vt 0.197771 0.438115
+vt 0.182271 0.446693
+vt 0.18375 0.449328
+vt 0.200848 0.439853
+vt 0.190212 0.435797
+vt 0.185733 0.435099
+vt 0.197971 0.425316
+vt 0.191608 0.425692
+vt 0.179819 0.445093
+vt 0.200514 0.414982
+vt 0.193566 0.416303
+vt 0.160223 0.460849
+vt 0.159192 0.459271
+vt 0.17171 0.458153
+vt 0.171322 0.455881
+vt 0.17064 0.453743
+vt 0.14455 0.458507
+vt 0.146064 0.460995
+vt 0.148735 0.463545
+vt 0.158128 0.455703
+vt 0.14535 0.456142
+vt 0.158194 0.452773
+vt 0.147456 0.453335
+vt 0.169371 0.451874
+vt 0.167924 0.449711
+vt 0.0851413 0.39891
+vt 0.0922294 0.393554
+vt 0.0991561 0.388722
+vt 0.100257 0.388524
+vt 0.0806903 0.395723
+vt 0.0520286 0.41091
+vt 0.0799332 0.404918
+vt 0.0223556 0.413036
+vt 0.0433475 0.407848
+vt -0.0149432 0.424994
+vt 0.019659 0.417742
+vt 0.0699681 0.398298
+vt 0.0980548 0.388921
+vt 0.0596545 0.401078
+vt 0.112325 0.545469
+vt 0.112227 0.562147
+vt 0.121292 0.533311
+vt 0.124355 0.551553
+vt 0.0994555 0.572536
+vt 0.0969232 0.587328
+vt 0.112171 0.578561
+vt 0.106779 0.524557
+vt 0.107589 0.538443
+vt 0.110423 0.513527
+vt 0.112506 0.528264
+vt 0.118228 0.515069
+vt 0.0989423 0.555133
+vt 0.104283 0.542593
+vt 0.104701 0.530629
+vt 0.0882754 0.571292
+vt 0.0894754 0.56478
+vt 0.0853955 0.582515
+vt 0.0816758 0.596094
+vt 0.0753139 0.485928
+vt 0.0650052 0.492652
+vt 0.0850253 0.491819
+vt 0.076075 0.499356
+vt 0.0930081 0.48361
+vt 0.100394 0.488016
+vt 0.0945577 0.497873
+vt 0.0651895 0.505551
+vt 0.073766 0.51176
+vt 0.0546966 0.499376
+vt 0.0900973 0.518593
+vt 0.0785101 0.518041
+vt 0.0994002 0.522709
+vt 0.100625 0.514782
+vt 0.107779 0.492421
+vt 0.103732 0.504253
+vt 0.23946 0.508287
+vt 0.230571 0.517622
+vt 0.228964 0.503271
+vt 0.217261 0.507781
+vt 0.188396 0.491269
+vt 0.206255 0.496224
+vt 0.200655 0.50824
+vt 0.268702 0.540749
+vt 0.243925 0.538213
+vt 0.268704 0.526377
+vt 0.248486 0.524031
+vt 0.253257 0.51249
+vt 0.215104 0.553361
+vt 0.241505 0.558876
+vt 0.268704 0.561105
+vt 0.190298 0.545736
+vt 0.166291 0.537055
+vt 0.187254 0.524533
+vt 0.161754 0.51849
+vt 0.170538 0.486314
+vt 0.179959 0.505005
+vt 0.157216 0.499924
+vt 0.24946 0.367438
+vt 0.258636 0.367175
+vt 0.255765 0.379731
+vt 0.262283 0.37965
+vt 0.255826 0.354536
+vt 0.268674 0.354878
+vt 0.268728 0.367451
+vt 0.234227 0.359499
+vt 0.242109 0.369275
+vt 0.228611 0.363909
+vt 0.23567 0.3719
+vt 0.249247 0.379811
+vt 0.215158 0.338968
+vt 0.227222 0.344776
+vt 0.214279 0.345817
+vt 0.224857 0.351242
+vt 0.221552 0.355917
+vt 0.240937 0.339238
+vt 0.227708 0.334728
+vt 0.215289 0.329543
+vt 0.268674 0.342201
+vt 0.254691 0.341571
+vt 0.244771 0.497304
+vt 0.256308 0.499747
+vt 0.268704 0.500841
+vt 0.268703 0.486337
+vt 0.258069 0.48605
+vt 0.225998 0.491141
+vt 0.227882 0.481103
+vt 0.234955 0.49435
+vt 0.237658 0.483058
+vt 0.249236 0.471672
+vt 0.239501 0.471434
+vt 0.250178 0.458265
+vt 0.240915 0.459645
+vt 0.229767 0.471064
+vt 0.25897 0.471644
+vt 0.268704 0.471564
+vt 0.259441 0.456884
+vt 0.203423 0.33552
+vt 0.203968 0.325186
+vt 0.193422 0.320894
+vt 0.18965 0.357864
+vt 0.190908 0.345555
+vt 0.202072 0.352895
+vt 0.202774 0.344537
+vt 0.18128 0.331343
+vt 0.179249 0.347026
+vt 0.17058 0.329645
+vt 0.167693 0.348723
+vt 0.177228 0.362833
+vt 0.183331 0.315904
+vt 0.173467 0.310566
+vt 0.17135 0.406685
+vt 0.182441 0.411169
+vt 0.16826 0.410583
+vt 0.176559 0.414437
+vt 0.155839 0.404764
+vt 0.157521 0.413533
+vt 0.166076 0.414524
+vt 0.172571 0.417098
+vt 0.135724 0.413665
+vt 0.144491 0.409813
+vt 0.142544 0.416326
+vt 0.148879 0.413937
+vt 0.13814 0.405626
+vt 0.124506 0.410074
+vt 0.174252 0.441387
+vt 0.171559 0.438762
+vt 0.181259 0.434168
+vt 0.176807 0.433082
+vt 0.186036 0.426037
+vt 0.174588 0.426678
+vt 0.172391 0.431921
+vt 0.168862 0.436383
+vt 0.180644 0.419795
+vt 0.175028 0.421443
+vt 0.186883 0.417995
+vt 0.149859 0.449516
+vt 0.140851 0.446115
+vt 0.152195 0.445145
+vt 0.145413 0.441926
+vt 0.158597 0.449574
+vt 0.159394 0.440679
+vt 0.154101 0.440679
+vt 0.14915 0.438598
+vt 0.165593 0.443116
+vt 0.164422 0.439274
+vt 0.166713 0.446693
+vt 0.121193 0.418911
+vt 0.132677 0.420037
+vt 0.1248 0.429441
+vt 0.142327 0.427533
+vt 0.140909 0.42129
+vt 0.13921 0.436123
+vt 0.145149 0.43376
+vt 0.131925 0.438966
+vt 0.718331 0.590305
+vt 0.746935 0.609408
+vt 0.714187 0.592962
+vt 0.74538 0.618837
+vt 0.776045 0.619539
+vt 0.775836 0.606046
+vt 0.748316 0.600181
+vt 0.698297 0.570265
+vt 0.691016 0.56935
+vt 0.678793 0.553513
+vt 0.668258 0.550016
+vt 0.682993 0.567088
+vt 0.724973 0.581367
+vt 0.704095 0.568485
+vt 0.726629 0.571953
+vt 0.707052 0.562689
+vt 0.687534 0.554647
+vt 0.775571 0.595734
+vt 0.74935 0.591359
+vt 0.749786 0.580241
+vt 0.631527 0.521919
+vt 0.643349 0.524282
+vt 0.641663 0.531566
+vt 0.653323 0.535777
+vt 0.66485 0.539385
+vt 0.649849 0.514184
+vt 0.635476 0.513181
+vt 0.647883 0.501604
+vt 0.631937 0.501954
+vt 0.62139 0.512271
+vt 0.684619 0.531929
+vt 0.668859 0.529
+vt 0.681508 0.516892
+vt 0.664797 0.515372
+vt 0.663468 0.501153
+vt 0.676113 0.541788
+vt 0.689104 0.545131
+vt 0.919101 0.529512
+vt 0.932277 0.513107
+vt 0.933894 0.53169
+vt 0.952549 0.514234
+vt 0.938271 0.4968
+vt 0.915407 0.496828
+vt 0.912378 0.512315
+vt 0.874997 0.559455
+vt 0.89162 0.543518
+vt 0.884334 0.563198
+vt 0.903546 0.546094
+vt 0.915238 0.549145
+vt 0.888008 0.52727
+vt 0.879227 0.541892
+vt 0.86975 0.528566
+vt 0.864702 0.54298
+vt 0.866281 0.556104
+vt 0.894945 0.496872
+vt 0.893224 0.512192
+vt 0.87312 0.513029
+vt 0.860186 0.581167
+vt 0.86658 0.586129
+vt 0.833052 0.598471
+vt 0.836578 0.605656
+vt 0.854086 0.575883
+vt 0.804878 0.613577
+vt 0.803304 0.602321
+vt 0.806576 0.625184
+vt 0.801979 0.591764
+vt 0.80095 0.579355
+vt 0.826879 0.582241
+vt 0.824828 0.570521
+vt 0.848572 0.569954
+vt 0.845415 0.561476
+vt 0.723056 0.499144
+vt 0.723365 0.520768
+vt 0.700461 0.499917
+vt 0.701169 0.518891
+vt 0.702985 0.536491
+vt 0.773996 0.544472
+vt 0.748527 0.544271
+vt 0.773366 0.521907
+vt 0.747679 0.521923
+vt 0.747248 0.498271
+vt 0.725988 0.558685
+vt 0.74937 0.564127
+vt 0.774586 0.566031
+vt 0.705814 0.551558
+vt 0.799676 0.520274
+vt 0.799659 0.497042
+vt 0.825661 0.517765
+vt 0.825711 0.49681
+vt 0.799857 0.542467
+vt 0.848896 0.532613
+vt 0.850371 0.515118
+vt 0.850862 0.496842
+vt 0.824745 0.55595
+vt 0.846383 0.548548
+vt 0.800262 0.562542
+vt 0.622835 0.741898
+vt 0.629822 0.737341
+vt 0.630937 0.748106
+vt 0.637348 0.741916
+vt 0.638168 0.732456
+vt 0.632743 0.72671
+vt 0.623594 0.732979
+vt 0.601364 0.736825
+vt 0.612351 0.737387
+vt 0.601727 0.746549
+vt 0.614012 0.74515
+vt 0.621199 0.753622
+vt 0.623692 0.721651
+vt 0.615298 0.729693
+vt 0.603694 0.727299
+vt 0.64705 0.720202
+vt 0.651064 0.728223
+vt 0.666861 0.725286
+vt 0.666356 0.736499
+vt 0.651523 0.736804
+vt 0.655097 0.70327
+vt 0.638367 0.712843
+vt 0.644545 0.693632
+vt 0.627328 0.703786
+vt 0.680292 0.699272
+vt 0.669656 0.692929
+vt 0.66043 0.68407
+vt 0.686784 0.713263
+vt 0.693307 0.702612
+vt 0.683906 0.72429
+vt 0.682763 0.73584
+vt 0.664776 0.7482
+vt 0.649538 0.745843
+vt 0.644256 0.754878
+vt 0.68235 0.760278
+vt 0.681446 0.748063
+vt 0.67624 0.777475
+vt 0.68787 0.771805
+vt 0.657375 0.798004
+vt 0.64028 0.785894
+vt 0.666427 0.786487
+vt 0.651148 0.774455
+vt 0.634824 0.763445
+vt 0.622928 0.773335
+vt 0.597165 0.801047
+vt 0.584037 0.816073
+vt 0.575655 0.789046
+vt 0.561282 0.802854
+vt 0.606349 0.829025
+vt 0.594793 0.84477
+vt 0.570888 0.831255
+vt 0.629233 0.799056
+vt 0.610251 0.786335
+vt 0.590115 0.774943
+vt 0.648026 0.811186
+vt 0.638424 0.825833
+vt 0.628613 0.841743
+vt 0.618698 0.858285
+vt 0.643447 0.660513
+vt 0.635997 0.646287
+vt 0.6049 0.637043
+vt 0.628919 0.631286
+vt 0.614692 0.652678
+vt 0.634531 0.681735
+vt 0.651642 0.673186
+vt 0.584936 0.681759
+vt 0.604883 0.675219
+vt 0.597105 0.697898
+vt 0.616244 0.690669
+vt 0.593009 0.659162
+vt 0.58088 0.642801
+vt 0.571868 0.665802
+vt 0.586302 0.735665
+vt 0.587541 0.725613
+vt 0.566532 0.713946
+vt 0.585456 0.714475
+vt 0.569532 0.724249
+vt 0.584349 0.745549
+vt 0.579071 0.755761
+vt 0.565094 0.743135
+vt 0.557994 0.751956
+vt 0.550572 0.731836
+vt 0.547179 0.740294
+vt 0.532926 0.729581
+vt 0.530226 0.737883
+vt 0.540511 0.747664
+vt 0.550413 0.713789
+vt 0.552359 0.722818
+vt 0.536163 0.720718
+vt 0.553225 0.778158
+vt 0.567858 0.766798
+vt 0.546459 0.760192
+vt 0.496408 0.78312
+vt 0.5145 0.775693
+vt 0.521696 0.80043
+vt 0.537687 0.789339
+vt 0.512875 0.758052
+vt 0.492969 0.762083
+vt 0.500324 0.747435
+vt 0.478432 0.748437
+vt 0.471121 0.76581
+vt 0.5289 0.753414
+vt 0.51732 0.74582
+vt 0.55219 0.672662
+vt 0.56563 0.687095
+vt 0.519008 0.667707
+vt 0.537934 0.658133
+vt 0.533662 0.679776
+vt 0.558759 0.703094
+vt 0.577435 0.701334
+vt 0.516448 0.703502
+vt 0.530573 0.696655
+vt 0.529774 0.709521
+vt 0.542602 0.705297
+vt 0.515971 0.687177
+vt 0.500082 0.677282
+vt 0.500703 0.696106
+vt 0.496419 0.736002
+vt 0.513862 0.73676
+vt 0.515969 0.727011
+vt 0.476228 0.734685
+vt 0.454664 0.733089
+vt 0.481514 0.721061
+vt 0.463494 0.717678
+vt 0.505915 0.712954
+vt 0.489445 0.707801
+vt 0.472325 0.702267
+vt 0.521085 0.717345
+vt 0.817772 0.32972
+vt 0.817268 0.334177
+vt 0.807098 0.329266
+vt 0.806518 0.333516
+vt 0.806012 0.337004
+vt 0.795796 0.333142
+vt 0.795184 0.336418
+vt 0.838686 0.341177
+vt 0.827785 0.339607
+vt 0.838685 0.335915
+vt 0.827985 0.335004
+vt 0.82832 0.330282
+vt 0.827498 0.348469
+vt 0.816308 0.345239
+vt 0.82763 0.344073
+vt 0.816582 0.341797
+vt 0.838688 0.346438
+vt 0.805553 0.339703
+vt 0.794562 0.338721
+vt 0.805117 0.342008
+vt 0.784417 0.336886
+vt 0.785167 0.333173
+vt 0.774651 0.33352
+vt 0.775476 0.328538
+vt 0.785864 0.328653
+vt 0.771718 0.345756
+vt 0.772735 0.341953
+vt 0.782822 0.342267
+vt 0.783629 0.339781
+vt 0.763085 0.339283
+vt 0.761859 0.344306
+vt 0.752475 0.340764
+vt 0.750993 0.34675
+vt 0.760614 0.349246
+vt 0.765302 0.328633
+vt 0.764271 0.334092
+vt 0.753958 0.334778
+vt 0.872696 0.335583
+vt 0.866407 0.338084
+vt 0.86882 0.330441
+vt 0.863658 0.333516
+vt 0.867867 0.343569
+vt 0.860064 0.340257
+vt 0.860616 0.34494
+vt 0.882136 0.339457
+vt 0.878875 0.332422
+vt 0.888775 0.336165
+vt 0.88462 0.328842
+vt 0.874176 0.326498
+vt 0.876903 0.354499
+vt 0.876407 0.348394
+vt 0.885368 0.353577
+vt 0.88421 0.346688
+vt 0.892061 0.343631
+vt 0.868751 0.349227
+vt 0.861168 0.349623
+vt 0.869036 0.354668
+vt 0.905832 0.318045
+vt 0.897126 0.314006
+vt 0.909273 0.307792
+vt 0.899606 0.305356
+vt 0.888216 0.310241
+vt 0.885668 0.315488
+vt 0.89379 0.320385
+vt 0.912856 0.338167
+vt 0.907418 0.331814
+vt 0.922353 0.328655
+vt 0.914539 0.322618
+vt 0.91968 0.310994
+vt 0.895263 0.331765
+vt 0.899861 0.338745
+vt 0.903538 0.34573
+vt 0.882771 0.319356
+vt 0.889606 0.32508
+vt 0.726761 0.338632
+vt 0.720269 0.333665
+vt 0.728661 0.332789
+vt 0.724048 0.328479
+vt 0.728685 0.324646
+vt 0.725283 0.320091
+vt 0.718943 0.323554
+vt 0.708141 0.33392
+vt 0.716815 0.339387
+vt 0.703619 0.340611
+vt 0.713734 0.345346
+vt 0.724861 0.344474
+vt 0.705092 0.321759
+vt 0.698212 0.327693
+vt 0.695328 0.313899
+vt 0.68654 0.320218
+vt 0.692159 0.335172
+vt 0.721158 0.3143
+vt 0.712853 0.317398
+vt 0.705649 0.309794
+vt 0.849098 0.333114
+vt 0.852849 0.328742
+vt 0.854305 0.334344
+vt 0.858906 0.330607
+vt 0.863427 0.326736
+vt 0.848559 0.322731
+vt 0.846187 0.327308
+vt 0.839608 0.321509
+vt 0.838379 0.326237
+vt 0.84389 0.331884
+vt 0.862887 0.316059
+vt 0.859996 0.320044
+vt 0.853481 0.313684
+vt 0.851078 0.31815
+vt 0.841341 0.316617
+vt 0.867789 0.322599
+vt 0.871643 0.31863
+vt 0.808609 0.31864
+vt 0.807782 0.324278
+vt 0.797841 0.317858
+vt 0.797082 0.323766
+vt 0.818436 0.324849
+vt 0.821851 0.307693
+vt 0.820468 0.313742
+vt 0.810856 0.305827
+vt 0.809616 0.312441
+vt 0.79869 0.311344
+vt 0.829771 0.320444
+vt 0.831091 0.315141
+vt 0.832672 0.309663
+vt 0.828882 0.32546
+vt 0.827295 0.287378
+vt 0.81609 0.283741
+vt 0.829537 0.279056
+vt 0.81819 0.275174
+vt 0.802762 0.289081
+vt 0.80451 0.280694
+vt 0.814142 0.291631
+vt 0.836158 0.298133
+vt 0.838143 0.291297
+vt 0.84681 0.301473
+vt 0.848714 0.295264
+vt 0.84041 0.283327
+vt 0.823454 0.301435
+vt 0.834348 0.304125
+vt 0.84504 0.30685
+vt 0.801169 0.296892
+vt 0.812381 0.298942
+vt 0.876986 0.311536
+vt 0.878898 0.306488
+vt 0.870963 0.295312
+vt 0.88062 0.29932
+vt 0.869186 0.302754
+vt 0.865464 0.312169
+vt 0.874638 0.315263
+vt 0.855505 0.309448
+vt 0.857319 0.304788
+vt 0.859093 0.299048
+vt 0.861066 0.291675
+vt 0.732418 0.318062
+vt 0.734712 0.322982
+vt 0.743001 0.327869
+vt 0.736781 0.327408
+vt 0.741594 0.322579
+vt 0.738426 0.310793
+vt 0.729673 0.312154
+vt 0.736628 0.303749
+vt 0.726615 0.305165
+vt 0.757036 0.316401
+vt 0.748245 0.31651
+vt 0.75687 0.309754
+vt 0.747447 0.310045
+vt 0.746533 0.302937
+vt 0.748795 0.322531
+vt 0.749221 0.328331
+vt 0.756792 0.322664
+vt 0.731654 0.277237
+vt 0.718591 0.277987
+vt 0.730298 0.26671
+vt 0.717089 0.267402
+vt 0.704046 0.278768
+vt 0.706282 0.288897
+vt 0.720525 0.287868
+vt 0.756105 0.285792
+vt 0.744751 0.286232
+vt 0.755817 0.276484
+vt 0.743878 0.276719
+vt 0.743048 0.266393
+vt 0.734816 0.295809
+vt 0.745635 0.294985
+vt 0.756313 0.294527
+vt 0.710968 0.298566
+vt 0.723383 0.296998
+vt 0.792536 0.278545
+vt 0.791154 0.28731
+vt 0.780323 0.277204
+vt 0.793935 0.269245
+vt 0.781204 0.267531
+vt 0.78873 0.303265
+vt 0.777827 0.302657
+vt 0.789858 0.295534
+vt 0.778545 0.294752
+vt 0.767676 0.285819
+vt 0.767323 0.294432
+vt 0.767022 0.302415
+vt 0.768024 0.276581
+vt 0.768346 0.266629
+vt 0.776124 0.323009
+vt 0.786496 0.323337
+vt 0.787134 0.317284
+vt 0.766566 0.316536
+vt 0.766062 0.322804
+vt 0.777196 0.310038
+vt 0.766828 0.309761
+vt 0.78785 0.310554
+vt 0.887192 0.245173
+vt 0.894254 0.251226
+vt 0.882204 0.253513
+vt 0.889316 0.257984
+vt 0.899158 0.259059
+vt 0.906216 0.25556
+vt 0.900579 0.245749
+vt 0.873068 0.226854
+vt 0.883747 0.231913
+vt 0.868263 0.235954
+vt 0.878436 0.240212
+vt 0.873612 0.249405
+vt 0.899039 0.231269
+vt 0.889175 0.22393
+vt 0.904851 0.223823
+vt 0.894483 0.215603
+vt 0.878092 0.21784
+vt 0.907771 0.240362
+vt 0.914548 0.251706
+vt 0.914646 0.233613
+vt 0.83088 0.224634
+vt 0.816277 0.220886
+vt 0.83396 0.214024
+vt 0.818586 0.209908
+vt 0.827947 0.235154
+vt 0.840898 0.238644
+vt 0.844486 0.228275
+vt 0.855747 0.197071
+vt 0.852027 0.207701
+vt 0.840323 0.192176
+vt 0.837098 0.203273
+vt 0.821041 0.19888
+vt 0.861296 0.22221
+vt 0.8657 0.212494
+vt 0.870075 0.20254
+vt 0.852868 0.242144
+vt 0.856947 0.232012
+vt 0.689623 0.215775
+vt 0.70332 0.207275
+vt 0.697939 0.222493
+vt 0.709017 0.215819
+vt 0.725203 0.22199
+vt 0.713143 0.224363
+vt 0.722188 0.211648
+vt 0.691318 0.186238
+vt 0.709695 0.179461
+vt 0.697458 0.197483
+vt 0.714155 0.190926
+vt 0.68169 0.206684
+vt 0.751207 0.197567
+vt 0.734425 0.198302
+vt 0.74968 0.185753
+vt 0.731649 0.18705
+vt 0.728678 0.175143
+vt 0.738996 0.220423
+vt 0.736932 0.209325
+vt 0.752641 0.20886
+vt 0.784858 0.21308
+vt 0.800826 0.216826
+vt 0.784162 0.2244
+vt 0.799325 0.227809
+vt 0.802394 0.205632
+vt 0.768345 0.19871
+vt 0.768707 0.210268
+vt 0.768953 0.22173
+vt 0.786206 0.189958
+vt 0.767985 0.186934
+vt 0.786915 0.177877
+vt 0.767639 0.174701
+vt 0.803984 0.194191
+vt 0.805673 0.182317
+vt 0.809955 0.252364
+vt 0.822756 0.255918
+vt 0.80815 0.262293
+vt 0.820407 0.265831
+vt 0.832047 0.269793
+vt 0.837735 0.249207
+vt 0.825254 0.245637
+vt 0.811949 0.242115
+vt 0.856542 0.267463
+vt 0.846044 0.263615
+vt 0.859897 0.256492
+vt 0.849272 0.252848
+vt 0.843066 0.273938
+vt 0.853522 0.278101
+vt 0.742291 0.255201
+vt 0.729185 0.255607
+vt 0.755289 0.255203
+vt 0.754976 0.243665
+vt 0.741468 0.243564
+vt 0.715531 0.256156
+vt 0.700801 0.256437
+vt 0.714427 0.244869
+vt 0.700028 0.24508
+vt 0.72694 0.232765
+vt 0.714289 0.234159
+vt 0.701971 0.234531
+vt 0.754532 0.231935
+vt 0.740439 0.231904
+vt 0.911041 0.2953
+vt 0.921301 0.296781
+vt 0.93161 0.297423
+vt 0.929903 0.281016
+vt 0.920542 0.281282
+vt 0.901221 0.293843
+vt 0.891715 0.291968
+vt 0.901867 0.281252
+vt 0.892897 0.280142
+vt 0.909625 0.267746
+vt 0.901437 0.269362
+vt 0.893739 0.269552
+vt 0.927824 0.264632
+vt 0.918548 0.265802
+vt 0.783513 0.23552
+vt 0.768967 0.233217
+vt 0.768829 0.24463
+vt 0.796609 0.249173
+vt 0.797938 0.238614
+vt 0.782032 0.257262
+vt 0.795284 0.259419
+vt 0.768618 0.255873
+vt 0.866329 0.271283
+vt 0.863474 0.282119
+vt 0.873054 0.28587
+vt 0.878455 0.263629
+vt 0.869642 0.26007
+vt 0.884271 0.277953
+vt 0.886283 0.267215
+vt 0.882397 0.289232
+vt 0.941581 0.296368
+vt 0.939264 0.280761
+vt 0.944658 0.31122
+vt 0.954503 0.308374
+vt 0.951091 0.294591
+vt 0.945764 0.266671
+vt 0.937066 0.26534
+vt 0.944666 0.253926
+vt 0.936188 0.251002
+vt 0.964594 0.279716
+vt 0.956479 0.280128
+vt 0.960523 0.267756
+vt 0.953407 0.267371
+vt 0.951694 0.255808
+vt 0.963661 0.306017
+vt 0.960023 0.293069
+vt 0.968665 0.291676
+vt 0.386764 0.173365
+vt 0.386251 0.159166
+vt 0.403134 0.175347
+vt 0.40173 0.162164
+vt 0.370756 0.187276
+vt 0.370795 0.173446
+vt 0.38669 0.187253
+vt 0.422389 0.190328
+vt 0.419955 0.177717
+vt 0.441316 0.192402
+vt 0.437002 0.180281
+vt 0.417209 0.165162
+vt 0.401826 0.214817
+vt 0.403793 0.201683
+vt 0.424104 0.215186
+vt 0.424198 0.203052
+vt 0.44563 0.204522
+vt 0.385442 0.200518
+vt 0.36925 0.200049
+vt 0.382596 0.213492
+vt 0.356262 0.189263
+vt 0.355175 0.177265
+vt 0.348714 0.166736
+vt 0.334762 0.173858
+vt 0.341079 0.182428
+vt 0.340863 0.211835
+vt 0.342946 0.202073
+vt 0.352708 0.211778
+vt 0.355322 0.200767
+vt 0.331414 0.194664
+vt 0.331412 0.203371
+vt 0.319782 0.196009
+vt 0.319946 0.204047
+vt 0.330031 0.212063
+vt 0.324674 0.179566
+vt 0.32968 0.18654
+vt 0.319256 0.188731
+vt 0.34302 0.232291
+vt 0.34877 0.222299
+vt 0.354926 0.235741
+vt 0.361782 0.224028
+vt 0.337711 0.221083
+vt 0.32682 0.236795
+vt 0.334971 0.241716
+vt 0.31849 0.242434
+vt 0.324587 0.249202
+vt 0.345001 0.247217
+vt 0.32414 0.227342
+vt 0.319507 0.233375
+vt 0.314915 0.225618
+vt 0.311922 0.231227
+vt 0.313143 0.237776
+vt 0.327627 0.220146
+vt 0.317481 0.219239
+vt 0.63785 0.953205
+vt 0.630881 0.947663
+vt 0.647016 0.946972
+vt 0.6392 0.9414
+vt 0.622289 0.952938
+vt 0.615431 0.946184
+vt 0.624837 0.941085
+vt 0.636439 0.963394
+vt 0.644947 0.958154
+vt 0.644288 0.966733
+vt 0.653209 0.961707
+vt 0.654465 0.952626
+vt 0.612697 0.965958
+vt 0.620878 0.963127
+vt 0.620821 0.970267
+vt 0.628164 0.967348
+vt 0.635184 0.970582
+vt 0.606875 0.951267
+vt 0.614197 0.957483
+vt 0.605619 0.960347
+vt 0.367121 0.917394
+vt 0.374142 0.923099
+vt 0.360165 0.922438
+vt 0.367724 0.92812
+vt 0.381201 0.91911
+vt 0.388569 0.925813
+vt 0.380653 0.929651
+vt 0.367048 0.90811
+vt 0.360477 0.912329
+vt 0.360243 0.904335
+vt 0.353385 0.908573
+vt 0.353375 0.916883
+vt 0.388646 0.907697
+vt 0.381128 0.909826
+vt 0.380927 0.902848
+vt 0.374115 0.905116
+vt 0.367999 0.901318
+vt 0.396044 0.921905
+vt 0.388216 0.915684
+vt 0.396055 0.913595
+vt 0.328818 0.915038
+vt 0.333156 0.924854
+vt 0.309844 0.916921
+vt 0.317341 0.929308
+vt 0.32345 0.950038
+vt 0.299401 0.934005
+vt 0.334074 0.942122
+vt 0.352513 0.92816
+vt 0.344718 0.920885
+vt 0.341477 0.913527
+vt 0.373801 0.947808
+vt 0.357974 0.943112
+vt 0.374058 0.938334
+vt 0.362633 0.93464
+vt 0.352852 0.95399
+vt 0.347499 0.966072
+vt 0.373169 0.960769
+vt 0.418321 0.915362
+vt 0.435775 0.918439
+vt 0.422947 0.925132
+vt 0.443681 0.930756
+vt 0.455589 0.922184
+vt 0.430406 0.905691
+vt 0.418226 0.905767
+vt 0.397194 0.906411
+vt 0.405585 0.913617
+vt 0.409188 0.921144
+vt 0.376316 0.893935
+vt 0.392361 0.898152
+vt 0.375205 0.897753
+vt 0.386735 0.900808
+vt 0.405223 0.889198
+vt 0.398631 0.894039
+vt 0.377906 0.887331
+vt 0.336894 0.897334
+vt 0.318775 0.906333
+vt 0.327696 0.895339
+vt 0.301524 0.906655
+vt 0.333962 0.906288
+vt 0.360174 0.89468
+vt 0.356966 0.889714
+vt 0.353868 0.884022
+vt 0.353137 0.901438
+vt 0.363602 0.898194
+vt 0.345023 0.906797
+vt 0.434342 0.941415
+vt 0.453466 0.949534
+vt 0.415406 0.950555
+vt 0.42616 0.960391
+vt 0.417515 0.933928
+vt 0.390161 0.946583
+vt 0.394517 0.958315
+vt 0.398853 0.971248
+vt 0.39657 0.933133
+vt 0.385766 0.937253
+vt 0.40528 0.927706
+vt 0.57922 0.909293
+vt 0.58971 0.919914
+vt 0.547117 0.922496
+vt 0.567321 0.931013
+vt 0.577999 0.948709
+vt 0.555818 0.943001
+vt 0.585011 0.938851
+vt 0.617202 0.922908
+vt 0.614188 0.910157
+vt 0.635979 0.920365
+vt 0.638841 0.906233
+vt 0.611323 0.89609
+vt 0.607883 0.938585
+vt 0.620514 0.933025
+vt 0.633926 0.930757
+vt 0.597671 0.945329
+vt 0.592645 0.95281
+vt 0.700973 0.938045
+vt 0.70873 0.951078
+vt 0.680671 0.943135
+vt 0.684796 0.953548
+vt 0.697126 0.916127
+vt 0.723976 0.932747
+vt 0.682579 0.924761
+vt 0.65736 0.940282
+vt 0.665771 0.947808
+vt 0.668833 0.955667
+vt 0.653673 0.924504
+vt 0.646476 0.933887
+vt 0.670275 0.899507
+vt 0.661753 0.912628
+vt 0.571389 0.966985
+vt 0.583228 0.967478
+vt 0.599576 0.982628
+vt 0.604894 0.978385
+vt 0.628092 0.978017
+vt 0.628226 0.984167
+vt 0.610106 0.974767
+vt 0.582286 0.958749
+vt 0.563995 0.95542
+vt 0.543202 0.951342
+vt 0.603906 0.967493
+vt 0.595571 0.96058
+vt 0.615108 0.972401
+vt 0.627917 0.974569
+vt 0.676098 0.972325
+vt 0.697647 0.962452
+vt 0.689295 0.973819
+vt 0.72006 0.961593
+vt 0.677946 0.963033
+vt 0.646577 0.976363
+vt 0.652459 0.980856
+vt 0.658529 0.986044
+vt 0.653384 0.96919
+vt 0.64107 0.973262
+vt 0.663671 0.963059
+vt 0.36984 0.23949
+vt 0.357948 0.252377
+vt 0.346028 0.26526
+vt 0.355607 0.271672
+vt 0.369662 0.258935
+vt 0.397205 0.227812
+vt 0.377725 0.226506
+vt 0.417354 0.23588
+vt 0.420828 0.226027
+vt 0.4431 0.22875
+vt 0.446522 0.222697
+vt 0.398497 0.266882
+vt 0.394993 0.256063
+vt 0.417376 0.253869
+vt 0.416668 0.245046
+vt 0.439677 0.234803
+vt 0.375987 0.26863
+vt 0.360587 0.280948
+vt 0.379618 0.279895
+vt 0.470993 0.595329
+vt 0.466212 0.581429
+vt 0.482679 0.589544
+vt 0.476881 0.579741
+vt 0.489311 0.600458
+vt 0.499356 0.579702
+vt 0.49224 0.574945
+vt 0.479771 0.564752
+vt 0.472749 0.572159
+vt 0.474256 0.558067
+vt 0.468458 0.56471
+vt 0.46434 0.573191
+vt 0.492648 0.557082
+vt 0.486003 0.554296
+vt 0.497062 0.544746
+vt 0.490231 0.543792
+vt 0.480758 0.549772
+vt 0.509401 0.558946
+vt 0.500678 0.558385
+vt 0.505852 0.543851
+vt 0.497996 0.536049
+vt 0.505621 0.535332
+vt 0.501948 0.53012
+vt 0.507915 0.530515
+vt 0.51368 0.534353
+vt 0.482316 0.532549
+vt 0.489579 0.530568
+vt 0.484501 0.537962
+vt 0.491239 0.536243
+vt 0.491373 0.524968
+vt 0.485801 0.52569
+vt 0.484285 0.519868
+vt 0.479547 0.521219
+vt 0.479193 0.527051
+vt 0.496798 0.525508
+vt 0.50215 0.526678
+vt 0.490005 0.520085
+vt 0.473801 0.535793
+vt 0.470659 0.528429
+vt 0.469034 0.521857
+vt 0.460157 0.521751
+vt 0.461514 0.530505
+vt 0.469248 0.549809
+vt 0.476929 0.543255
+vt 0.456268 0.544777
+vt 0.462181 0.555305
+vt 0.449568 0.548141
+vt 0.456741 0.560093
+vt 0.451562 0.524114
+vt 0.453071 0.533958
+vt 0.446261 0.536501
+vt 0.933404 0.221309
+vt 0.924739 0.213284
+vt 0.938258 0.211078
+vt 0.929637 0.202912
+vt 0.924585 0.183534
+vt 0.934625 0.192739
+vt 0.919804 0.19418
+vt 0.910176 0.214711
+vt 0.920022 0.224053
+vt 0.929173 0.232598
+vt 0.892208 0.188152
+vt 0.904301 0.196133
+vt 0.88756 0.198715
+vt 0.899431 0.206272
+vt 0.909372 0.185384
+vt 0.914544 0.174329
+vt 0.89751 0.177035
+vt 0.829357 0.162643
+vt 0.809729 0.156901
+vt 0.833272 0.14992
+vt 0.812406 0.143745
+vt 0.789947 0.13803
+vt 0.815325 0.130471
+vt 0.791208 0.124039
+vt 0.787797 0.165074
+vt 0.807536 0.169823
+vt 0.826253 0.175235
+vt 0.767318 0.161769
+vt 0.746459 0.160576
+vt 0.767097 0.147949
+vt 0.744921 0.146466
+vt 0.767051 0.133048
+vt 0.767091 0.117607
+vt 0.743596 0.130619
+vt 0.722473 0.147996
+vt 0.719464 0.132562
+vt 0.695065 0.137329
+vt 0.716504 0.116494
+vt 0.690034 0.121812
+vt 0.725582 0.162157
+vt 0.70494 0.166437
+vt 0.656395 0.167991
+vt 0.677929 0.158979
+vt 0.665245 0.182639
+vt 0.684786 0.173336
+vt 0.670811 0.143368
+vt 0.663563 0.127131
+vt 0.647183 0.151721
+vt 0.844502 0.359871
+vt 0.847714 0.360296
+vt 0.847408 0.363957
+vt 0.849348 0.364269
+vt 0.851648 0.36844
+vt 0.850981 0.368241
+vt 0.851475 0.364632
+vt 0.850753 0.356913
+vt 0.846081 0.356324
+vt 0.850205 0.353003
+vt 0.844447 0.352351
+vt 0.841595 0.355786
+vt 0.855799 0.357604
+vt 0.861641 0.358449
+vt 0.855262 0.361451
+vt 0.860512 0.36239
+vt 0.855963 0.353655
+vt 0.853976 0.365096
+vt 0.852315 0.368639
+vt 0.858224 0.366005
+vt 0.720036 0.362311
+vt 0.71959 0.357661
+vt 0.728015 0.361123
+vt 0.727779 0.35756
+vt 0.73391 0.360061
+vt 0.73344 0.362971
+vt 0.728708 0.364784
+vt 0.736235 0.351526
+vt 0.735307 0.354339
+vt 0.729598 0.350921
+vt 0.728459 0.354192
+vt 0.720575 0.353357
+vt 0.740522 0.356645
+vt 0.741697 0.354388
+vt 0.746207 0.356041
+vt 0.747858 0.354388
+vt 0.742872 0.352131
+vt 0.739347 0.358902
+vt 0.738172 0.361159
+vt 0.744555 0.357694
+vt 0.878699 0.181029
+vt 0.883387 0.169647
+vt 0.867761 0.162851
+vt 0.888238 0.158069
+vt 0.871973 0.151014
+vt 0.874335 0.192023
+vt 0.859716 0.186019
+vt 0.843668 0.180528
+vt 0.847301 0.168505
+vt 0.85139 0.156282
+vt 0.855707 0.143958
+vt 0.959959 0.243101
+vt 0.954077 0.238396
+vt 0.961713 0.236753
+vt 0.956323 0.230743
+vt 0.954067 0.216174
+vt 0.958767 0.22329
+vt 0.950811 0.224595
+vt 0.952226 0.24645
+vt 0.945638 0.243001
+vt 0.958206 0.249448
+vt 0.941091 0.227849
+vt 0.937833 0.238648
+vt 0.949366 0.209059
+vt 0.945053 0.218173
+vt 0.336934 0.71568
+vt 0.33038 0.741466
+vt 0.302716 0.711537
+vt 0.299424 0.738297
+vt 0.295776 0.762787
+vt 0.393479 0.752977
+vt 0.377463 0.773803
+vt 0.361811 0.746705
+vt 0.350155 0.769363
+vt 0.371566 0.72393
+vt 0.338262 0.791261
+vt 0.32625 0.812779
+vt 0.315076 0.788147
+vt 0.307068 0.810102
+vt 0.361448 0.794629
+vt 0.29189 0.78554
+vt 0.287886 0.807426
+vt 0.299023 0.2659
+vt 0.299047 0.279209
+vt 0.283956 0.266822
+vt 0.283986 0.280354
+vt 0.283688 0.295775
+vt 0.327087 0.27663
+vt 0.323816 0.291325
+vt 0.313749 0.277461
+vt 0.311916 0.292761
+vt 0.31433 0.263721
+vt 0.296663 0.325331
+vt 0.297456 0.310292
+vt 0.309865 0.321929
+vt 0.31066 0.307783
+vt 0.322425 0.305028
+vt 0.283294 0.311929
+vt 0.282898 0.327607
+vt 0.281812 0.241506
+vt 0.281311 0.235544
+vt 0.29388 0.241063
+vt 0.292617 0.235389
+vt 0.282538 0.248141
+vt 0.307969 0.245729
+vt 0.304463 0.240079
+vt 0.302421 0.235265
+vt 0.297743 0.255375
+vt 0.311831 0.253377
+vt 0.283363 0.256332
+vt 0.665914 0.351378
+vt 0.682493 0.354562
+vt 0.661557 0.365446
+vt 0.679123 0.366239
+vt 0.694271 0.365301
+vt 0.676262 0.378559
+vt 0.692167 0.374571
+vt 0.699871 0.348099
+vt 0.686879 0.344167
+vt 0.6718 0.339046
+vt 0.709168 0.357329
+vt 0.711074 0.351244
+vt 0.708348 0.363848
+vt 0.708071 0.370584
+vt 0.867597 0.363815
+vt 0.865591 0.367703
+vt 0.874845 0.369526
+vt 0.873284 0.373919
+vt 0.863133 0.371378
+vt 0.876827 0.360099
+vt 0.868698 0.3595
+vt 0.885694 0.365176
+vt 0.885881 0.359586
+vt 0.896427 0.363349
+vt 0.895778 0.357262
+vt 0.884753 0.370807
+vt 0.883435 0.376459
+vt 0.896131 0.370165
+vt 0.908527 0.359153
+vt 0.909799 0.366215
+vt 0.924545 0.360491
+vt 0.927731 0.367892
+vt 0.910658 0.373446
+vt 0.917439 0.345549
+vt 0.906434 0.352429
+vt 0.933647 0.34626
+vt 0.928384 0.337086
+vt 0.944982 0.340124
+vt 0.93903 0.329282
+vt 0.939163 0.354526
+vt 0.944804 0.362339
+vt 0.951802 0.349505
+vt 0.954599 0.336095
+vt 0.960613 0.346611
+vt 0.967306 0.344981
+vt 0.971931 0.35627
+vt 0.966904 0.356527
+vt 0.949139 0.32438
+vt 0.958528 0.321259
+vt 0.967014 0.318797
+vt 0.975048 0.316665
+vt 0.970264 0.331384
+vt 0.97736 0.329695
+vt 0.976958 0.356012
+vt 0.973591 0.343753
+vt 0.979672 0.342725
+vt 0.320524 0.146521
+vt 0.313373 0.154734
+vt 0.2954 0.141698
+vt 0.292003 0.150779
+vt 0.28884 0.159589
+vt 0.322801 0.167455
+vt 0.333528 0.159639
+vt 0.345648 0.151344
+vt 0.301572 0.170503
+vt 0.31486 0.174313
+vt 0.297761 0.178302
+vt 0.309891 0.180957
+vt 0.286147 0.16786
+vt 0.284108 0.176013
+vt 0.296019 0.186221
+vt 0.282908 0.18447
+vt 0.282346 0.192959
+vt 0.307895 0.195694
+vt 0.308085 0.188134
+vt 0.295125 0.202337
+vt 0.307775 0.203492
+vt 0.294766 0.209933
+vt 0.307347 0.211107
+vt 0.28222 0.201212
+vt 0.28215 0.209032
+vt 0.294211 0.217025
+vt 0.281754 0.216221
+vt 0.281328 0.222946
+vt 0.304681 0.22447
+vt 0.306231 0.218115
+vt 0.292675 0.229646
+vt 0.302952 0.230124
+vt 0.281165 0.229372
+vt 0.305534 0.681974
+vt 0.306111 0.650222
+vt 0.342004 0.687835
+vt 0.333707 0.612749
+vt 0.302684 0.616894
+vt 0.325885 0.580428
+vt 0.298274 0.585332
+vt 0.395357 0.618639
+vt 0.374414 0.634434
+vt 0.380969 0.58713
+vt 0.358816 0.601672
+vt 0.349578 0.570739
+vt 0.384513 0.681504
+vt 0.395957 0.664926
+vt 0.416814 0.706905
+vt 0.420474 0.694282
+vt 0.409745 0.650149
+vt 0.377758 0.701684
+vt 0.413154 0.719528
+vt 0.338053 0.278233
+vt 0.345863 0.28289
+vt 0.333606 0.290848
+vt 0.34226 0.309133
+vt 0.341074 0.298701
+vt 0.351677 0.302345
+vt 0.349731 0.291219
+vt 0.332267 0.302652
+vt 0.332584 0.313884
+vt 0.387723 0.422423
+vt 0.382162 0.422373
+vt 0.388869 0.417427
+vt 0.382859 0.417822
+vt 0.377549 0.413541
+vt 0.384244 0.41338
+vt 0.377169 0.417955
+vt 0.377116 0.426931
+vt 0.381664 0.426972
+vt 0.377432 0.431442
+vt 0.381856 0.431562
+vt 0.386531 0.427438
+vt 0.367249 0.422554
+vt 0.372123 0.422513
+vt 0.368692 0.426806
+vt 0.372857 0.426712
+vt 0.373317 0.430853
+vt 0.371782 0.41828
+vt 0.37118 0.414106
+vt 0.366505 0.41823
+vt 0.426004 0.390403
+vt 0.398456 0.389437
+vt 0.424679 0.386349
+vt 0.396424 0.386762
+vt 0.363793 0.388475
+vt 0.394705 0.384351
+vt 0.356716 0.386401
+vt 0.378598 0.396645
+vt 0.375308 0.393171
+vt 0.401577 0.396345
+vt 0.401115 0.392641
+vt 0.428621 0.394516
+vt 0.353918 0.396416
+vt 0.339672 0.402446
+vt 0.345629 0.394106
+vt 0.329812 0.40053
+vt 0.35953 0.398947
+vt 0.33321 0.391382
+vt 0.318726 0.38845
+vt 0.313563 0.397303
+vt 0.476084 0.423195
+vt 0.45045 0.417502
+vt 0.486588 0.412986
+vt 0.456271 0.406372
+vt 0.430198 0.413801
+vt 0.423791 0.426121
+vt 0.43912 0.429517
+vt 0.507665 0.429493
+vt 0.542219 0.436093
+vt 0.475452 0.439099
+vt 0.495356 0.444524
+vt 0.525393 0.419675
+vt 0.43331 0.443713
+vt 0.441295 0.448141
+vt 0.411039 0.45059
+vt 0.411532 0.454564
+vt 0.448493 0.452954
+vt 0.423753 0.440055
+vt 0.413961 0.436871
+vt 0.407352 0.447798
+vt 0.31374 0.40806
+vt 0.32634 0.410021
+vt 0.317907 0.421046
+vt 0.299322 0.405773
+vt 0.325214 0.421259
+vt 0.338474 0.421652
+vt 0.3353 0.411329
+vt 0.344846 0.421904
+vt 0.34232 0.412499
+vt 0.355509 0.44277
+vt 0.342793 0.432971
+vt 0.357815 0.441106
+vt 0.346865 0.432004
+vt 0.351296 0.431202
+vt 0.336492 0.43632
+vt 0.339458 0.434504
+vt 0.354149 0.44545
+vt 0.366963 0.45175
+vt 0.367513 0.449587
+vt 0.378353 0.456489
+vt 0.366715 0.454041
+vt 0.379286 0.45488
+vt 0.368666 0.447681
+vt 0.369978 0.445476
+vt 0.38013 0.451272
+vt 0.379885 0.448331
+vt 0.392918 0.451424
+vt 0.393862 0.45378
+vt 0.390643 0.448651
+vt 0.392502 0.456314
+vt 0.389991 0.458936
+vt 0.434919 0.382651
+vt 0.453791 0.389264
+vt 0.433807 0.382477
+vt 0.442133 0.387347
+vt 0.449541 0.392567
+vt 0.49183 0.400597
+vt 0.464657 0.391608
+vt 0.513113 0.405332
+vt 0.475128 0.394166
+vt 0.436031 0.382826
+vt 0.516095 0.409998
+vt 0.483347 0.40387
+vt 0.551098 0.416497
+vt 0.455111 0.398484
+vt 0.472738 0.503366
+vt 0.465757 0.493643
+vt 0.482409 0.502085
+vt 0.475421 0.491708
+vt 0.456009 0.49597
+vt 0.450804 0.485866
+vt 0.459618 0.483598
+vt 0.474669 0.513513
+vt 0.481402 0.512442
+vt 0.489397 0.512463
+vt 0.46027 0.514483
+vt 0.470459 0.516768
+vt 0.449301 0.508912
+vt 0.450715 0.51651
+vt 0.441643 0.516565
+vt 0.44392 0.523436
+vt 0.44199 0.488134
+vt 0.446094 0.499081
+vt 0.437618 0.505763
+vt 0.409641 0.53125
+vt 0.417486 0.517634
+vt 0.424725 0.549489
+vt 0.428372 0.534587
+vt 0.421164 0.563953
+vt 0.440531 0.58304
+vt 0.440131 0.567599
+vt 0.438076 0.530458
+vt 0.43219 0.518805
+vt 0.42533 0.504018
+vt 0.445266 0.55059
+vt 0.442015 0.535846
+vt 0.453943 0.564519
+vt 0.453765 0.571874
+vt 0.456182 0.585446
+vt 0.459898 0.602127
+vt 0.349013 0.491269
+vt 0.336754 0.508239
+vt 0.331154 0.496224
+vt 0.320148 0.507781
+vt 0.297949 0.508264
+vt 0.308444 0.503271
+vt 0.306838 0.517622
+vt 0.350155 0.524533
+vt 0.35745 0.505005
+vt 0.375656 0.518489
+vt 0.380193 0.499924
+vt 0.366871 0.486313
+vt 0.322305 0.553361
+vt 0.347111 0.545735
+vt 0.371118 0.537055
+vt 0.293484 0.538213
+vt 0.295904 0.558876
+vt 0.288923 0.524031
+vt 0.284152 0.512489
+vt 0.281519 0.354585
+vt 0.27883 0.367232
+vt 0.275331 0.37971
+vt 0.281861 0.37985
+vt 0.288051 0.367559
+vt 0.296239 0.339242
+vt 0.282591 0.341601
+vt 0.310112 0.344766
+vt 0.309416 0.334597
+vt 0.322149 0.338741
+vt 0.321809 0.329248
+vt 0.30337 0.359703
+vt 0.312751 0.351414
+vt 0.309216 0.364244
+vt 0.316363 0.356301
+vt 0.323307 0.345678
+vt 0.295502 0.369475
+vt 0.288392 0.37999
+vt 0.302069 0.372187
+vt 0.281101 0.499747
+vt 0.27934 0.48605
+vt 0.292638 0.497304
+vt 0.28723 0.458265
+vt 0.288173 0.471672
+vt 0.277967 0.456884
+vt 0.278439 0.471644
+vt 0.299751 0.483057
+vt 0.297907 0.471434
+vt 0.309526 0.481102
+vt 0.307642 0.471064
+vt 0.296494 0.459645
+vt 0.302454 0.49435
+vt 0.311411 0.491141
+vt 0.343632 0.320416
+vt 0.333105 0.324778
+vt 0.333814 0.334838
+vt 0.363585 0.310039
+vt 0.366431 0.327171
+vt 0.353718 0.315393
+vt 0.355806 0.329495
+vt 0.346397 0.343469
+vt 0.357889 0.343783
+vt 0.347817 0.354895
+vt 0.35997 0.358164
+vt 0.369277 0.344302
+vt 0.334695 0.343565
+vt 0.335664 0.351626
+vt 0.379291 0.400052
+vt 0.363918 0.402333
+vt 0.367243 0.406178
+vt 0.353116 0.407088
+vt 0.35919 0.410239
+vt 0.41091 0.404675
+vt 0.399926 0.408536
+vt 0.397021 0.400516
+vt 0.390935 0.404866
+vt 0.37815 0.408898
+vt 0.386805 0.409108
+vt 0.393279 0.411364
+vt 0.363338 0.412821
+vt 0.369666 0.410088
+vt 0.350438 0.422105
+vt 0.355707 0.430165
+vt 0.363148 0.437257
+vt 0.365676 0.434559
+vt 0.360087 0.428972
+vt 0.349098 0.414046
+vt 0.35544 0.415713
+vt 0.361911 0.422489
+vt 0.36115 0.417241
+vt 0.368224 0.432108
+vt 0.364426 0.427706
+vt 0.379285 0.445126
+vt 0.388009 0.44487
+vt 0.385408 0.440532
+vt 0.396797 0.441249
+vt 0.391983 0.437144
+vt 0.371003 0.442416
+vt 0.371902 0.438797
+vt 0.377944 0.436209
+vt 0.372836 0.434912
+vt 0.388047 0.433887
+vt 0.38323 0.436089
+vt 0.411804 0.424136
+vt 0.414761 0.413476
+vt 0.40336 0.414868
+vt 0.405273 0.433865
+vt 0.397822 0.431174
+vt 0.394183 0.422617
+vt 0.391746 0.428935
+vt 0.395216 0.416313
+vt 0.775924 0.375151
+vt 0.775728 0.388743
+vt 0.746453 0.386032
+vt 0.747928 0.395308
+vt 0.717395 0.406114
+vt 0.713072 0.403611
+vt 0.744823 0.376573
+vt 0.749092 0.40422
+vt 0.749645 0.415483
+vt 0.72449 0.414957
+vt 0.72621 0.424666
+vt 0.7756 0.399248
+vt 0.70368 0.428763
+vt 0.687208 0.443737
+vt 0.697322 0.42707
+vt 0.677635 0.445158
+vt 0.707038 0.434775
+vt 0.68965 0.428163
+vt 0.681322 0.43065
+vt 0.666675 0.448773
+vt 0.663815 0.460707
+vt 0.651924 0.464538
+vt 0.630412 0.480156
+vt 0.639991 0.468922
+vt 0.642366 0.477595
+vt 0.66825 0.472074
+vt 0.675624 0.457981
+vt 0.68415 0.468328
+vt 0.689031 0.45406
+vt 0.649396 0.488552
+vt 0.664408 0.486592
+vt 0.681197 0.484039
+vt 0.634969 0.490079
+vt 0.620833 0.49139
+vt 0.912385 0.481335
+vt 0.932278 0.480511
+vt 0.952544 0.479369
+vt 0.933883 0.461959
+vt 0.919103 0.464149
+vt 0.869741 0.465154
+vt 0.887953 0.466437
+vt 0.873151 0.480724
+vt 0.893241 0.481526
+vt 0.865824 0.437779
+vt 0.874945 0.43437
+vt 0.878921 0.451884
+vt 0.891561 0.450226
+vt 0.864248 0.450831
+vt 0.915223 0.44455
+vt 0.903554 0.447624
+vt 0.884353 0.430602
+vt 0.854087 0.418075
+vt 0.860227 0.412749
+vt 0.833101 0.395604
+vt 0.866565 0.407739
+vt 0.836568 0.388337
+vt 0.826949 0.411924
+vt 0.848341 0.424031
+vt 0.824944 0.4236
+vt 0.84502 0.432484
+vt 0.803376 0.3921
+vt 0.802127 0.402739
+vt 0.801153 0.415145
+vt 0.804904 0.380706
+vt 0.806571 0.368936
+vt 0.702688 0.462561
+vt 0.700952 0.48073
+vt 0.723247 0.47733
+vt 0.725766 0.438352
+vt 0.705712 0.446642
+vt 0.74847 0.451919
+vt 0.749286 0.431814
+vt 0.774044 0.450536
+vt 0.774704 0.42899
+vt 0.747651 0.474504
+vt 0.773403 0.473071
+vt 0.799984 0.45176
+vt 0.799741 0.47385
+vt 0.825727 0.475971
+vt 0.824826 0.438046
+vt 0.800446 0.431839
+vt 0.848903 0.461112
+vt 0.846154 0.445298
+vt 0.850422 0.478567
+vt 0.62364 0.273336
+vt 0.631303 0.268742
+vt 0.633284 0.279074
+vt 0.640023 0.273125
+vt 0.639784 0.263935
+vt 0.632769 0.258173
+vt 0.623739 0.264647
+vt 0.600029 0.280268
+vt 0.598688 0.270903
+vt 0.613376 0.277111
+vt 0.610984 0.269667
+vt 0.621969 0.284647
+vt 0.621719 0.253201
+vt 0.613611 0.261924
+vt 0.599928 0.261159
+vt 0.670072 0.278979
+vt 0.653616 0.276716
+vt 0.669832 0.267969
+vt 0.654614 0.268283
+vt 0.648422 0.284838
+vt 0.690213 0.290189
+vt 0.687377 0.279381
+vt 0.68627 0.268024
+vt 0.683842 0.304224
+vt 0.697203 0.300525
+vt 0.645124 0.311795
+vt 0.656458 0.301386
+vt 0.663002 0.320505
+vt 0.672639 0.311009
+vt 0.637949 0.292669
+vt 0.625515 0.302527
+vt 0.64648 0.25088
+vt 0.652498 0.259521
+vt 0.667905 0.256541
+vt 0.65209 0.231125
+vt 0.635774 0.242815
+vt 0.639961 0.22002
+vt 0.62248 0.233347
+vt 0.677556 0.227123
+vt 0.666981 0.218489
+vt 0.656893 0.207226
+vt 0.684642 0.243963
+vt 0.689474 0.232327
+vt 0.684466 0.25604
+vt 0.602642 0.176918
+vt 0.580529 0.190632
+vt 0.589953 0.161017
+vt 0.566382 0.1753
+vt 0.557833 0.204588
+vt 0.573039 0.21849
+vt 0.594645 0.205765
+vt 0.635672 0.179448
+vt 0.62466 0.163426
+vt 0.613523 0.146733
+vt 0.646437 0.19413
+vt 0.627782 0.207015
+vt 0.588272 0.232651
+vt 0.608699 0.2205
+vt 0.608211 0.373618
+vt 0.616832 0.356454
+vt 0.634284 0.378082
+vt 0.640225 0.361705
+vt 0.646724 0.346174
+vt 0.603899 0.333518
+vt 0.593087 0.35113
+vt 0.582299 0.32801
+vt 0.570201 0.34546
+vt 0.582138 0.369154
+vt 0.635039 0.324678
+vt 0.614437 0.31673
+vt 0.593411 0.310569
+vt 0.65434 0.332333
+vt 0.561644 0.296106
+vt 0.564966 0.285457
+vt 0.580542 0.294418
+vt 0.583116 0.283009
+vt 0.582766 0.272899
+vt 0.546607 0.278466
+vt 0.547908 0.287734
+vt 0.528899 0.281331
+vt 0.53186 0.290461
+vt 0.545646 0.297066
+vt 0.561955 0.266155
+vt 0.543727 0.269714
+vt 0.555322 0.257166
+vt 0.537387 0.262094
+vt 0.526554 0.27266
+vt 0.581699 0.262937
+vt 0.576957 0.252641
+vt 0.565582 0.241532
+vt 0.543702 0.248829
+vt 0.550357 0.230094
+vt 0.496387 0.262785
+vt 0.509284 0.25153
+vt 0.513745 0.264403
+vt 0.525705 0.256225
+vt 0.51058 0.233141
+vt 0.488727 0.247432
+vt 0.491672 0.225616
+vt 0.466102 0.243633
+vt 0.473932 0.261753
+vt 0.534066 0.218808
+vt 0.517242 0.2076
+vt 0.516335 0.345283
+vt 0.530115 0.332372
+vt 0.5362 0.354987
+vt 0.549386 0.339171
+vt 0.561842 0.32354
+vt 0.526052 0.315178
+vt 0.511862 0.325172
+vt 0.511906 0.308506
+vt 0.496234 0.31623
+vt 0.49647 0.33558
+vt 0.537837 0.306011
+vt 0.553916 0.307529
+vt 0.525234 0.302122
+vt 0.572839 0.308275
+vt 0.511827 0.28428
+vt 0.509974 0.274041
+vt 0.492244 0.274874
+vt 0.501435 0.298913
+vt 0.516739 0.294259
+vt 0.458569 0.293763
+vt 0.476889 0.290416
+vt 0.467587 0.30982
+vt 0.484835 0.304206
+vt 0.471621 0.276173
+vt 0.449551 0.277706
+vt 0.79029 0.672484
+vt 0.810842 0.6624
+vt 0.788538 0.66243
+vt 0.832155 0.647948
+vt 0.833088 0.669026
+vt 0.767347 0.664111
+vt 0.749725 0.673721
+vt 0.741816 0.650008
+vt 0.85403 0.66365
+vt 0.868881 0.657298
+vt 0.85539 0.645025
+vt 0.888074 0.648027
+vt 0.874212 0.676013
+vt 0.886607 0.695639
+vt 0.89598 0.672443
+vt 0.929571 0.681021
+vt 0.72496 0.675077
+vt 0.707382 0.674426
+vt 0.711754 0.695525
+vt 0.674442 0.675309
+vt 0.715494 0.652696
+vt 0.851241 0.674754
+vt 0.83861 0.688008
+vt 0.813905 0.681083
+vt 0.795054 0.69679
+vt 0.803314 0.729031
+vt 0.821237 0.705648
+vt 0.847489 0.712022
+vt 0.863108 0.691149
+vt 0.734908 0.685353
+vt 0.751974 0.699565
+vt 0.699285 0.735409
+vt 0.729177 0.71566
+vt 0.752628 0.735708
+vt 0.775609 0.715256
+vt 0.771295 0.685081
+vt 0.892477 0.736968
+vt 0.891992 0.760942
+vt 0.924257 0.752441
+vt 0.883161 0.790753
+vt 0.862406 0.753942
+vt 0.813012 0.768991
+vt 0.847704 0.781855
+vt 0.824711 0.812689
+vt 0.703179 0.776568
+vt 0.71878 0.801121
+vt 0.75314 0.781706
+vt 0.750148 0.828139
+vt 0.674567 0.808938
+vt 0.785894 0.799205
+vt 0.832437 0.740517
+vt 0.726143 0.758511
+vt 0.908808 0.716733
+vt 0.780855 0.754707
+vt 0.872961 0.724451
+vt 0.947275 0.716911
+vt 0.972864 0.691703
+vt 0.956423 0.743409
+vt 0.170337 0.155827
+vt 0.136076 0.187112
+vt 0.172299 0.21177
+vt 0.0899929 0.213591
+vt 0.10853 0.16596
+vt 0.195445 0.192426
+vt 0.220169 0.183556
+vt 0.218642 0.212212
+vt 0.204829 0.229447
+vt 0.228379 0.234767
+vt 0.205687 0.259413
+vt 0.137176 0.927417
+vt 0.140528 0.948093
+vt 0.168383 0.944741
+vt 0.141913 0.961891
+vt 0.11077 0.946594
+vt 0.878967 0.954757
+vt 0.879367 0.938048
+vt 0.853203 0.941375
+vt 0.880712 0.92523
+vt 0.906681 0.938262
+vt 0.989055 0.9382
+vt 0.912651 0.957127
+vt 0.886501 1.00065
+vt 0.775015 0.959635
+vt 0.84864 0.929044
+vt 0.877392 0.895069
+vt 0.91222 0.923978
+vt 0.849069 0.962192
+vt 0.254826 0.925518
+vt 0.170213 0.919654
+vt 0.126415 0.872706
+vt 0.0190149 0.939185
+vt 0.100899 0.922774
+vt 0.142077 0.988088
+vt 0.17534 0.957124
+vt 0.106027 0.960245
+vt 0.148988 0.242126
+vt 0.176354 0.291487
+vt 0.102893 0.237993
+vt 0.044388 0.5061
+vt 0.0504472 0.534124
+vt 0.0221481 0.572717
+vt 0.0611958 0.546374
+vt 0.0719578 0.52396
+vt 0.0576334 0.571748
+vt 0.0664284 0.60486
+vt 0.0810156 0.564522
+vt 0.0825732 0.542567
+vt 0.101769 0.534189
+vt 0.944378 0.797731
+vt 0.915109 0.794589
+vt 0.905965 0.833567
+vt 0.842874 0.862029
+vt 0.791851 0.848437
+vt 0.748723 0.889561
+vt 0.703598 0.850293
+vt 0.642603 0.8718
+vt 0.843173 0.630902
+vt 0.84458 0.638283
+vt 0.846408 0.629484
+vt 0.71498 0.637314
+vt 0.726461 0.64589
+vt 0.734897 0.643187
+vt 0.865004 0.824698
+vt 0.962666 0.771065
+vt 0.94749 0.766441
+vt 0.268705 0.710477
+vt 0.214528 0.765549
+vt 0.268705 0.80475
+vt 0.191977 0.815455
+vt 0.127915 0.732152
+vt 0.268814 0.267129
+vt 0.238957 0.294344
+vt 0.268669 0.328244
+vt 0.215045 0.317962
+vt 0.269051 0.235581
+vt 0.241893 0.247542
+vt 0.652939 0.625528
+vt 0.689461 0.648127
+vt 0.870091 0.634597
+vt 0.889967 0.621532
+vt 0.917129 0.645104
+vt 0.960269 0.639284
+vt 0.9614 0.662237
+vt 0.981019 0.639244
+vt 0.270276 0.136874
+vt 0.232631 0.163206
+vt 0.269409 0.175133
+vt 0.242681 0.194855
+vt 0.269141 0.208639
+vt 0.244466 0.223601
+vt 0.189521 0.653078
+vt 0.113276 0.681659
+vt 0.170828 0.555621
+vt 0.268705 0.586687
+vt 0.197069 0.290309
+vt 0.147015 0.418787
+vt 0.159401 0.426978
+vt 0.169209 0.419346
+vt 0.166865 0.4343
+vt 0.151569 0.435731
+vt 0.101359 0.388325
+vt 0.164076 0.395797
+vt 0.253799 0.393043
+vt 0.188648 0.408382
+vt 0.10719 0.406598
+vt 0.0810976 0.440196
+vt 0.135792 0.449792
+vt 0.137248 0.466241
+vt -0.0522421 0.436952
+vt 0.203585 0.425014
+vt 0.183198 0.455456
+vt 0.176955 0.4435
+vt 0.158284 0.457002
+vt 0.0646922 0.403325
+vt 0.0969534 0.389119
+vt 0.127418 0.569795
+vt 0.101838 0.557019
+vt 0.115165 0.496827
+vt 0.0856225 0.479205
+vt 0.0876161 0.508163
+vt 0.224113 0.501179
+vt 0.216094 0.528531
+vt 0.268704 0.514179
+vt 0.152679 0.481359
+vt 0.268801 0.37957
+vt 0.241478 0.353126
+vt 0.242729 0.379892
+vt 0.214493 0.347926
+vt 0.247642 0.484503
+vt 0.231652 0.461026
+vt 0.268704 0.455503
+vt 0.192219 0.333104
+vt 0.164806 0.367802
+vt 0.156354 0.409665
+vt 0.179537 0.42641
+vt 0.158914 0.444672
+vt 0.1348 0.428233
+vt 0.776574 0.644712
+vt 0.722394 0.585817
+vt 0.651799 0.541213
+vt 0.694305 0.554226
+vt 0.775203 0.583316
+vt 0.656336 0.526524
+vt 0.611254 0.502624
+vt 0.680604 0.50058
+vt 0.971205 0.496778
+vt 0.903217 0.52798
+vt 0.896582 0.566601
+vt 0.859045 0.553539
+vt 0.874064 0.496896
+vt 0.829393 0.590056
+vt 0.725299 0.540094
+vt 0.773136 0.497495
+vt 0.8247 0.537119
+vt 0.64197 0.737033
+vt 0.61964 0.737363
+vt 0.604328 0.759732
+vt 0.608299 0.714718
+vt 0.667128 0.712276
+vt 0.663413 0.762979
+vt 0.546984 0.81774
+vt 0.617742 0.813781
+vt 0.624356 0.66752
+vt 0.55686 0.648558
+vt 0.568421 0.733816
+vt 0.529143 0.743889
+vt 0.538817 0.714517
+vt 0.531598 0.768055
+vt 0.445833 0.7485
+vt 0.547123 0.691597
+vt 0.481156 0.686856
+vt 0.499408 0.724217
+vt 0.796428 0.328877
+vt 0.8169 0.338119
+vt 0.838683 0.330654
+vt 0.838689 0.3517
+vt 0.793926 0.338777
+vt 0.773732 0.337889
+vt 0.74951 0.352736
+vt 0.755441 0.328792
+vt 0.859512 0.335574
+vt 0.874845 0.341757
+vt 0.878929 0.322664
+vt 0.894264 0.350808
+vt 0.86172 0.354306
+vt 0.890121 0.302638
+vt 0.900633 0.325513
+vt 0.932192 0.315548
+vt 0.730562 0.326947
+vt 0.713106 0.328223
+vt 0.72296 0.350316
+vt 0.678736 0.328632
+vt 0.716325 0.307131
+vt 0.856285 0.324517
+vt 0.84324 0.311687
+vt 0.81933 0.31943
+vt 0.799782 0.304286
+vt 0.806322 0.271776
+vt 0.825244 0.294637
+vt 0.850879 0.287575
+vt 0.86722 0.30781
+vt 0.740091 0.316932
+vt 0.756566 0.302501
+vt 0.702583 0.267919
+vt 0.73303 0.286789
+vt 0.75555 0.26631
+vt 0.779434 0.286256
+vt 0.776741 0.316735
+vt 0.894098 0.262064
+vt 0.892695 0.238422
+vt 0.863735 0.245736
+vt 0.882937 0.208649
+vt 0.924796 0.246563
+vt 0.814013 0.231601
+vt 0.847948 0.218052
+vt 0.823544 0.187373
+vt 0.704382 0.227027
+vt 0.718515 0.201921
+vt 0.673705 0.195595
+vt 0.748091 0.173595
+vt 0.753811 0.220319
+vt 0.785435 0.201844
+vt 0.834654 0.259639
+vt 0.728096 0.244245
+vt 0.910872 0.281409
+vt 0.782742 0.246644
+vt 0.87545 0.274707
+vt 0.948004 0.28051
+vt 0.956452 0.255796
+vt 0.972736 0.303635
+vt 0.370772 0.156168
+vt 0.404072 0.188535
+vt 0.432688 0.16816
+vt 0.449944 0.216643
+vt 0.366322 0.212187
+vt 0.343097 0.19204
+vt 0.319164 0.211948
+vt 0.318073 0.183115
+vt 0.332918 0.229442
+vt 0.33175 0.259955
+vt 0.309225 0.234833
+vt 0.632664 0.937599
+vt 0.629313 0.958275
+vt 0.659071 0.956775
+vt 0.627927 0.972073
+vt 0.601457 0.954923
+vt 0.374133 0.932158
+vt 0.374091 0.913509
+vt 0.349193 0.91288
+vt 0.374675 0.900396
+vt 0.400674 0.918841
+vt 0.275352 0.917971
+vt 0.344112 0.934726
+vt 0.371547 0.982105
+vt 0.480772 0.938676
+vt 0.406833 0.906276
+vt 0.38004 0.872706
+vt 0.345731 0.899719
+vt 0.405214 0.941284
+vt 0.515015 0.935699
+vt 0.599629 0.929832
+vt 0.643425 0.882887
+vt 0.750826 0.949367
+vt 0.668941 0.932959
+vt 0.627764 0.99827
+vt 0.594501 0.967304
+vt 0.663814 0.97043
+vt 0.389421 0.243699
+vt 0.436255 0.240856
+vt 0.360739 0.292908
+vt 0.479266 0.621214
+vt 0.485524 0.569542
+vt 0.463254 0.566666
+vt 0.48531 0.54359
+vt 0.519446 0.53819
+vt 0.495549 0.530351
+vt 0.47602 0.523291
+vt 0.496385 0.522841
+vt 0.465093 0.54022
+vt 0.446797 0.527765
+vt 0.944666 0.201943
+vt 0.914837 0.204675
+vt 0.904503 0.165124
+vt 0.839442 0.136903
+vt 0.788702 0.151899
+vt 0.742974 0.111175
+vt 0.700099 0.152559
+vt 0.637093 0.13245
+vt 0.850315 0.368043
+vt 0.851223 0.360798
+vt 0.852982 0.368837
+vt 0.723976 0.366596
+vt 0.734474 0.357174
+vt 0.742904 0.359347
+vt 0.863449 0.174562
+vt 0.963467 0.230405
+vt 0.947856 0.233441
+vt 0.322881 0.76555
+vt 0.409494 0.732152
+vt 0.345432 0.815455
+vt 0.298143 0.294421
+vt 0.321942 0.31772
+vt 0.295585 0.24761
+vt 0.660357 0.382546
+vt 0.69659 0.356392
+vt 0.876132 0.364721
+vt 0.893585 0.379
+vt 0.921054 0.3528
+vt 0.961877 0.356785
+vt 0.962698 0.33339
+vt 0.981984 0.355755
+vt 0.306768 0.162912
+vt 0.295194 0.194359
+vt 0.29317 0.223456
+vt 0.347888 0.653078
+vt 0.366581 0.55562
+vt 0.424133 0.681659
+vt 0.340033 0.290817
+vt 0.388965 0.413938
+vt 0.377097 0.422446
+vt 0.385455 0.431061
+vt 0.370091 0.429971
+vt 0.366833 0.415002
+vt 0.432694 0.382302
+vt 0.370508 0.391231
+vt 0.427991 0.40079
+vt 0.346746 0.40443
+vt 0.280737 0.3905
+vt 0.456111 0.433947
+vt 0.589083 0.427663
+vt 0.401629 0.461384
+vt 0.402075 0.444828
+vt 0.332854 0.421481
+vt 0.360577 0.439441
+vt 0.355077 0.451593
+vt 0.380059 0.45258
+vt 0.470234 0.396534
+vt 0.437143 0.383
+vt 0.468433 0.48133
+vt 0.462264 0.506664
+vt 0.433175 0.490402
+vt 0.401796 0.544866
+vt 0.440576 0.55139
+vt 0.313296 0.501179
+vt 0.321314 0.528529
+vt 0.38473 0.481358
+vt 0.295899 0.353213
+vt 0.32351 0.348357
+vt 0.294922 0.38013
+vt 0.289767 0.484501
+vt 0.305757 0.461026
+vt 0.344952 0.331857
+vt 0.372123 0.361433
+vt 0.379071 0.404986
+vt 0.356951 0.422334
+vt 0.378668 0.44021
+vt 0.40174 0.423148
+vt 0.776574 0.349534
+vt 0.721657 0.410555
+vt 0.77532 0.411729
+vt 0.694232 0.4441
+vt 0.649571 0.457689
+vt 0.655529 0.474973
+vt 0.903209 0.465704
+vt 0.85855 0.440351
+vt 0.896562 0.42714
+vt 0.829462 0.404087
+vt 0.725094 0.457517
+vt 0.824792 0.456782
+vt 0.644474 0.268488
+vt 0.619495 0.269287
+vt 0.604207 0.292645
+vt 0.603235 0.247789
+vt 0.669742 0.291872
+vt 0.665491 0.242026
+vt 0.542812 0.189584
+vt 0.615129 0.192251
+vt 0.625589 0.340224
+vt 0.556065 0.36469
+vt 0.56464 0.275636
+vt 0.534275 0.296732
+vt 0.525704 0.266294
+vt 0.528338 0.24086
+vt 0.440532 0.261649
+vt 0.542934 0.319725
+vt 0.476605 0.325877
+vt 0.495044 0.287182
+f 739/1 735/2 736/3
+f 189/4 736/3 735/2
+f 192/5 738/6 737/7
+f 739/1 737/7 738/6
+f 190/8 741/9 740/10
+f 743/11 740/10 741/9
+f 736/3 189/4 743/11
+f 742/12 743/11 189/4
+f 745/13 747/14 191/15
+f 744/16 191/15 747/14
+f 746/17 747/14 190/8
+f 741/9 190/8 747/14
+f 748/18 192/5 750/19
+f 737/7 750/19 192/5
+f 191/15 749/20 745/13
+f 750/19 745/13 749/20
+f 752/21 754/22 194/23
+f 751/24 194/23 754/22
+f 753/25 189/4 754/22
+f 735/2 754/22 189/4
+f 195/26 756/27 755/28
+f 758/29 755/28 756/27
+f 194/23 757/30 752/21
+f 758/29 752/21 757/30
+f 759/31 196/32 762/33
+f 760/34 762/33 196/32
+f 761/35 762/33 195/26
+f 756/27 195/26 762/33
+f 764/36 742/12 753/25
+f 189/4 753/25 742/12
+f 196/32 763/37 760/34
+f 764/36 760/34 763/37
+f 769/38 765/39 766/40
+f 198/41 766/40 765/39
+f 768/42 769/38 201/43
+f 767/44 201/43 769/38
+f 771/45 773/46 199/47
+f 770/48 199/47 773/46
+f 772/49 773/46 198/41
+f 766/40 198/41 773/46
+f 200/50 775/51 774/52
+f 777/53 774/52 775/51
+f 199/47 776/54 771/45
+f 777/53 771/45 776/54
+f 201/43 767/44 778/55
+f 780/56 778/55 767/44
+f 779/57 780/56 200/50
+f 775/51 200/50 780/56
+f 782/58 785/59 203/60
+f 781/61 203/60 785/59
+f 784/62 785/59 205/63
+f 783/64 205/63 785/59
+f 200/50 786/65 779/57
+f 788/66 779/57 786/65
+f 203/60 787/67 782/58
+f 788/66 782/58 787/67
+f 790/68 791/69 204/70
+f 789/71 204/70 791/69
+f 774/52 791/69 200/50
+f 786/65 200/50 791/69
+f 205/63 783/64 792/72
+f 794/73 792/72 783/64
+f 204/70 793/74 790/68
+f 794/73 790/68 793/74
+f 796/75 799/76 207/77
+f 795/78 207/77 799/76
+f 210/79 798/80 797/81
+f 799/76 797/81 798/80
+f 208/82 801/83 800/84
+f 803/85 800/84 801/83
+f 207/77 802/86 796/75
+f 803/85 796/75 802/86
+f 209/87 805/88 804/89
+f 807/90 804/89 805/88
+f 806/91 807/90 208/82
+f 801/83 208/82 807/90
+f 797/81 810/92 210/79
+f 808/93 210/79 810/92
+f 809/94 810/92 209/87
+f 805/88 209/87 810/92
+f 201/43 811/95 768/42
+f 814/96 768/42 811/95
+f 214/97 813/98 812/99
+f 814/96 812/99 813/98
+f 816/100 817/101 212/102
+f 815/103 212/102 817/101
+f 778/55 817/101 201/43
+f 811/95 201/43 817/101
+f 213/104 819/105 818/106
+f 821/107 818/106 819/105
+f 212/102 820/108 816/100
+f 821/107 816/100 820/108
+f 812/99 824/109 214/97
+f 822/110 214/97 824/109
+f 823/111 824/109 213/104
+f 819/105 213/104 824/109
+f 825/112 828/113 192/5
+f 738/6 192/5 828/113
+f 827/114 828/113 217/115
+f 826/116 217/115 828/113
+f 213/104 829/117 823/111
+f 830/118 823/111 829/117
+f 830/118 825/112 748/18
+f 192/5 748/18 825/112
+f 832/119 833/120 216/121
+f 831/122 216/121 833/120
+f 818/106 833/120 213/104
+f 829/117 213/104 833/120
+f 217/115 826/116 834/123
+f 836/124 834/123 826/116
+f 216/121 835/125 832/119
+f 836/124 832/119 835/125
+f 219/126 838/127 837/128
+f 841/129 837/128 838/127
+f 839/130 221/131 841/129
+f 840/132 841/129 221/131
+f 842/133 844/134 216/121
+f 835/125 216/121 844/134
+f 843/135 844/134 219/126
+f 838/127 219/126 844/134
+f 847/136 845/137 846/138
+f 220/139 846/138 845/137
+f 216/121 831/122 842/133
+f 847/136 842/133 831/122
+f 850/140 848/141 839/130
+f 221/131 839/130 848/141
+f 846/138 220/139 850/140
+f 849/142 850/140 220/139
+f 223/143 852/144 851/145
+f 854/146 851/145 852/144
+f 203/60 781/61 853/147
+f 854/146 853/147 781/61
+f 220/139 855/148 849/142
+f 857/149 849/142 855/148
+f 852/144 223/143 857/149
+f 856/150 857/149 223/143
+f 820/108 212/102 859/151
+f 858/152 859/151 212/102
+f 845/137 859/151 220/139
+f 855/148 220/139 859/151
+f 787/67 203/60 860/153
+f 853/147 860/153 203/60
+f 212/102 815/103 858/152
+f 860/153 858/152 815/103
+f 225/154 862/155 861/156
+f 864/157 861/156 862/155
+f 207/77 795/78 863/158
+f 864/157 863/158 795/78
+f 866/159 868/160 226/161
+f 865/162 226/161 868/160
+f 867/163 868/160 225/154
+f 862/155 225/154 868/160
+f 227/164 870/165 869/166
+f 872/167 869/166 870/165
+f 226/161 871/168 866/159
+f 872/167 866/159 871/168
+f 863/158 874/169 207/77
+f 802/86 207/77 874/169
+f 873/170 874/169 227/164
+f 870/165 227/164 874/169
+f 876/171 879/172 229/173
+f 875/174 229/173 879/172
+f 877/175 231/176 879/172
+f 878/177 879/172 231/176
+f 227/164 880/178 873/170
+f 882/179 873/170 880/178
+f 229/173 881/180 876/171
+f 882/179 876/171 881/180
+f 884/181 885/182 230/183
+f 883/184 230/183 885/182
+f 869/166 885/182 227/164
+f 880/178 227/164 885/182
+f 888/185 886/186 877/175
+f 231/176 877/175 886/186
+f 230/183 887/187 884/181
+f 888/185 884/181 887/187
+f 892/188 889/189 890/190
+f 233/191 890/190 889/189
+f 892/188 891/192 837/128
+f 219/126 837/128 891/192
+f 893/193 895/194 230/183
+f 887/187 230/183 895/194
+f 894/195 895/194 233/191
+f 890/190 233/191 895/194
+f 234/196 897/197 896/198
+f 898/199 896/198 897/197
+f 230/183 883/184 893/193
+f 898/199 893/193 883/184
+f 900/200 843/135 891/192
+f 219/126 891/192 843/135
+f 899/201 900/200 234/196
+f 897/197 234/196 900/200
+f 217/115 901/202 827/114
+f 903/203 827/114 901/202
+f 194/23 751/24 902/204
+f 903/203 902/204 751/24
+f 234/196 904/205 899/201
+f 905/206 899/201 904/205
+f 834/123 905/206 217/115
+f 901/202 217/115 905/206
+f 906/207 907/208 226/161
+f 871/168 226/161 907/208
+f 896/198 907/208 234/196
+f 904/205 234/196 907/208
+f 908/209 757/30 902/204
+f 194/23 902/204 757/30
+f 226/161 865/162 906/207
+f 908/209 906/207 865/162
+f 910/210 913/211 237/212
+f 909/213 237/212 913/211
+f 912/214 913/211 240/215
+f 911/216 240/215 913/211
+f 238/217 915/218 914/219
+f 917/220 914/219 915/218
+f 917/220 910/210 916/221
+f 237/212 916/221 910/210
+f 918/222 239/223 921/224
+f 919/225 921/224 239/223
+f 920/226 921/224 238/217
+f 915/218 238/217 921/224
+f 240/215 911/216 922/227
+f 924/228 922/227 911/216
+f 239/223 923/229 919/225
+f 924/228 919/225 923/229
+f 926/230 929/231 242/232
+f 925/233 242/232 929/231
+f 929/231 927/234 928/235
+f 244/236 928/235 927/234
+f 239/223 930/237 923/229
+f 932/238 923/229 930/237
+f 242/232 931/239 926/230
+f 932/238 926/230 931/239
+f 934/240 935/241 243/242
+f 933/243 243/242 935/241
+f 918/222 935/241 239/223
+f 930/237 239/223 935/241
+f 938/244 936/245 927/234
+f 244/236 927/234 936/245
+f 243/242 937/246 934/240
+f 938/244 934/240 937/246
+f 246/247 940/248 939/249
+f 943/250 939/249 940/248
+f 249/251 942/252 941/253
+f 943/250 941/253 942/252
+f 945/254 947/255 247/256
+f 944/257 247/256 947/255
+f 946/258 947/255 246/247
+f 940/248 246/247 947/255
+f 951/259 948/260 949/261
+f 248/262 949/261 948/260
+f 247/256 950/263 945/254
+f 951/259 945/254 950/263
+f 952/264 249/251 954/265
+f 941/253 954/265 249/251
+f 953/266 954/265 248/262
+f 949/261 248/262 954/265
+f 955/267 958/268 244/236
+f 928/235 244/236 958/268
+f 957/269 958/268 252/270
+f 956/271 252/270 958/268
+f 251/272 960/273 959/274
+f 961/275 959/274 960/273
+f 244/236 936/245 955/267
+f 961/275 955/267 936/245
+f 962/276 964/277 247/256
+f 950/263 247/256 964/277
+f 963/278 964/277 251/272
+f 960/273 251/272 964/277
+f 252/270 956/271 965/279
+f 966/280 965/279 956/271
+f 247/256 944/257 962/276
+f 966/280 962/276 944/257
+f 221/131 967/281 840/132
+f 970/282 840/132 967/281
+f 255/283 969/284 968/285
+f 970/282 968/285 969/284
+f 972/286 973/287 254/288
+f 971/289 254/288 973/287
+f 848/141 973/287 221/131
+f 967/281 221/131 973/287
+f 976/290 931/239 974/291
+f 242/232 974/291 931/239
+f 254/288 975/292 972/286
+f 976/290 972/286 975/292
+f 968/285 978/293 255/283
+f 977/294 255/283 978/293
+f 974/291 242/232 978/293
+f 925/233 978/293 242/232
+f 980/295 982/296 257/297
+f 979/298 257/297 982/296
+f 886/186 982/296 231/176
+f 981/299 231/176 982/296
+f 985/300 946/258 983/301
+f 246/247 983/301 946/258
+f 257/297 984/302 980/295
+f 985/300 980/295 984/302
+f 258/303 987/304 986/305
+f 988/306 986/305 987/304
+f 246/247 939/249 983/301
+f 988/306 983/301 939/249
+f 231/176 981/299 878/177
+f 990/307 878/177 981/299
+f 990/307 987/304 989/308
+f 258/303 989/308 987/304
+f 991/309 260/310 994/311
+f 992/312 994/311 260/310
+f 792/72 994/311 205/63
+f 993/313 205/63 994/311
+f 916/221 237/212 997/314
+f 995/315 997/314 237/212
+f 260/310 996/316 992/312
+f 997/314 992/312 996/316
+f 999/317 1000/318 261/319
+f 998/320 261/319 1000/318
+f 237/212 909/213 995/315
+f 1000/318 995/315 909/213
+f 205/63 993/313 784/62
+f 1002/321 784/62 993/313
+f 1002/321 999/317 1001/322
+f 261/319 1001/322 999/317
+f 1003/323 1005/324 257/297
+f 984/302 257/297 1005/324
+f 965/279 1005/324 252/270
+f 1004/325 252/270 1005/324
+f 233/191 1006/326 894/195
+f 1007/327 894/195 1006/326
+f 257/297 979/298 1003/323
+f 1007/327 1003/323 979/298
+f 1008/328 1009/329 255/283
+f 969/284 255/283 1009/329
+f 889/189 1009/329 233/191
+f 1006/326 233/191 1009/329
+f 252/270 1004/325 957/269
+f 1010/330 957/269 1004/325
+f 255/283 977/294 1008/328
+f 1010/330 1008/328 977/294
+f 223/143 1011/331 856/150
+f 1013/332 856/150 1011/331
+f 254/288 971/289 1012/333
+f 1013/332 1012/333 971/289
+f 1014/334 1015/335 261/319
+f 1001/322 261/319 1015/335
+f 851/145 1015/335 223/143
+f 1011/331 223/143 1015/335
+f 240/215 1016/336 912/214
+f 1017/337 912/214 1016/336
+f 261/319 998/320 1014/334
+f 1017/337 1014/334 998/320
+f 1012/333 1018/338 254/288
+f 975/292 254/288 1018/338
+f 922/227 1018/338 240/215
+f 1016/336 240/215 1018/338
+f 1019/339 265/340 1022/341
+f 1020/342 1022/341 265/340
+f 1022/341 1021/343 991/309
+f 260/310 991/309 1021/343
+f 1023/344 266/345 1026/346
+f 1024/347 1026/346 266/345
+f 1025/348 1026/346 265/340
+f 1020/342 265/340 1026/346
+f 1030/349 1027/350 1028/351
+f 267/352 1028/351 1027/350
+f 266/345 1029/353 1024/347
+f 1030/349 1024/347 1029/353
+f 1021/343 1032/354 260/310
+f 996/316 260/310 1032/354
+f 1028/351 267/352 1032/354
+f 1031/355 1032/354 267/352
+f 269/356 1034/357 1033/358
+f 1036/359 1033/358 1034/357
+f 1036/359 1035/360 1023/361
+f 266/362 1023/361 1035/360
+f 1038/363 1040/364 270/365
+f 1037/366 270/365 1040/364
+f 1039/367 1040/364 269/356
+f 1034/357 269/356 1040/364
+f 1044/368 1041/369 1042/370
+f 271/371 1042/370 1041/369
+f 270/365 1043/372 1038/363
+f 1044/368 1038/363 1043/372
+f 1035/360 1046/373 266/362
+f 1029/374 266/362 1046/373
+f 1042/370 271/371 1046/373
+f 1045/375 1046/373 271/371
+f 1048/376 1050/377 273/378
+f 1047/379 273/378 1050/377
+f 1033/358 1050/377 269/356
+f 1049/380 269/356 1050/377
+f 274/381 1052/382 1051/383
+f 1054/384 1051/383 1052/382
+f 273/378 1053/385 1048/376
+f 1054/384 1048/376 1053/385
+f 1056/386 1058/387 275/388
+f 1055/389 275/388 1058/387
+f 1057/390 1058/387 274/381
+f 1052/382 274/381 1058/387
+f 269/356 1049/380 1039/367
+f 1060/391 1039/367 1049/380
+f 275/388 1059/392 1056/386
+f 1060/391 1056/386 1059/392
+f 1061/393 1064/394 275/388
+f 1059/392 275/388 1064/394
+f 1063/395 1064/394 279/396
+f 1062/397 279/396 1064/394
+f 277/398 1066/399 1065/400
+f 1067/401 1065/400 1066/399
+f 275/388 1055/389 1061/393
+f 1067/401 1061/393 1055/389
+f 278/402 1069/403 1068/404
+f 1071/405 1068/404 1069/403
+f 1070/406 1071/405 277/398
+f 1066/399 277/398 1071/405
+f 279/396 1062/397 1072/407
+f 1074/408 1072/407 1062/397
+f 1073/409 1074/408 278/402
+f 1069/403 278/402 1074/408
+f 1076/410 1079/411 281/412
+f 1075/413 281/412 1079/411
+f 1077/414 284/415 1079/411
+f 1078/416 1079/411 284/415
+f 1081/417 1083/418 282/419
+f 1080/420 282/419 1083/418
+f 281/412 1082/421 1076/410
+f 1083/418 1076/410 1082/421
+f 283/422 1085/423 1084/424
+f 1087/425 1084/424 1085/423
+f 282/419 1086/426 1081/417
+f 1087/425 1081/417 1086/426
+f 284/415 1077/414 1088/427
+f 1090/428 1088/427 1077/414
+f 1090/428 1085/423 1089/429
+f 283/422 1089/429 1085/423
+f 1092/430 1095/431 286/432
+f 1091/433 286/432 1095/431
+f 1095/431 1093/434 1094/435
+f 289/436 1094/435 1093/434
+f 1097/437 1099/438 287/439
+f 1096/440 287/439 1099/438
+f 286/432 1098/441 1092/430
+f 1099/438 1092/430 1098/441
+f 288/442 1101/443 1100/444
+f 1103/445 1100/444 1101/443
+f 287/439 1102/446 1097/437
+f 1103/445 1097/437 1102/446
+f 289/436 1093/434 1104/447
+f 1106/448 1104/447 1093/434
+f 1101/443 288/442 1106/448
+f 1105/449 1106/448 288/442
+f 225/450 1107/451 867/452
+f 1110/453 867/452 1107/451
+f 292/454 1109/455 1108/456
+f 1110/453 1108/456 1109/455
+f 1112/457 1113/458 291/459
+f 1111/460 291/459 1113/458
+f 861/461 1113/458 225/450
+f 1107/451 225/450 1113/458
+f 289/436 1114/462 1094/435
+f 1116/463 1094/435 1114/462
+f 291/459 1115/464 1112/457
+f 1116/463 1112/457 1115/464
+f 1108/456 1118/465 292/454
+f 1117/466 292/454 1118/465
+f 289/436 1104/447 1114/462
+f 1118/465 1114/462 1104/447
+f 1120/467 1123/468 294/469
+f 1119/470 294/469 1123/468
+f 1122/471 1123/468 296/472
+f 1121/473 296/472 1123/468
+f 1127/474 1124/475 1125/476
+f 295/477 1125/476 1124/475
+f 294/469 1126/478 1120/467
+f 1127/474 1120/467 1126/478
+f 1128/479 1130/480 287/439
+f 1102/446 287/439 1130/480
+f 295/477 1129/481 1125/476
+f 1130/480 1125/476 1129/481
+f 296/472 1121/473 1131/482
+f 1132/483 1131/482 1121/473
+f 287/439 1096/440 1128/479
+f 1132/483 1128/479 1096/440
+f 1133/484 1135/485 292/454
+f 1109/455 292/454 1135/485
+f 755/486 1135/485 195/487
+f 1134/488 195/487 1135/485
+f 288/442 1136/489 1105/449
+f 1137/490 1105/449 1136/489
+f 292/454 1117/466 1133/484
+f 1137/490 1133/484 1117/466
+f 295/477 1138/491 1129/481
+f 1139/492 1129/481 1138/491
+f 1136/489 288/442 1139/492
+f 1100/444 1139/492 288/442
+f 195/487 1134/488 761/493
+f 1140/494 761/493 1134/488
+f 295/477 1124/475 1138/491
+f 1140/494 1138/491 1124/475
+f 1143/495 1122/471 1141/496
+f 296/472 1141/496 1122/471
+f 1143/495 1142/497 808/498
+f 210/499 808/498 1142/497
+f 1144/500 1145/501 286/432
+f 1098/441 286/432 1145/501
+f 1141/496 296/472 1145/501
+f 1131/482 1145/501 296/472
+f 291/459 1146/502 1115/464
+f 1147/503 1115/464 1146/502
+f 286/432 1091/433 1144/500
+f 1147/503 1144/500 1091/433
+f 1142/497 1148/504 210/499
+f 798/505 210/499 1148/504
+f 1146/502 291/459 1148/504
+f 1111/460 1148/504 291/459
+f 300/506 1150/507 1149/508
+f 1152/509 1149/508 1150/507
+f 214/510 822/511 1151/512
+f 1152/509 1151/512 822/511
+f 1153/513 1155/514 281/412
+f 1082/421 281/412 1155/514
+f 1150/507 300/506 1155/514
+f 1154/515 1155/514 300/506
+f 301/516 1157/517 1156/518
+f 1158/519 1156/518 1157/517
+f 281/412 1075/413 1153/513
+f 1158/519 1153/513 1075/413
+f 1151/512 1160/520 214/510
+f 813/521 214/510 1160/520
+f 1157/517 301/516 1160/520
+f 1159/522 1160/520 301/516
+f 198/523 1161/524 772/525
+f 1164/526 772/525 1161/524
+f 1164/526 1162/527 1163/528
+f 303/529 1163/528 1162/527
+f 1165/530 1166/531 301/516
+f 1159/522 301/516 1166/531
+f 765/532 1166/531 198/523
+f 1161/524 198/523 1166/531
+f 284/415 1167/533 1078/416
+f 1168/534 1078/416 1167/533
+f 301/516 1156/518 1165/530
+f 1168/534 1165/530 1156/518
+f 1162/527 1170/535 303/529
+f 1169/536 303/529 1170/535
+f 1167/533 284/415 1170/535
+f 1088/427 1170/535 284/415
+f 305/537 1172/538 1171/539
+f 1174/540 1171/539 1172/538
+f 191/541 744/542 1173/543
+f 1174/540 1173/543 744/542
+f 1175/544 1177/545 282/419
+f 1086/426 282/419 1177/545
+f 1176/546 1177/545 305/537
+f 1172/538 305/537 1177/545
+f 300/506 1178/547 1154/515
+f 1179/548 1154/515 1178/547
+f 282/419 1080/420 1175/544
+f 1179/548 1175/544 1080/420
+f 1173/543 1180/549 191/541
+f 749/550 191/541 1180/549
+f 1149/508 1180/549 300/506
+f 1178/547 300/506 1180/549
+f 1181/551 1184/552 303/529
+f 1163/528 303/529 1184/552
+f 1183/553 1184/552 307/554
+f 1182/555 307/554 1184/552
+f 283/422 1185/556 1089/429
+f 1186/557 1089/429 1185/556
+f 303/529 1169/536 1181/551
+f 1186/557 1181/551 1169/536
+f 305/537 1187/558 1176/546
+f 1188/559 1176/546 1187/558
+f 283/422 1084/424 1185/556
+f 1188/559 1185/556 1084/424
+f 307/554 1182/555 1189/560
+f 1190/561 1189/560 1182/555
+f 1171/539 1190/561 305/537
+f 1187/558 305/537 1190/561
+f 1192/562 1194/563 309/564
+f 1191/565 309/564 1194/563
+f 1072/407 1194/563 279/396
+f 1193/566 279/396 1194/563
+f 1195/567 310/568 1198/569
+f 1196/570 1198/569 310/568
+f 1192/562 309/564 1198/569
+f 1197/571 1198/569 309/564
+f 1202/572 1199/573 1200/574
+f 311/575 1200/574 1199/573
+f 310/568 1201/576 1196/570
+f 1202/572 1196/570 1201/576
+f 1203/577 1205/578 270/365
+f 1043/372 270/365 1205/578
+f 1200/574 311/575 1205/578
+f 1204/579 1205/578 311/575
+f 279/396 1193/566 1063/395
+f 1206/580 1063/395 1193/566
+f 270/365 1037/366 1203/577
+f 1206/580 1203/577 1037/366
+f 1208/581 1211/582 313/583
+f 1207/584 313/583 1211/582
+f 1210/585 1211/582 316/586
+f 1209/587 316/586 1211/582
+f 314/588 1213/589 1212/590
+f 1215/591 1212/590 1213/589
+f 313/583 1214/592 1208/581
+f 1215/591 1208/581 1214/592
+f 315/593 1217/594 1216/595
+f 1219/596 1216/595 1217/594
+f 1219/596 1213/589 1218/597
+f 314/588 1218/597 1213/589
+f 1209/587 1222/598 316/586
+f 1220/599 316/586 1222/598
+f 1221/600 1222/598 315/593
+f 1217/594 315/593 1222/598
+f 1223/601 1225/602 310/603
+f 1201/604 310/603 1225/602
+f 1212/590 1225/602 314/588
+f 1224/605 314/588 1225/602
+f 318/606 1227/607 1226/608
+f 1228/609 1226/608 1227/607
+f 1228/609 1223/601 1195/610
+f 310/603 1195/610 1223/601
+f 1230/611 1232/612 319/613
+f 1229/614 319/613 1232/612
+f 1231/615 1232/612 318/606
+f 1227/607 318/606 1232/612
+f 314/588 1224/605 1218/597
+f 1234/616 1218/597 1224/605
+f 319/613 1233/617 1230/611
+f 1234/616 1230/611 1233/617
+f 1236/618 1238/619 321/620
+f 1235/621 321/620 1238/619
+f 1237/622 319/613 1238/619
+f 1229/614 1238/619 319/613
+f 322/623 1240/624 1239/625
+f 1242/626 1239/625 1240/624
+f 321/620 1241/627 1236/618
+f 1242/626 1236/618 1241/627
+f 315/593 1243/628 1221/600
+f 1245/629 1221/600 1243/628
+f 1240/624 322/623 1245/629
+f 1244/630 1245/629 322/623
+f 319/613 1237/622 1233/617
+f 1246/631 1233/617 1237/622
+f 1246/631 1243/628 1216/595
+f 315/593 1216/595 1243/628
+f 324/632 1248/633 1247/634
+f 1251/635 1247/634 1248/633
+f 326/636 1250/637 1249/638
+f 1251/635 1249/638 1250/637
+f 1253/639 1255/640 325/641
+f 1252/642 325/641 1255/640
+f 1254/643 1255/640 324/632
+f 1248/633 324/632 1255/640
+f 238/217 1256/644 920/226
+f 1258/645 920/226 1256/644
+f 325/641 1257/646 1253/639
+f 1258/645 1253/639 1257/646
+f 1249/638 1260/647 326/636
+f 1259/648 326/636 1260/647
+f 914/219 1260/647 238/217
+f 1256/644 238/217 1260/647
+f 1265/649 1261/650 1262/651
+f 328/652 1262/651 1261/650
+f 330/653 1264/654 1263/655
+f 1265/649 1263/655 1264/654
+f 1267/656 1269/657 329/658
+f 1266/659 329/658 1269/657
+f 1262/651 328/652 1269/657
+f 1268/660 1269/657 328/652
+f 251/272 1270/661 963/278
+f 1272/662 963/278 1270/661
+f 1272/662 1267/656 1271/663
+f 329/658 1271/663 1267/656
+f 1263/655 1274/664 330/653
+f 1273/665 330/653 1274/664
+f 959/274 1274/664 251/272
+f 1270/661 251/272 1274/664
+f 1278/666 1275/667 1276/668
+f 332/669 1276/668 1275/667
+f 329/658 1266/659 1277/670
+f 1278/666 1277/670 1266/659
+f 1280/671 1282/672 333/673
+f 1279/674 333/673 1282/672
+f 1276/668 332/669 1282/672
+f 1281/675 1282/672 332/669
+f 1285/676 953/266 1283/677
+f 248/262 1283/677 953/266
+f 333/673 1284/678 1280/671
+f 1285/676 1280/671 1284/678
+f 1271/663 329/658 1286/679
+f 1277/670 1286/679 329/658
+f 948/260 1286/679 248/262
+f 1283/677 248/262 1286/679
+f 335/680 1288/681 1287/682
+f 1291/683 1287/682 1288/681
+f 337/684 1290/685 1289/686
+f 1291/683 1289/686 1290/685
+f 1293/687 1295/688 336/689
+f 1292/690 336/689 1295/688
+f 1288/681 335/680 1295/688
+f 1294/691 1295/688 335/680
+f 1298/692 1183/693 1296/694
+f 307/695 1296/694 1183/693
+f 1298/692 1293/687 1297/696
+f 336/689 1297/696 1293/687
+f 1289/686 1300/697 337/684
+f 1299/698 337/684 1300/697
+f 1296/694 307/695 1300/697
+f 1189/699 1300/697 307/695
+f 1302/700 1305/701 339/702
+f 1301/703 339/702 1305/701
+f 1304/704 1305/701 341/705
+f 1303/706 341/705 1305/701
+f 340/707 1307/708 1306/709
+f 1309/710 1306/709 1307/708
+f 1309/710 1302/700 1308/711
+f 339/702 1308/711 1302/700
+f 1310/712 1312/713 294/714
+f 1126/715 294/714 1312/713
+f 1307/708 340/707 1312/713
+f 1311/716 1312/713 340/707
+f 341/705 1303/706 1313/717
+f 1314/718 1313/717 1303/706
+f 294/714 1119/719 1310/712
+f 1314/718 1310/712 1119/719
+f 1318/720 1315/721 1316/722
+f 343/723 1316/722 1315/721
+f 325/641 1252/642 1317/724
+f 1318/720 1317/724 1252/642
+f 1319/725 1321/726 330/653
+f 1264/654 330/653 1321/726
+f 1316/722 343/723 1321/726
+f 1320/727 1321/726 343/723
+f 243/242 1322/728 937/246
+f 1323/729 937/246 1322/728
+f 1323/729 1319/725 1273/665
+f 330/653 1273/665 1319/725
+f 1317/724 1324/730 325/641
+f 1257/646 325/641 1324/730
+f 933/243 1324/730 243/242
+f 1322/728 243/242 1324/730
+f 345/731 1326/732 1325/733
+f 1329/734 1325/733 1326/732
+f 346/735 1328/736 1327/737
+f 1329/734 1327/737 1328/736
+f 1330/738 1332/739 326/636
+f 1250/637 326/636 1332/739
+f 1331/740 1332/739 345/731
+f 1326/732 345/731 1332/739
+f 267/352 1333/741 1031/355
+f 1334/742 1031/355 1333/741
+f 326/636 1259/648 1330/738
+f 1334/742 1330/738 1259/648
+f 1335/743 346/735 1336/744
+f 1327/737 1336/744 346/735
+f 1027/350 1336/744 267/352
+f 1333/741 267/352 1336/744
+f 1341/745 1337/746 1338/747
+f 348/748 1338/747 1337/746
+f 349/749 1340/750 1339/751
+f 1341/745 1339/751 1340/750
+f 1342/752 1344/753 346/754
+f 1328/755 346/754 1344/753
+f 1338/747 348/748 1344/753
+f 1343/756 1344/753 348/748
+f 271/757 1345/758 1045/759
+f 1346/760 1045/759 1345/758
+f 1346/760 1342/752 1335/761
+f 346/754 1335/761 1342/752
+f 349/749 1339/751 1347/762
+f 1348/763 1347/762 1339/751
+f 1041/764 1348/763 271/757
+f 1345/758 271/757 1348/763
+f 1353/765 1349/766 1350/767
+f 351/768 1350/767 1349/766
+f 354/769 1352/770 1351/771
+f 1353/765 1351/771 1352/770
+f 1355/772 1357/773 352/774
+f 1354/775 352/774 1357/773
+f 1350/767 351/768 1357/773
+f 1356/776 1357/773 351/768
+f 353/777 1359/778 1358/779
+f 1361/780 1358/779 1359/778
+f 352/774 1360/781 1355/772
+f 1361/780 1355/772 1360/781
+f 1351/771 1364/782 354/769
+f 1362/783 354/769 1364/782
+f 1363/784 1364/782 353/777
+f 1359/778 353/777 1364/782
+f 1365/785 356/786 1369/787
+f 1366/788 1369/787 356/786
+f 1369/787 1367/789 1368/790
+f 357/791 1368/790 1367/789
+f 1370/792 1372/793 354/769
+f 1352/770 354/769 1372/793
+f 356/786 1371/794 1366/788
+f 1372/793 1366/788 1371/794
+f 1374/795 1073/409 1373/796
+f 278/402 1373/796 1073/409
+f 354/769 1362/783 1370/792
+f 1374/795 1370/792 1362/783
+f 1367/789 1376/797 357/791
+f 1375/798 357/791 1376/797
+f 278/402 1068/404 1373/796
+f 1376/797 1373/796 1068/404
+f 1381/799 1377/800 1378/801
+f 359/802 1378/801 1377/800
+f 360/803 1380/804 1379/805
+f 1381/799 1379/805 1380/804
+f 1384/806 1304/704 1382/807
+f 341/705 1382/807 1304/704
+f 1378/801 359/802 1384/806
+f 1383/808 1384/806 359/802
+f 209/87 1385/809 809/94
+f 1386/810 809/94 1385/809
+f 1382/807 341/705 1386/810
+f 1313/717 1386/810 341/705
+f 1379/805 1388/811 360/803
+f 1387/812 360/803 1388/811
+f 804/89 1388/811 209/87
+f 1385/809 209/87 1388/811
+f 1390/813 1392/814 362/815
+f 1389/816 362/815 1392/814
+f 1391/817 336/689 1392/814
+f 1292/690 1392/814 336/689
+f 363/818 1394/819 1393/820
+f 1396/821 1393/820 1394/819
+f 1396/821 1390/813 1395/822
+f 362/815 1395/822 1390/813
+f 1397/823 1399/824 199/47
+f 776/54 199/47 1399/824
+f 1398/825 1399/824 363/818
+f 1394/819 363/818 1399/824
+f 1400/826 1297/696 1391/817
+f 336/689 1391/817 1297/696
+f 199/47 770/48 1397/823
+f 1400/826 1397/823 770/48
+f 1402/827 1404/828 365/829
+f 1401/830 365/829 1404/828
+f 1403/831 363/818 1404/828
+f 1393/820 1404/828 363/818
+f 366/832 1406/833 1405/834
+f 1408/835 1405/834 1406/833
+f 1408/835 1402/827 1407/836
+f 365/829 1407/836 1402/827
+f 1409/837 1411/838 204/70
+f 793/74 204/70 1411/838
+f 1410/839 1411/838 366/832
+f 1406/833 366/832 1411/838
+f 363/818 1403/831 1398/825
+f 1412/840 1398/825 1403/831
+f 204/70 789/71 1409/837
+f 1412/840 1409/837 789/71
+f 1414/841 1416/842 368/843
+f 1413/844 368/843 1416/842
+f 1405/834 1416/842 366/832
+f 1415/845 366/832 1416/842
+f 1418/846 1420/847 369/848
+f 1417/849 369/848 1420/847
+f 1419/850 1420/847 368/843
+f 1414/841 368/843 1420/847
+f 265/340 1421/851 1025/348
+f 1423/852 1025/348 1421/851
+f 1423/852 1418/846 1422/853
+f 369/848 1422/853 1418/846
+f 366/832 1415/845 1410/839
+f 1424/854 1410/839 1415/845
+f 1424/854 1421/851 1019/339
+f 265/340 1019/339 1421/851
+f 371/855 1426/856 1425/857
+f 1428/858 1425/857 1426/856
+f 369/859 1417/860 1427/861
+f 1428/858 1427/861 1417/860
+f 372/862 1430/863 1429/864
+f 1432/865 1429/864 1430/863
+f 1431/866 1432/865 371/855
+f 1426/856 371/855 1432/865
+f 1433/867 1435/868 273/378
+f 1053/385 273/378 1435/868
+f 1434/869 1435/868 372/862
+f 1430/863 372/862 1435/868
+f 1427/861 1436/870 369/859
+f 1422/871 369/859 1436/870
+f 273/378 1047/379 1433/867
+f 1436/870 1433/867 1047/379
+f 1438/872 1440/873 374/874
+f 1437/875 374/874 1440/873
+f 1429/864 1440/873 372/862
+f 1439/876 372/862 1440/873
+f 375/877 1442/878 1441/879
+f 1444/880 1441/879 1442/878
+f 1444/880 1438/872 1443/881
+f 374/874 1443/881 1438/872
+f 1445/882 1447/883 274/381
+f 1057/390 274/381 1447/883
+f 1446/884 1447/883 375/877
+f 1442/878 375/877 1447/883
+f 372/862 1439/876 1434/869
+f 1448/885 1434/869 1439/876
+f 274/381 1051/383 1445/882
+f 1448/885 1445/882 1051/383
+f 1452/886 1449/887 1450/888
+f 377/889 1450/888 1449/887
+f 1451/890 375/877 1452/886
+f 1441/879 1452/886 375/877
+f 1453/891 1455/892 357/791
+f 1368/790 357/791 1455/892
+f 1450/888 377/889 1455/892
+f 1454/893 1455/892 377/889
+f 277/398 1456/894 1070/406
+f 1457/895 1070/406 1456/894
+f 357/791 1375/798 1453/891
+f 1457/895 1453/891 1375/798
+f 1446/884 375/877 1458/896
+f 1451/890 1458/896 375/877
+f 1065/400 1458/896 277/398
+f 1456/894 277/398 1458/896
+f 1462/897 1340/750 1459/898
+f 349/749 1459/898 1340/750
+f 1461/899 1462/897 380/900
+f 1460/901 380/900 1462/897
+f 1464/902 1204/903 1463/904
+f 311/905 1463/904 1204/903
+f 1459/898 349/749 1464/902
+f 1347/762 1464/902 349/749
+f 1465/906 1466/907 313/908
+f 1214/909 313/908 1466/907
+f 1199/910 1466/907 311/905
+f 1463/904 311/905 1466/907
+f 379/911 1468/912 1467/913
+f 1469/914 1467/913 1468/912
+f 313/908 1207/915 1465/906
+f 1469/914 1465/906 1207/915
+f 380/900 1460/901 1470/916
+f 1472/917 1470/916 1460/901
+f 1472/917 1468/912 1471/918
+f 379/911 1471/918 1468/912
+f 1363/784 353/777 1475/919
+f 1473/920 1475/919 353/777
+f 1191/565 1475/919 309/564
+f 1474/921 309/564 1475/919
+f 1477/922 1478/923 382/924
+f 1476/925 382/924 1478/923
+f 1358/779 1478/923 353/777
+f 1473/920 353/777 1478/923
+f 309/564 1474/921 1197/571
+f 1480/926 1197/571 1474/921
+f 382/924 1479/927 1477/922
+f 1480/926 1477/922 1479/927
+f 384/928 1482/929 1481/930
+f 1485/931 1481/930 1482/929
+f 387/932 1484/933 1483/934
+f 1485/931 1483/934 1484/933
+f 1487/935 1489/936 385/937
+f 1486/938 385/937 1489/936
+f 1488/939 1489/936 384/928
+f 1482/929 384/928 1489/936
+f 386/940 1491/941 1490/942
+f 1493/943 1490/942 1491/941
+f 385/937 1492/944 1487/935
+f 1493/943 1487/935 1492/944
+f 1483/934 1496/945 387/932
+f 1494/946 387/932 1496/945
+f 1491/941 386/940 1496/945
+f 1495/947 1496/945 386/940
+f 1501/948 1497/949 1498/950
+f 389/951 1498/950 1497/949
+f 392/952 1500/953 1499/954
+f 1501/948 1499/954 1500/953
+f 1503/955 1505/956 390/957
+f 1502/958 390/957 1505/956
+f 1498/950 389/951 1505/956
+f 1504/959 1505/956 389/951
+f 1507/960 1509/961 391/962
+f 1506/963 391/962 1509/961
+f 1509/961 1503/955 1508/964
+f 390/957 1508/964 1503/955
+f 1499/954 1512/965 392/952
+f 1510/966 392/952 1512/965
+f 391/962 1511/967 1507/960
+f 1512/965 1507/960 1511/967
+f 1513/968 394/969 1517/970
+f 1514/971 1517/970 394/969
+f 1516/972 1517/970 397/973
+f 1515/974 397/973 1517/970
+f 1521/975 1518/976 1519/977
+f 395/978 1519/977 1518/976
+f 1514/971 394/969 1521/975
+f 1520/979 1521/975 394/969
+f 1523/980 1525/981 396/982
+f 1522/983 396/982 1525/981
+f 1519/977 395/978 1525/981
+f 1524/984 1525/981 395/978
+f 397/973 1515/974 1526/985
+f 1528/986 1526/985 1515/974
+f 1528/986 1523/980 1527/987
+f 396/982 1527/987 1523/980
+f 399/988 1530/989 1529/990
+f 1532/991 1529/990 1530/989
+f 1502/958 1532/991 390/957
+f 1531/992 390/957 1532/991
+f 1534/993 1536/994 400/995
+f 1533/996 400/995 1536/994
+f 399/988 1535/997 1530/989
+f 1536/994 1530/989 1535/997
+f 1540/998 1537/999 1538/1000
+f 401/1001 1538/1000 1537/999
+f 400/995 1539/1002 1534/993
+f 1540/998 1534/993 1539/1002
+f 1531/992 1542/1003 390/957
+f 1508/964 390/957 1542/1003
+f 1538/1000 401/1001 1542/1003
+f 1541/1004 1542/1003 401/1001
+f 403/1005 1544/1006 1543/1007
+f 1546/1008 1543/1007 1544/1006
+f 1546/1008 1545/1009 1533/996
+f 400/995 1533/996 1545/1009
+f 395/978 1547/1010 1524/984
+f 1549/1011 1524/984 1547/1010
+f 1549/1011 1544/1006 1548/1012
+f 403/1005 1548/1012 1544/1006
+f 1551/1013 1552/1014 404/1015
+f 1550/1016 404/1015 1552/1014
+f 1518/976 1552/1014 395/978
+f 1547/1010 395/978 1552/1014
+f 1545/1009 1554/1017 400/995
+f 1539/1002 400/995 1554/1017
+f 1554/1017 1551/1013 1553/1018
+f 404/1015 1553/1018 1551/1013
+f 1555/1019 1558/1020 392/952
+f 1500/953 392/952 1558/1020
+f 407/1021 1557/1022 1556/1023
+f 1558/1020 1556/1023 1557/1022
+f 397/973 1559/1024 1516/972
+f 1560/1025 1516/972 1559/1024
+f 392/952 1510/966 1555/1019
+f 1560/1025 1555/1019 1510/966
+f 406/1026 1562/1027 1561/1028
+f 1563/1029 1561/1028 1562/1027
+f 1526/985 1563/1029 397/973
+f 1559/1024 397/973 1563/1029
+f 1556/1023 1566/1030 407/1021
+f 1564/1031 407/1021 1566/1030
+f 1565/1032 1566/1030 406/1026
+f 1562/1027 406/1026 1566/1030
+f 1567/1033 1570/1034 407/1035
+f 1557/1036 407/1035 1570/1034
+f 1568/1037 410/1038 1570/1034
+f 1569/1039 1570/1034 410/1038
+f 409/1040 1572/1041 1571/1042
+f 1573/1043 1571/1042 1572/1041
+f 407/1035 1564/1044 1567/1033
+f 1573/1043 1567/1033 1564/1044
+f 1574/1045 1576/1046 321/620
+f 1241/627 321/620 1576/1046
+f 1575/1047 1576/1046 409/1040
+f 1572/1041 409/1040 1576/1046
+f 318/606 1577/1048 1231/615
+f 1578/1049 1231/615 1577/1048
+f 321/620 1235/621 1574/1045
+f 1578/1049 1574/1045 1235/621
+f 1568/1037 1580/1050 410/1038
+f 1579/1051 410/1038 1580/1050
+f 1226/608 1580/1050 318/606
+f 1577/1048 318/606 1580/1050
+f 1581/1052 412/1053 1584/1054
+f 1582/1055 1584/1054 412/1053
+f 1561/1056 1584/1054 406/1057
+f 1583/1058 406/1057 1584/1054
+f 1587/1059 1210/585 1585/1060
+f 316/586 1585/1060 1210/585
+f 1587/1059 1582/1055 1586/1061
+f 412/1053 1586/1061 1582/1055
+f 322/623 1588/1062 1244/630
+f 1589/1063 1244/630 1588/1062
+f 1589/1063 1585/1060 1220/599
+f 316/586 1220/599 1585/1060
+f 1591/1064 1575/1047 1590/1065
+f 409/1040 1590/1065 1575/1047
+f 322/623 1239/625 1588/1062
+f 1591/1064 1588/1062 1239/625
+f 406/1057 1583/1058 1565/1066
+f 1592/1067 1565/1066 1583/1058
+f 409/1040 1571/1042 1590/1065
+f 1592/1067 1590/1065 1571/1042
+f 414/1068 1594/1069 1593/1070
+f 1596/1071 1593/1070 1594/1069
+f 396/1072 1522/1073 1595/1074
+f 1596/1071 1595/1074 1522/1073
+f 415/1075 1598/1076 1597/1077
+f 1600/1078 1597/1077 1598/1076
+f 1600/1078 1594/1069 1599/1079
+f 414/1068 1599/1079 1594/1069
+f 1601/1080 1603/1081 379/911
+f 1471/918 379/911 1603/1081
+f 1602/1082 1603/1081 415/1075
+f 1598/1076 415/1075 1603/1081
+f 1605/1083 1586/1084 1604/1085
+f 412/1086 1604/1085 1586/1084
+f 379/911 1467/913 1601/1080
+f 1605/1083 1601/1080 1467/913
+f 1527/1087 396/1072 1606/1088
+f 1595/1074 1606/1088 396/1072
+f 1604/1085 412/1086 1606/1088
+f 1581/1089 1606/1088 412/1086
+f 1608/1090 1611/1091 417/1092
+f 1607/1093 417/1092 1611/1091
+f 1609/1094 419/1095 1611/1091
+f 1610/1096 1611/1091 419/1095
+f 1612/1097 1614/1098 399/1099
+f 1535/1100 399/1099 1614/1098
+f 1614/1098 1608/1090 1613/1101
+f 417/1092 1613/1101 1608/1090
+f 418/1102 1616/1103 1615/1104
+f 1617/1105 1615/1104 1616/1103
+f 1612/1097 399/1099 1617/1105
+f 1529/1106 1617/1105 399/1099
+f 1618/1107 1620/1108 352/774
+f 1360/781 352/774 1620/1108
+f 1619/1109 1620/1108 418/1102
+f 1616/1103 418/1102 1620/1108
+f 419/1095 1609/1094 1621/1110
+f 1622/1111 1621/1110 1609/1094
+f 352/774 1354/775 1618/1107
+f 1622/1111 1618/1107 1354/775
+f 414/1068 1623/1112 1599/1079
+f 1626/1113 1599/1079 1623/1112
+f 1625/1114 1626/1113 421/1115
+f 1624/1116 421/1115 1626/1113
+f 1548/1117 403/1118 1628/1119
+f 1627/1120 1628/1119 403/1118
+f 1593/1070 1628/1119 414/1068
+f 1623/1112 414/1068 1628/1119
+f 1629/1121 1630/1122 417/1123
+f 1613/1124 417/1123 1630/1122
+f 1627/1120 403/1118 1630/1122
+f 1543/1125 1630/1122 403/1118
+f 1632/1126 1631/1127 1624/1116
+f 421/1115 1624/1116 1631/1127
+f 417/1123 1607/1128 1629/1121
+f 1632/1126 1629/1121 1607/1128
+f 418/1102 1633/1129 1619/1109
+f 1635/1130 1619/1109 1633/1129
+f 382/924 1476/925 1634/1131
+f 1635/1130 1634/1131 1476/925
+f 389/1132 1636/1133 1504/1134
+f 1637/1135 1504/1134 1636/1133
+f 1615/1104 1637/1135 418/1102
+f 1633/1129 418/1102 1637/1135
+f 1638/1136 1639/1137 410/1138
+f 1569/1139 410/1138 1639/1137
+f 1497/1140 1639/1137 389/1132
+f 1636/1133 389/1132 1639/1137
+f 1634/1131 1640/1141 382/924
+f 1479/927 382/924 1640/1141
+f 1579/1142 1640/1141 410/1138
+f 1638/1136 410/1138 1640/1141
+f 1644/1143 1641/1144 1642/1145
+f 424/1146 1642/1145 1641/1144
+f 391/962 1506/963 1643/1147
+f 1644/1143 1643/1147 1506/963
+f 384/928 1645/1148 1488/939
+f 1647/1149 1488/939 1645/1148
+f 424/1146 1646/1150 1642/1145
+f 1647/1149 1642/1145 1646/1150
+f 425/1151 1649/1152 1648/1153
+f 1650/1154 1648/1153 1649/1152
+f 384/928 1481/930 1645/1148
+f 1650/1154 1645/1148 1481/930
+f 1643/1147 1652/1155 391/962
+f 1511/967 391/962 1652/1155
+f 1651/1156 1652/1155 425/1151
+f 1649/1152 425/1151 1652/1155
+f 1653/1157 427/1158 1656/1159
+f 1654/1160 1656/1159 427/1158
+f 1537/999 1656/1159 401/1001
+f 1655/1161 401/1001 1656/1159
+f 385/937 1657/1162 1492/944
+f 1659/1163 1492/944 1657/1162
+f 427/1158 1658/1164 1654/1160
+f 1659/1163 1654/1160 1658/1164
+f 424/1146 1660/1165 1646/1150
+f 1661/1166 1646/1150 1660/1165
+f 385/937 1486/938 1657/1162
+f 1661/1166 1657/1162 1486/938
+f 401/1001 1655/1161 1541/1004
+f 1662/1167 1541/1004 1655/1161
+f 1641/1144 1662/1167 424/1146
+f 1660/1165 424/1146 1662/1167
+f 1666/1168 1663/1169 1664/1170
+f 429/1171 1664/1170 1663/1169
+f 404/1015 1550/1016 1665/1172
+f 1666/1168 1665/1172 1550/1016
+f 386/940 1667/1173 1495/947
+f 1669/1174 1495/947 1667/1173
+f 429/1171 1668/1175 1664/1170
+f 1669/1174 1664/1170 1668/1175
+f 1670/1176 1671/1177 427/1158
+f 1658/1164 427/1158 1671/1177
+f 1667/1173 386/940 1671/1177
+f 1490/942 1671/1177 386/940
+f 1553/1018 404/1015 1672/1178
+f 1665/1172 1672/1178 404/1015
+f 1672/1178 1670/1176 1653/1157
+f 427/1158 1653/1157 1670/1176
+f 1651/1156 425/1151 1675/1179
+f 1673/1180 1675/1179 425/1151
+f 1513/968 1675/1179 394/969
+f 1674/1181 394/969 1675/1179
+f 387/932 1676/1182 1484/933
+f 1677/1183 1484/933 1676/1182
+f 425/1151 1648/1153 1673/1180
+f 1677/1183 1673/1180 1648/1153
+f 429/1171 1678/1184 1668/1175
+f 1679/1185 1668/1175 1678/1184
+f 387/932 1494/946 1676/1182
+f 1679/1185 1676/1182 1494/946
+f 394/969 1674/1181 1520/979
+f 1680/1186 1520/979 1674/1181
+f 1680/1186 1678/1184 1663/1169
+f 429/1171 1663/1169 1678/1184
+f 1681/1187 1684/1188 196/1189
+f 763/1190 196/1189 1684/1188
+f 1683/1191 1684/1188 434/1192
+f 1682/1193 434/1192 1684/1188
+f 1686/1194 1687/1195 432/1196
+f 1685/1197 432/1196 1687/1195
+f 1687/1195 1681/1187 759/1198
+f 196/1189 759/1198 1681/1187
+f 1689/1199 1691/1200 433/1201
+f 1688/1202 433/1201 1691/1200
+f 1691/1200 1686/1194 1690/1203
+f 432/1196 1690/1203 1686/1194
+f 434/1192 1682/1193 1692/1204
+f 1694/1205 1692/1204 1682/1193
+f 1694/1205 1689/1199 1693/1206
+f 433/1201 1693/1206 1689/1199
+f 340/1207 1695/1208 1311/1209
+f 1697/1210 1311/1209 1695/1208
+f 432/1196 1685/1197 1696/1211
+f 1697/1210 1696/1211 1685/1197
+f 1699/1212 1700/1213 436/1214
+f 1698/1215 436/1214 1700/1213
+f 1306/1216 1700/1213 340/1207
+f 1695/1208 340/1207 1700/1213
+f 437/1217 1702/1218 1701/1219
+f 1704/1220 1701/1219 1702/1218
+f 436/1214 1703/1221 1699/1212
+f 1704/1220 1699/1212 1703/1221
+f 1690/1203 432/1196 1706/1222
+f 1696/1211 1706/1222 432/1196
+f 1705/1223 1706/1222 437/1217
+f 1702/1218 437/1217 1706/1222
+f 1707/1224 1710/1225 337/1226
+f 1290/1227 337/1226 1710/1225
+f 1709/1228 1710/1225 441/1229
+f 1708/1230 441/1229 1710/1225
+f 439/1231 1712/1232 1711/1233
+f 1713/1234 1711/1233 1712/1232
+f 337/1226 1299/1235 1707/1224
+f 1713/1234 1707/1224 1299/1235
+f 1715/1236 1717/1237 440/1238
+f 1714/1239 440/1238 1717/1237
+f 1716/1240 1717/1237 439/1231
+f 1712/1232 439/1231 1717/1237
+f 441/1229 1708/1230 1718/1241
+f 1720/1242 1718/1241 1708/1230
+f 440/1238 1719/1243 1715/1236
+f 1720/1242 1715/1236 1719/1243
+f 1723/1244 746/1245 1721/1246
+f 190/1247 1721/1246 746/1245
+f 1722/1248 439/1231 1723/1244
+f 1711/1233 1723/1244 439/1231
+f 1725/1249 1683/1191 1724/1250
+f 434/1192 1724/1250 1683/1191
+f 1721/1246 190/1247 1725/1249
+f 740/1251 1725/1249 190/1247
+f 1728/1252 1726/1253 1727/1254
+f 443/1255 1727/1254 1726/1253
+f 434/1192 1692/1204 1724/1250
+f 1728/1252 1724/1250 1692/1204
+f 1730/1256 1716/1240 1722/1248
+f 439/1231 1722/1248 1716/1240
+f 1727/1254 443/1255 1730/1256
+f 1729/1257 1730/1256 443/1255
+f 445/1258 1732/1259 1731/1260
+f 1734/1261 1731/1260 1732/1259
+f 1701/1219 1734/1261 437/1217
+f 1733/1262 437/1217 1734/1261
+f 446/1263 1736/1264 1735/1265
+f 1738/1266 1735/1265 1736/1264
+f 1737/1267 1738/1266 445/1258
+f 1732/1259 445/1258 1738/1266
+f 1739/1268 1741/1269 433/1201
+f 1693/1206 433/1201 1741/1269
+f 1740/1270 1741/1269 446/1263
+f 1736/1264 446/1263 1741/1269
+f 437/1217 1733/1262 1705/1223
+f 1742/1271 1705/1223 1733/1262
+f 433/1201 1688/1202 1739/1268
+f 1742/1271 1739/1268 1688/1202
+f 1746/1272 1743/1273 1744/1274
+f 448/1275 1744/1274 1743/1273
+f 446/1263 1735/1265 1745/1276
+f 1746/1272 1745/1276 1735/1265
+f 1747/1277 1749/1278 440/1238
+f 1719/1243 440/1238 1749/1278
+f 1744/1274 448/1275 1749/1278
+f 1748/1279 1749/1278 448/1275
+f 443/1255 1750/1280 1729/1257
+f 1751/1281 1729/1257 1750/1280
+f 440/1238 1714/1239 1747/1277
+f 1751/1281 1747/1277 1714/1239
+f 1745/1276 1752/1282 446/1263
+f 1740/1270 446/1263 1752/1282
+f 1726/1253 1752/1282 443/1255
+f 1750/1280 443/1255 1752/1282
+f 1754/1283 1757/1284 450/1285
+f 1753/1286 450/1285 1757/1284
+f 1756/1287 1757/1284 452/1288
+f 1755/1289 452/1288 1757/1284
+f 451/1290 1759/1291 1758/1292
+f 1761/1293 1758/1292 1759/1291
+f 450/1285 1760/1294 1754/1283
+f 1761/1293 1754/1283 1760/1294
+f 452/1288 1755/1289 1762/1295
+f 1764/1296 1762/1295 1755/1289
+f 1763/1297 1764/1296 451/1290
+f 1759/1291 451/1290 1764/1296
+f 452/1288 1765/1298 1756/1287
+f 1768/1299 1756/1287 1765/1298
+f 1766/1300 455/1301 1768/1299
+f 1767/1302 1768/1299 455/1301
+f 1770/1303 1771/1304 454/1305
+f 1769/1306 454/1305 1771/1304
+f 1762/1295 1771/1304 452/1288
+f 1765/1298 452/1288 1771/1304
+f 208/82 1772/1307 806/91
+f 1774/1308 806/91 1772/1307
+f 454/1305 1773/1309 1770/1303
+f 1774/1308 1770/1303 1773/1309
+f 1775/1310 1776/1311 229/173
+f 881/180 229/173 1776/1311
+f 800/84 1776/1311 208/82
+f 1772/1307 208/82 1776/1311
+f 1778/1312 1777/1313 1766/1300
+f 455/1301 1766/1300 1777/1313
+f 229/173 875/174 1775/1310
+f 1778/1312 1775/1310 875/174
+f 455/1301 1779/1314 1767/1302
+f 1781/1315 1767/1302 1779/1314
+f 450/1285 1753/1286 1780/1316
+f 1781/1315 1780/1316 1753/1286
+f 1782/1317 1783/1318 258/303
+f 989/308 258/303 1783/1318
+f 1777/1313 1783/1318 455/1301
+f 1779/1314 455/1301 1783/1318
+f 1784/1319 1785/1320 249/251
+f 942/252 249/251 1785/1320
+f 258/303 986/305 1782/1317
+f 1785/1320 1782/1317 986/305
+f 1786/1321 457/1322 1788/1323
+f 1787/1324 1788/1323 457/1322
+f 1784/1319 249/251 1788/1323
+f 952/264 1788/1323 249/251
+f 1780/1316 1790/1325 450/1285
+f 1760/1294 450/1285 1790/1325
+f 1789/1326 1790/1325 457/1322
+f 1787/1324 457/1322 1790/1325
+f 1792/1327 1795/1328 459/1329
+f 1791/1330 459/1329 1795/1328
+f 1793/1331 460/1332 1795/1328
+f 1794/1333 1795/1328 460/1332
+f 457/1322 1796/1334 1789/1326
+f 1798/1335 1789/1326 1796/1334
+f 459/1329 1797/1336 1792/1327
+f 1798/1335 1792/1327 1797/1336
+f 1284/678 333/673 1800/1337
+f 1799/1338 1800/1337 333/673
+f 1786/1321 1800/1337 457/1322
+f 1796/1334 457/1322 1800/1337
+f 1802/1339 1801/1340 1793/1331
+f 460/1332 1793/1331 1801/1340
+f 333/673 1279/674 1799/1338
+f 1802/1339 1799/1338 1279/674
+f 360/803 1803/1341 1380/804
+f 1806/1342 1380/804 1803/1341
+f 463/1343 1805/1344 1804/1345
+f 1806/1342 1804/1345 1805/1344
+f 1807/1346 1808/1347 454/1305
+f 1773/1309 454/1305 1808/1347
+f 1387/812 1808/1347 360/803
+f 1803/1341 360/803 1808/1347
+f 462/1348 1810/1349 1809/1350
+f 1811/1351 1809/1350 1810/1349
+f 454/1305 1769/1306 1807/1346
+f 1811/1351 1807/1346 1769/1306
+f 1804/1345 1814/1352 463/1343
+f 1812/1353 463/1343 1814/1352
+f 1813/1354 1814/1352 462/1348
+f 1810/1349 462/1348 1814/1352
+f 451/1290 1815/1355 1763/1297
+f 1818/1356 1763/1297 1815/1355
+f 467/1357 1817/1358 1816/1359
+f 1818/1356 1816/1359 1817/1358
+f 1821/1360 1819/1361 1820/1362
+f 465/1363 1820/1362 1819/1361
+f 1758/1292 1821/1360 451/1290
+f 1815/1355 451/1290 1821/1360
+f 1823/1364 1825/1365 466/1366
+f 1822/1367 466/1366 1825/1365
+f 1820/1362 465/1363 1825/1365
+f 1824/1368 1825/1365 465/1363
+f 1826/1369 467/1357 1828/1370
+f 1816/1359 1828/1370 467/1357
+f 1823/1364 466/1366 1828/1370
+f 1827/1371 1828/1370 466/1366
+f 1829/1372 1831/1373 459/1329
+f 1797/1336 459/1329 1831/1373
+f 1819/1361 1831/1373 465/1363
+f 1830/1374 465/1363 1831/1373
+f 469/1375 1833/1376 1832/1377
+f 1834/1378 1832/1377 1833/1376
+f 459/1329 1791/1330 1829/1372
+f 1834/1378 1829/1372 1791/1330
+f 1836/1379 1838/1380 470/1381
+f 1835/1382 470/1381 1838/1380
+f 1837/1383 1838/1380 469/1375
+f 1833/1376 469/1375 1838/1380
+f 465/1363 1830/1374 1824/1368
+f 1840/1384 1824/1368 1830/1374
+f 470/1381 1839/1385 1836/1379
+f 1840/1384 1836/1379 1839/1385
+f 1844/1386 1813/1354 1841/1387
+f 462/1348 1841/1387 1813/1354
+f 473/1388 1843/1389 1842/1390
+f 1844/1386 1842/1390 1843/1389
+f 1845/1391 1846/1392 467/1357
+f 1817/1358 467/1357 1846/1392
+f 1841/1387 462/1348 1846/1392
+f 1809/1350 1846/1392 462/1348
+f 472/1393 1848/1394 1847/1395
+f 1849/1396 1847/1395 1848/1394
+f 1849/1396 1845/1391 1826/1369
+f 467/1357 1826/1369 1845/1391
+f 1842/1390 1852/1397 473/1388
+f 1850/1398 473/1388 1852/1397
+f 1851/1399 1852/1397 472/1393
+f 1848/1394 472/1393 1852/1397
+f 1853/1400 1855/1401 470/1381
+f 1839/1385 470/1381 1855/1401
+f 1854/1402 466/1366 1855/1401
+f 1822/1367 1855/1401 466/1366
+f 1858/1403 1856/1404 1857/1405
+f 475/1406 1857/1405 1856/1404
+f 470/1381 1835/1382 1853/1400
+f 1858/1403 1853/1400 1835/1382
+f 472/1393 1859/1407 1851/1399
+f 1861/1408 1851/1399 1859/1407
+f 475/1406 1860/1409 1857/1405
+f 1861/1408 1857/1405 1860/1409
+f 1854/1402 1862/1410 466/1366
+f 1827/1371 466/1366 1862/1410
+f 1859/1407 472/1393 1862/1410
+f 1847/1395 1862/1410 472/1393
+f 477/1411 1864/1412 1863/1413
+f 1867/1414 1863/1413 1864/1412
+f 1867/1414 1865/1415 1866/1416
+f 480/1417 1866/1416 1865/1415
+f 478/1418 1869/1419 1868/1420
+f 1871/1421 1868/1420 1869/1419
+f 1870/1422 1871/1421 477/1411
+f 1864/1412 477/1411 1871/1421
+f 1872/1423 479/1424 1875/1425
+f 1873/1426 1875/1425 479/1424
+f 1874/1427 1875/1425 478/1418
+f 1869/1419 478/1418 1875/1425
+f 1865/1415 1878/1428 480/1417
+f 1876/1429 480/1417 1878/1428
+f 479/1424 1877/1430 1873/1426
+f 1878/1428 1873/1426 1877/1430
+f 1879/1431 1882/1432 480/1417
+f 1866/1416 480/1417 1882/1432
+f 1880/1433 484/1434 1882/1432
+f 1881/1435 1882/1432 484/1434
+f 482/1436 1884/1437 1883/1438
+f 1885/1439 1883/1438 1884/1437
+f 1885/1439 1879/1431 1876/1429
+f 480/1417 1876/1429 1879/1431
+f 1887/1440 1889/1441 483/1442
+f 1886/1443 483/1442 1889/1441
+f 1888/1444 1889/1441 482/1436
+f 1884/1437 482/1436 1889/1441
+f 484/1434 1880/1433 1890/1445
+f 1892/1446 1890/1445 1880/1433
+f 483/1442 1891/1447 1887/1440
+f 1892/1446 1887/1440 1891/1447
+f 1894/1448 1897/1449 486/1450
+f 1893/1451 486/1450 1897/1449
+f 1897/1449 1895/1452 1896/1453
+f 489/1454 1896/1453 1895/1452
+f 1899/1455 1901/1456 487/1457
+f 1898/1458 487/1457 1901/1456
+f 486/1450 1900/1459 1894/1448
+f 1901/1456 1894/1448 1900/1459
+f 488/1460 1903/1461 1902/1462
+f 1905/1463 1902/1462 1903/1461
+f 487/1457 1904/1464 1899/1455
+f 1905/1463 1899/1455 1904/1464
+f 1895/1452 1908/1465 489/1454
+f 1906/1466 489/1454 1908/1465
+f 1907/1467 1908/1465 488/1460
+f 1903/1461 488/1460 1908/1465
+f 1910/1468 1913/1469 491/1470
+f 1909/1471 491/1470 1913/1469
+f 1912/1472 1913/1469 493/1473
+f 1911/1474 493/1473 1913/1469
+f 492/1475 1915/1476 1914/1477
+f 1917/1478 1914/1477 1915/1476
+f 491/1470 1916/1479 1910/1468
+f 1917/1478 1910/1468 1916/1479
+f 1918/1480 1920/1481 487/1457
+f 1904/1464 487/1457 1920/1481
+f 1919/1482 1920/1481 492/1475
+f 1915/1476 492/1475 1920/1481
+f 493/1473 1911/1474 1921/1483
+f 1922/1484 1921/1483 1911/1474
+f 487/1457 1898/1458 1918/1480
+f 1922/1484 1918/1480 1898/1458
+f 495/1485 1924/1486 1923/1487
+f 1927/1488 1923/1487 1924/1486
+f 1926/1489 1927/1488 498/1490
+f 1925/1491 498/1490 1927/1488
+f 1929/1492 1931/1493 496/1494
+f 1928/1495 496/1494 1931/1493
+f 1930/1496 1931/1493 495/1485
+f 1924/1486 495/1485 1931/1493
+f 1933/1497 1935/1498 497/1499
+f 1932/1500 497/1499 1935/1498
+f 496/1494 1934/1501 1929/1492
+f 1935/1498 1929/1492 1934/1501
+f 498/1490 1925/1491 1936/1502
+f 1938/1503 1936/1502 1925/1491
+f 497/1499 1937/1504 1933/1497
+f 1938/1503 1933/1497 1937/1504
+f 500/1505 1940/1506 1939/1507
+f 1942/1508 1939/1507 1940/1506
+f 486/1450 1893/1451 1941/1509
+f 1942/1508 1941/1509 1893/1451
+f 1944/1510 1946/1511 501/1512
+f 1943/1513 501/1512 1946/1511
+f 1945/1514 1946/1511 500/1505
+f 1940/1506 500/1505 1946/1511
+f 502/1515 1948/1516 1947/1517
+f 1950/1518 1947/1517 1948/1516
+f 501/1512 1949/1519 1944/1510
+f 1950/1518 1944/1510 1949/1519
+f 1941/1509 1952/1520 486/1450
+f 1900/1459 486/1450 1952/1520
+f 1951/1521 1952/1520 502/1515
+f 1948/1516 502/1515 1952/1520
+f 1954/1522 1956/1523 504/1524
+f 1953/1525 504/1524 1956/1523
+f 1955/1526 477/1411 1956/1523
+f 1863/1413 1956/1523 477/1411
+f 505/1527 1958/1528 1957/1529
+f 1960/1530 1957/1529 1958/1528
+f 504/1524 1959/1531 1954/1522
+f 1960/1530 1954/1522 1959/1531
+f 1961/1532 1963/1533 501/1512
+f 1949/1519 501/1512 1963/1533
+f 1962/1534 1963/1533 505/1527
+f 1958/1528 505/1527 1963/1533
+f 1964/1535 1870/1422 1955/1526
+f 477/1411 1955/1526 1870/1422
+f 501/1512 1943/1513 1961/1532
+f 1964/1535 1961/1532 1943/1513
+f 1966/1536 1969/1537 507/1538
+f 1965/1539 507/1538 1969/1537
+f 509/1540 1968/1541 1967/1542
+f 1969/1537 1967/1542 1968/1541
+f 1971/1543 1973/1544 508/1545
+f 1970/1546 508/1545 1973/1544
+f 1973/1544 1966/1536 1972/1547
+f 507/1538 1972/1547 1966/1536
+f 505/1527 1974/1548 1962/1534
+f 1976/1549 1962/1534 1974/1548
+f 1976/1549 1971/1543 1975/1550
+f 508/1545 1975/1550 1971/1543
+f 1977/1551 509/1540 1978/1552
+f 1967/1542 1978/1552 509/1540
+f 1957/1529 1978/1552 505/1527
+f 1974/1548 505/1527 1978/1552
+f 493/1473 1979/1553 1912/1472
+f 1982/1554 1912/1472 1979/1553
+f 511/1555 1981/1556 1980/1557
+f 1982/1554 1980/1557 1981/1556
+f 502/1515 1983/1558 1951/1521
+f 1984/1559 1951/1521 1983/1558
+f 1921/1483 1984/1559 493/1473
+f 1979/1553 493/1473 1984/1559
+f 1975/1550 508/1545 1986/1560
+f 1985/1561 1986/1560 508/1545
+f 1947/1517 1986/1560 502/1515
+f 1983/1558 502/1515 1986/1560
+f 1980/1557 1988/1562 511/1555
+f 1987/1563 511/1555 1988/1562
+f 508/1545 1970/1546 1985/1561
+f 1988/1562 1985/1561 1970/1546
+f 498/1490 1989/1564 1926/1489
+f 1992/1565 1926/1489 1989/1564
+f 515/1566 1991/1567 1990/1568
+f 1992/1565 1990/1568 1991/1567
+f 1994/1569 1995/1570 513/1571
+f 1993/1572 513/1571 1995/1570
+f 1936/1502 1995/1570 498/1490
+f 1989/1564 498/1490 1995/1570
+f 514/1573 1997/1574 1996/1575
+f 1999/1576 1996/1575 1997/1574
+f 513/1571 1998/1577 1994/1569
+f 1999/1576 1994/1569 1998/1577
+f 1990/1568 2002/1578 515/1566
+f 2000/1579 515/1566 2002/1578
+f 2001/1580 2002/1578 514/1573
+f 1997/1574 514/1573 2002/1578
+f 2004/1581 2007/1582 517/1583
+f 2003/1584 517/1583 2007/1582
+f 2006/1585 2007/1582 519/1586
+f 2005/1587 519/1586 2007/1582
+f 518/1588 2009/1589 2008/1590
+f 2011/1591 2008/1590 2009/1589
+f 2011/1591 2004/1581 2010/1592
+f 517/1583 2010/1592 2004/1581
+f 2012/1593 2014/1594 513/1571
+f 1998/1577 513/1571 2014/1594
+f 2013/1595 2014/1594 518/1588
+f 2009/1589 518/1588 2014/1594
+f 519/1586 2005/1587 2015/1596
+f 2016/1597 2015/1596 2005/1587
+f 513/1571 1993/1572 2012/1593
+f 2016/1597 2012/1593 1993/1572
+f 2020/1598 1968/1541 2017/1599
+f 509/1540 2017/1599 1968/1541
+f 2020/1598 2018/1600 2019/1601
+f 522/1602 2019/1601 2018/1600
+f 2021/1603 521/1604 2023/1605
+f 2022/1606 2023/1605 521/1604
+f 2023/1605 2017/1599 1977/1551
+f 509/1540 1977/1551 2017/1599
+f 518/1588 2024/1607 2013/1595
+f 2026/1608 2013/1595 2024/1607
+f 521/1604 2025/1609 2022/1606
+f 2026/1608 2022/1606 2025/1609
+f 2018/1600 2028/1610 522/1602
+f 2027/1611 522/1602 2028/1610
+f 2008/1590 2028/1610 518/1588
+f 2024/1607 518/1588 2028/1610
+f 484/1434 2029/1612 1881/1435
+f 2031/1613 1881/1435 2029/1612
+f 504/1524 1953/1525 2030/1614
+f 2031/1613 2030/1614 1953/1525
+f 514/1573 2032/1615 2001/1580
+f 2033/1616 2001/1580 2032/1615
+f 1890/1445 2033/1616 484/1434
+f 2029/1612 484/1434 2033/1616
+f 2034/1617 2035/1618 521/1604
+f 2025/1609 521/1604 2035/1618
+f 1996/1575 2035/1618 514/1573
+f 2032/1615 514/1573 2035/1618
+f 1959/1531 504/1524 2036/1619
+f 2030/1614 2036/1619 504/1524
+f 521/1604 2021/1603 2034/1617
+f 2036/1619 2034/1617 2021/1603
+f 2038/1620 2041/1621 525/1622
+f 2037/1623 525/1622 2041/1621
+f 2040/1624 2041/1621 528/1625
+f 2039/1626 528/1625 2041/1621
+f 526/1627 2043/1628 2042/1629
+f 2045/1630 2042/1629 2043/1628
+f 525/1622 2044/1631 2038/1620
+f 2045/1630 2038/1620 2044/1631
+f 2047/1632 2049/1633 527/1634
+f 2046/1635 527/1634 2049/1633
+f 2048/1636 2049/1633 526/1627
+f 2043/1628 526/1627 2049/1633
+f 2052/1637 2050/1638 2039/1626
+f 528/1625 2039/1626 2050/1638
+f 527/1634 2051/1639 2047/1632
+f 2052/1637 2047/1632 2051/1639
+f 2057/1640 2053/1641 2054/1642
+f 530/1643 2054/1642 2053/1641
+f 2056/1644 2057/1640 532/1645
+f 2055/1646 532/1645 2057/1640
+f 531/1647 2059/1648 2058/1649
+f 2061/1650 2058/1649 2059/1648
+f 2061/1650 2054/1642 2060/1651
+f 530/1643 2060/1651 2054/1642
+f 2062/1652 2064/1653 526/1627
+f 2048/1636 526/1627 2064/1653
+f 2063/1654 2064/1653 531/1647
+f 2059/1648 531/1647 2064/1653
+f 532/1645 2055/1646 2065/1655
+f 2066/1656 2065/1655 2055/1646
+f 526/1627 2042/1629 2062/1652
+f 2066/1656 2062/1652 2042/1629
+f 534/1657 2068/1658 2067/1659
+f 2071/1660 2067/1659 2068/1658
+f 537/1661 2070/1662 2069/1663
+f 2071/1660 2069/1663 2070/1662
+f 2072/1664 535/1665 2075/1666
+f 2073/1667 2075/1666 535/1665
+f 2074/1668 2075/1666 534/1657
+f 2068/1658 534/1657 2075/1666
+f 536/1669 2077/1670 2076/1671
+f 2079/1672 2076/1671 2077/1670
+f 2079/1672 2073/1667 2078/1673
+f 535/1665 2078/1673 2073/1667
+f 2080/1674 537/1661 2082/1675
+f 2069/1663 2082/1675 537/1661
+f 2081/1676 2082/1675 536/1669
+f 2077/1670 536/1669 2082/1675
+f 2084/1677 2086/1678 539/1679
+f 2083/1680 539/1679 2086/1678
+f 2053/1641 2086/1678 530/1643
+f 2085/1681 530/1643 2086/1678
+f 536/1669 2087/1682 2081/1676
+f 2089/1683 2081/1676 2087/1682
+f 539/1679 2088/1684 2084/1677
+f 2089/1683 2084/1677 2088/1684
+f 2091/1685 2092/1686 540/1687
+f 2090/1688 540/1687 2092/1686
+f 2076/1671 2092/1686 536/1669
+f 2087/1682 536/1669 2092/1686
+f 530/1643 2085/1681 2060/1651
+f 2094/1689 2060/1651 2085/1681
+f 540/1687 2093/1690 2091/1685
+f 2094/1689 2091/1685 2093/1690
+f 542/1691 2096/1692 2095/1693
+f 2098/1694 2095/1693 2096/1692
+f 507/1538 1965/1539 2097/1695
+f 2098/1694 2097/1695 1965/1539
+f 2099/1696 2101/1697 532/1645
+f 2056/1644 532/1645 2101/1697
+f 2100/1698 2101/1697 542/1691
+f 2096/1692 542/1691 2101/1697
+f 543/1699 2103/1700 2102/1701
+f 2104/1702 2102/1701 2103/1700
+f 2104/1702 2099/1696 2065/1655
+f 532/1645 2065/1655 2099/1696
+f 2097/1695 2106/1703 507/1538
+f 1972/1547 507/1538 2106/1703
+f 2105/1704 2106/1703 543/1699
+f 2103/1700 543/1699 2106/1703
+f 2010/1592 517/1583 2110/1705
+f 2107/1706 2110/1705 517/1583
+f 2109/1707 2110/1705 546/1708
+f 2108/1709 546/1708 2110/1705
+f 2113/1710 2111/1711 2112/1712
+f 545/1713 2112/1712 2111/1711
+f 517/1583 2003/1584 2107/1706
+f 2113/1710 2107/1706 2003/1584
+f 537/1661 2114/1714 2070/1662
+f 2116/1715 2070/1662 2114/1714
+f 545/1713 2115/1716 2112/1712
+f 2116/1715 2112/1712 2115/1716
+f 546/1708 2108/1709 2117/1717
+f 2118/1718 2117/1717 2108/1709
+f 2080/1674 2118/1718 537/1661
+f 2114/1714 537/1661 2118/1718
+f 2119/1719 2122/1720 491/1470
+f 1916/1479 491/1470 2122/1720
+f 2121/1721 2122/1720 549/1722
+f 2120/1723 549/1722 2122/1720
+f 2125/1724 2123/1725 2124/1726
+f 548/1727 2124/1726 2123/1725
+f 491/1470 1909/1471 2119/1719
+f 2125/1724 2119/1719 1909/1471
+f 528/1625 2126/1728 2040/1624
+f 2128/1729 2040/1624 2126/1728
+f 2124/1726 548/1727 2128/1729
+f 2127/1730 2128/1729 548/1727
+f 549/1722 2120/1723 2129/1731
+f 2130/1732 2129/1731 2120/1723
+f 2050/1638 2130/1732 528/1625
+f 2126/1728 528/1625 2130/1732
+f 2131/1733 2133/1734 539/1679
+f 2088/1684 539/1679 2133/1734
+f 2117/1717 2133/1734 546/1708
+f 2132/1735 546/1708 2133/1734
+f 542/1691 2134/1736 2100/1698
+f 2135/1737 2100/1698 2134/1736
+f 539/1679 2083/1680 2131/1733
+f 2135/1737 2131/1733 2083/1680
+f 2136/1738 2137/1739 522/1602
+f 2019/1601 522/1602 2137/1739
+f 2095/1693 2137/1739 542/1691
+f 2134/1736 542/1691 2137/1739
+f 546/1708 2132/1735 2109/1707
+f 2138/1740 2109/1707 2132/1735
+f 522/1602 2027/1611 2136/1738
+f 2138/1740 2136/1738 2027/1611
+f 543/1699 2139/1741 2105/1704
+f 2141/1742 2105/1704 2139/1741
+f 511/1555 1987/1563 2140/1743
+f 2141/1742 2140/1743 1987/1563
+f 2142/1744 2143/1745 525/1622
+f 2044/1631 525/1622 2143/1745
+f 2102/1701 2143/1745 543/1699
+f 2139/1741 543/1699 2143/1745
+f 548/1727 2144/1746 2127/1730
+f 2145/1747 2127/1730 2144/1746
+f 525/1622 2037/1623 2142/1744
+f 2145/1747 2142/1744 2037/1623
+f 2140/1743 2146/1748 511/1555
+f 1981/1556 511/1555 2146/1748
+f 2123/1725 2146/1748 548/1727
+f 2144/1746 548/1727 2146/1748
+f 2150/1749 2121/1721 2147/1750
+f 549/1722 2147/1750 2121/1721
+f 2149/1751 2150/1749 555/1752
+f 2148/1753 555/1752 2150/1749
+f 2152/1754 2153/1755 553/1756
+f 2151/1757 553/1756 2153/1755
+f 2147/1750 549/1722 2153/1755
+f 2129/1731 2153/1755 549/1722
+f 554/1758 2155/1759 2154/1760
+f 2157/1761 2154/1760 2155/1759
+f 2157/1761 2152/1754 2156/1762
+f 553/1756 2156/1762 2152/1754
+f 2158/1763 555/1752 2160/1764
+f 2148/1753 2160/1764 555/1752
+f 2159/1765 2160/1764 554/1758
+f 2155/1759 554/1758 2160/1764
+f 2164/1766 2159/1767 2161/1768
+f 554/1769 2161/1768 2159/1767
+f 559/1770 2163/1771 2162/1772
+f 2164/1766 2162/1772 2163/1771
+f 2166/1773 2167/1774 557/1775
+f 2165/1776 557/1775 2167/1774
+f 2161/1768 554/1769 2167/1774
+f 2154/1777 2167/1774 554/1769
+f 558/1778 2169/1779 2168/1780
+f 2171/1781 2168/1780 2169/1779
+f 2171/1781 2166/1773 2170/1782
+f 557/1775 2170/1782 2166/1773
+f 2162/1772 2174/1783 559/1770
+f 2172/1784 559/1770 2174/1783
+f 2173/1785 2174/1783 558/1778
+f 2169/1779 558/1778 2174/1783
+f 2175/1786 2178/1787 559/1770
+f 2163/1771 559/1770 2178/1787
+f 2177/1788 2178/1787 563/1789
+f 2176/1790 563/1789 2178/1787
+f 561/1791 2180/1792 2179/1793
+f 2181/1794 2179/1793 2180/1792
+f 559/1770 2172/1784 2175/1786
+f 2181/1794 2175/1786 2172/1784
+f 2183/1795 2185/1796 562/1797
+f 2182/1798 562/1797 2185/1796
+f 2184/1799 2185/1796 561/1791
+f 2180/1792 561/1791 2185/1796
+f 563/1789 2176/1790 2186/1800
+f 2188/1801 2186/1800 2176/1790
+f 562/1797 2187/1802 2183/1795
+f 2188/1801 2183/1795 2187/1802
+f 2190/1803 2192/1804 565/1805
+f 2189/1806 565/1805 2192/1804
+f 2179/1793 2192/1804 561/1791
+f 2191/1807 561/1791 2192/1804
+f 2194/1808 2196/1809 566/1810
+f 2193/1811 566/1810 2196/1809
+f 565/1805 2195/1812 2190/1803
+f 2196/1809 2190/1803 2195/1812
+f 2198/1813 2200/1814 567/1815
+f 2197/1816 567/1815 2200/1814
+f 566/1810 2199/1817 2194/1808
+f 2200/1814 2194/1808 2199/1817
+f 561/1791 2191/1807 2184/1799
+f 2202/1818 2184/1799 2191/1807
+f 567/1815 2201/1819 2198/1813
+f 2202/1818 2198/1813 2201/1819
+f 2204/1820 2207/1821 569/1822
+f 2203/1823 569/1822 2207/1821
+f 2205/1824 572/1825 2207/1821
+f 2206/1826 2207/1821 572/1825
+f 2209/1827 2211/1828 570/1829
+f 2208/1830 570/1829 2211/1828
+f 569/1822 2210/1831 2204/1820
+f 2211/1828 2204/1820 2210/1831
+f 571/1832 2213/1833 2212/1834
+f 2215/1835 2212/1834 2213/1833
+f 570/1829 2214/1836 2209/1827
+f 2215/1835 2209/1827 2214/1836
+f 572/1825 2205/1824 2216/1837
+f 2218/1838 2216/1837 2205/1824
+f 2213/1833 571/1832 2218/1838
+f 2217/1839 2218/1838 571/1832
+f 2220/1840 2223/1841 574/1842
+f 2219/1843 574/1842 2223/1841
+f 2221/1844 577/1845 2223/1841
+f 2222/1846 2223/1841 577/1845
+f 2225/1847 2227/1848 575/1849
+f 2224/1850 575/1849 2227/1848
+f 574/1842 2226/1851 2220/1840
+f 2227/1848 2220/1840 2226/1851
+f 576/1852 2229/1853 2228/1854
+f 2231/1855 2228/1854 2229/1853
+f 575/1849 2230/1856 2225/1847
+f 2231/1855 2225/1847 2230/1856
+f 577/1845 2221/1844 2232/1857
+f 2234/1858 2232/1857 2221/1844
+f 2229/1853 576/1852 2234/1858
+f 2233/1859 2234/1858 576/1852
+f 579/1860 2236/1861 2235/1862
+f 2238/1863 2235/1862 2236/1861
+f 515/1864 2000/1865 2237/1866
+f 2238/1863 2237/1866 2000/1865
+f 574/1842 2239/1867 2226/1851
+f 2241/1868 2226/1851 2239/1867
+f 2236/1861 579/1860 2241/1868
+f 2240/1869 2241/1868 579/1860
+f 580/1870 2243/1871 2242/1872
+f 2244/1873 2242/1872 2243/1871
+f 574/1842 2219/1843 2239/1867
+f 2244/1873 2239/1867 2219/1843
+f 2237/1866 2246/1874 515/1864
+f 1991/1875 515/1864 2246/1874
+f 2243/1871 580/1870 2246/1874
+f 2245/1876 2246/1874 580/1870
+f 2248/1877 2251/1878 582/1879
+f 2247/1880 582/1879 2251/1878
+f 2250/1881 2251/1878 584/1882
+f 2249/1883 584/1882 2251/1878
+f 576/1852 2252/1884 2233/1859
+f 2254/1885 2233/1859 2252/1884
+f 582/1879 2253/1886 2248/1877
+f 2254/1885 2248/1877 2253/1886
+f 583/1887 2256/1888 2255/1889
+f 2257/1890 2255/1889 2256/1888
+f 2252/1884 576/1852 2257/1890
+f 2228/1854 2257/1890 576/1852
+f 584/1882 2249/1883 2258/1891
+f 2260/1892 2258/1891 2249/1883
+f 2260/1892 2256/1888 2259/1893
+f 583/1887 2259/1893 2256/1888
+f 2261/1894 2263/1895 483/1896
+f 1891/1897 483/1896 2263/1895
+f 2235/1862 2263/1895 579/1860
+f 2262/1898 579/1860 2263/1895
+f 583/1887 2264/1899 2259/1893
+f 2265/1900 2259/1893 2264/1899
+f 483/1896 1886/1901 2261/1894
+f 2265/1900 2261/1894 1886/1901
+f 2266/1902 2267/1903 575/1849
+f 2230/1856 575/1849 2267/1903
+f 583/1887 2255/1889 2264/1899
+f 2267/1903 2264/1899 2255/1889
+f 579/1860 2262/1898 2240/1869
+f 2268/1904 2240/1869 2262/1898
+f 575/1849 2224/1850 2266/1902
+f 2268/1904 2266/1902 2224/1850
+f 2271/1905 1930/1906 2269/1907
+f 495/1908 2269/1907 1930/1906
+f 2271/1905 2270/1909 2247/1880
+f 582/1879 2247/1880 2270/1909
+f 2272/1910 2273/1911 580/1870
+f 2245/1876 580/1870 2273/1911
+f 2269/1907 495/1908 2273/1911
+f 1923/1912 2273/1911 495/1908
+f 577/1845 2274/1913 2222/1846
+f 2275/1914 2222/1846 2274/1913
+f 580/1870 2242/1872 2272/1910
+f 2275/1914 2272/1910 2242/1872
+f 2270/1909 2276/1915 582/1879
+f 2253/1886 582/1879 2276/1915
+f 2276/1915 2274/1913 2232/1857
+f 577/1845 2232/1857 2274/1913
+f 500/1916 2277/1917 1945/1918
+f 2280/1919 1945/1918 2277/1917
+f 589/1920 2279/1921 2278/1922
+f 2280/1919 2278/1922 2279/1921
+f 2282/1923 2283/1924 588/1925
+f 2281/1926 588/1925 2283/1924
+f 1939/1927 2283/1924 500/1916
+f 2277/1917 500/1916 2283/1924
+f 572/1825 2284/1928 2206/1826
+f 2286/1929 2206/1826 2284/1928
+f 588/1925 2285/1930 2282/1923
+f 2286/1929 2282/1923 2285/1930
+f 2278/1922 2288/1931 589/1920
+f 2287/1932 589/1920 2288/1931
+f 2284/1928 572/1825 2288/1931
+f 2216/1837 2288/1931 572/1825
+f 2292/1933 2289/1934 2290/1935
+f 591/1936 2290/1935 2289/1934
+f 489/1937 1906/1938 2291/1939
+f 2292/1933 2291/1939 1906/1938
+f 2293/1940 2295/1941 569/1822
+f 2210/1831 569/1822 2295/1941
+f 2290/1935 591/1936 2295/1941
+f 2294/1942 2295/1941 591/1936
+f 588/1925 2296/1943 2285/1930
+f 2297/1944 2285/1930 2296/1943
+f 569/1822 2203/1823 2293/1940
+f 2297/1944 2293/1940 2203/1823
+f 1896/1945 489/1937 2298/1946
+f 2291/1939 2298/1946 489/1937
+f 2296/1943 588/1925 2298/1946
+f 2281/1926 2298/1946 588/1925
+f 478/1947 2299/1948 1874/1949
+f 2302/1950 1874/1949 2299/1948
+f 593/1951 2301/1952 2300/1953
+f 2302/1950 2300/1953 2301/1952
+f 2303/1954 2304/1955 589/1920
+f 2279/1921 589/1920 2304/1955
+f 1868/1956 2304/1955 478/1947
+f 2299/1948 478/1947 2304/1955
+f 571/1832 2305/1957 2217/1839
+f 2306/1958 2217/1839 2305/1957
+f 589/1920 2287/1932 2303/1954
+f 2306/1958 2303/1954 2287/1932
+f 2300/1953 2308/1959 593/1951
+f 2307/1960 593/1951 2308/1959
+f 2212/1834 2308/1959 571/1832
+f 2305/1957 571/1832 2308/1959
+f 2310/1961 2312/1962 595/1963
+f 2309/1964 595/1963 2312/1962
+f 2289/1934 2312/1962 591/1936
+f 2311/1965 591/1936 2312/1962
+f 2313/1966 2315/1967 593/1951
+f 2301/1952 593/1951 2315/1967
+f 595/1963 2314/1968 2310/1961
+f 2315/1967 2310/1961 2314/1968
+f 570/1829 2316/1969 2214/1836
+f 2317/1970 2214/1836 2316/1969
+f 593/1951 2307/1960 2313/1966
+f 2317/1970 2313/1966 2307/1960
+f 591/1936 2311/1965 2294/1942
+f 2318/1971 2294/1942 2311/1965
+f 570/1829 2208/1830 2316/1969
+f 2318/1971 2316/1969 2208/1830
+f 2319/1972 2322/1973 565/1805
+f 2195/1812 565/1805 2322/1973
+f 2321/1974 2322/1973 599/1975
+f 2320/1976 599/1975 2322/1973
+f 558/1778 2323/1977 2173/1785
+f 2324/1978 2173/1785 2323/1977
+f 565/1805 2189/1806 2319/1972
+f 2324/1978 2319/1972 2189/1806
+f 2326/1979 2327/1980 597/1981
+f 2325/1982 597/1981 2327/1980
+f 2323/1977 558/1778 2327/1980
+f 2168/1780 2327/1980 558/1778
+f 598/1983 2329/1984 2328/1985
+f 2331/1986 2328/1985 2329/1984
+f 2331/1986 2326/1979 2330/1987
+f 597/1981 2330/1987 2326/1979
+f 2320/1976 2334/1988 599/1975
+f 2332/1989 599/1975 2334/1988
+f 2333/1990 2334/1988 598/1983
+f 2329/1984 598/1983 2334/1988
+f 2335/1991 601/1992 2339/1993
+f 2336/1994 2339/1993 601/1992
+f 2338/1995 2339/1993 604/1996
+f 2337/1997 604/1996 2339/1993
+f 2341/1998 2343/1999 602/2000
+f 2340/2001 602/2000 2343/1999
+f 2336/1994 601/1992 2343/1999
+f 2342/2002 2343/1999 601/1992
+f 2345/2003 2347/2004 603/2005
+f 2344/2006 603/2005 2347/2004
+f 602/2000 2346/2007 2341/1998
+f 2347/2004 2341/1998 2346/2007
+f 604/1996 2337/1997 2348/2008
+f 2350/2009 2348/2008 2337/1997
+f 603/2005 2349/2010 2345/2003
+f 2350/2009 2345/2003 2349/2010
+f 2351/2011 2353/2012 603/2005
+f 2349/2010 603/2005 2353/2012
+f 2352/2013 598/2014 2353/2012
+f 2328/2015 2353/2012 598/2014
+f 606/2016 2355/2017 2354/2018
+f 2356/2019 2354/2018 2355/2017
+f 603/2005 2344/2006 2351/2011
+f 2356/2019 2351/2011 2344/2006
+f 2358/2020 2360/2021 607/2022
+f 2357/2023 607/2022 2360/2021
+f 2359/2024 2360/2021 606/2016
+f 2355/2017 606/2016 2360/2021
+f 2362/2025 2333/2026 2352/2013
+f 598/2014 2352/2013 2333/2026
+f 607/2022 2361/2027 2358/2020
+f 2362/2025 2358/2020 2361/2027
+f 2363/2028 2366/2029 606/2016
+f 2359/2024 606/2016 2366/2029
+f 2365/2030 2366/2029 610/2031
+f 2364/2032 610/2031 2366/2029
+f 2367/2033 2368/2034 602/2000
+f 2346/2007 602/2000 2368/2034
+f 606/2016 2354/2018 2363/2028
+f 2368/2034 2363/2028 2354/2018
+f 2370/2035 2371/2036 609/2037
+f 2369/2038 609/2037 2371/2036
+f 602/2000 2340/2001 2367/2033
+f 2371/2036 2367/2033 2340/2001
+f 610/2031 2364/2032 2372/2039
+f 2374/2040 2372/2039 2364/2032
+f 609/2037 2373/2041 2370/2035
+f 2374/2040 2370/2035 2373/2041
+f 612/2042 2376/2043 2375/2044
+f 2378/2045 2375/2044 2376/2043
+f 324/2046 1247/2047 2377/2048
+f 2378/2045 2377/2048 1247/2047
+f 2379/2049 2381/2050 527/1634
+f 2051/1639 527/1634 2381/2050
+f 2380/2051 2381/2050 612/2042
+f 2376/2043 612/2042 2381/2050
+f 613/2052 2383/2053 2382/2054
+f 2384/2055 2382/2054 2383/2053
+f 527/1634 2046/1635 2379/2049
+f 2384/2055 2379/2049 2046/1635
+f 2377/2048 2386/2056 324/2046
+f 1254/2057 324/2046 2386/2056
+f 2385/2058 2386/2056 613/2052
+f 2383/2053 613/2052 2386/2056
+f 615/2059 2388/2060 2387/2061
+f 2390/2062 2387/2061 2388/2060
+f 2390/2062 2389/2063 1261/2064
+f 328/2065 1261/2064 2389/2063
+f 2391/2066 2393/2067 540/1687
+f 2093/1690 540/1687 2393/2067
+f 2392/2068 2393/2067 615/2059
+f 2388/2060 615/2059 2393/2067
+f 2396/2069 2394/2070 2395/2071
+f 616/2072 2395/2071 2394/2070
+f 540/1687 2090/1688 2391/2066
+f 2396/2069 2391/2066 2090/1688
+f 2389/2063 2398/2073 328/2065
+f 1268/2074 328/2065 2398/2073
+f 2395/2071 616/2072 2398/2073
+f 2397/2075 2398/2073 616/2072
+f 616/2072 2399/2076 2397/2075
+f 2401/2077 2397/2075 2399/2076
+f 2401/2077 2400/2078 1275/2079
+f 332/2080 1275/2079 2400/2078
+f 2078/1673 535/1665 2403/2081
+f 2402/2082 2403/2081 535/1665
+f 2394/2070 2403/2081 616/2072
+f 2399/2076 616/2072 2403/2081
+f 618/2083 2405/2084 2404/2085
+f 2406/2086 2404/2085 2405/2084
+f 2406/2086 2402/2082 2072/1664
+f 535/1665 2072/1664 2402/2082
+f 2400/2078 2408/2087 332/2080
+f 1281/2088 332/2080 2408/2087
+f 2405/2084 618/2083 2408/2087
+f 2407/2089 2408/2087 618/2083
+f 620/2090 2410/2091 2409/2092
+f 2412/2093 2409/2092 2410/2091
+f 335/2094 1287/2095 2411/2096
+f 2412/2093 2411/2096 1287/2095
+f 2413/2097 2415/2098 595/2099
+f 2314/2100 595/2099 2415/2098
+f 2414/2101 2415/2098 620/2090
+f 2410/2091 620/2090 2415/2098
+f 2418/2102 2416/2103 2417/2104
+f 621/2105 2417/2104 2416/2103
+f 2418/2102 2413/2097 2309/2106
+f 595/2099 2309/2106 2413/2097
+f 2411/2096 2420/2107 335/2094
+f 1294/2108 335/2094 2420/2107
+f 2417/2104 621/2105 2420/2107
+f 2419/2109 2420/2107 621/2105
+f 2421/2110 623/2111 2424/2112
+f 2422/2113 2424/2112 623/2111
+f 2423/2114 339/2115 2424/2112
+f 1301/2116 2424/2112 339/2115
+f 584/2117 2425/2118 2250/2119
+f 2427/2120 2250/2119 2425/2118
+f 623/2111 2426/2121 2422/2113
+f 2427/2120 2422/2113 2426/2121
+f 2429/2122 2430/2123 624/2124
+f 2428/2125 624/2124 2430/2123
+f 2425/2118 584/2117 2430/2123
+f 2258/2126 2430/2123 584/2117
+f 2432/2127 1308/2128 2423/2114
+f 339/2115 2423/2114 1308/2128
+f 624/2124 2431/2129 2429/2122
+f 2432/2127 2429/2122 2431/2129
+f 613/2052 2433/2130 2385/2058
+f 2435/2131 2385/2058 2433/2130
+f 2435/2131 2434/2132 1315/2133
+f 343/2134 1315/2133 2434/2132
+f 2063/1654 531/1647 2437/2135
+f 2436/2136 2437/2135 531/1647
+f 2382/2054 2437/2135 613/2052
+f 2433/2130 613/2052 2437/2135
+f 2439/2137 2392/2068 2438/2138
+f 615/2059 2438/2138 2392/2068
+f 531/1647 2058/1649 2436/2136
+f 2439/2137 2436/2136 2058/1649
+f 2434/2132 2440/2139 343/2134
+f 1320/2140 343/2134 2440/2139
+f 2438/2138 615/2059 2440/2139
+f 2387/2061 2440/2139 615/2059
+f 627/2141 2442/2142 2441/2143
+f 2444/2144 2441/2143 2442/2142
+f 345/2145 1325/2146 2443/2147
+f 2444/2144 2443/2147 1325/2146
+f 2156/1762 553/1756 2447/2148
+f 2445/2149 2447/2148 553/1756
+f 2446/2150 2447/2148 627/2141
+f 2442/2142 627/2141 2447/2148
+f 612/2042 2448/2151 2380/2051
+f 2449/2152 2380/2051 2448/2151
+f 553/1756 2151/1757 2445/2149
+f 2449/2152 2445/2149 2151/1757
+f 1331/2153 345/2145 2450/2154
+f 2443/2147 2450/2154 345/2145
+f 2375/2044 2450/2154 612/2042
+f 2448/2151 612/2042 2450/2154
+f 629/2155 2452/2156 2451/2157
+f 2454/2158 2451/2157 2452/2156
+f 2454/2158 2453/2159 1337/746
+f 348/748 1337/746 2453/2159
+f 2170/2160 557/2161 2457/2162
+f 2455/2163 2457/2162 557/2161
+f 629/2155 2456/2164 2452/2156
+f 2457/2162 2452/2156 2456/2164
+f 2459/2165 2446/2166 2458/2167
+f 627/2168 2458/2167 2446/2166
+f 557/2161 2165/2169 2455/2163
+f 2459/2165 2455/2163 2165/2169
+f 2453/2159 2460/2170 348/748
+f 1343/756 348/748 2460/2170
+f 2458/2167 627/2168 2460/2170
+f 2441/2171 2460/2170 627/2168
+f 631/2172 2462/2173 2461/2174
+f 2464/2175 2461/2174 2462/2173
+f 2464/2175 2463/2176 1349/766
+f 351/768 1349/766 2463/2176
+f 2465/2177 632/2178 2468/2179
+f 2466/2180 2468/2179 632/2178
+f 2467/2181 2468/2179 631/2172
+f 2462/2173 631/2172 2468/2179
+f 633/2182 2470/2183 2469/2184
+f 2472/2185 2469/2184 2470/2183
+f 632/2178 2471/2186 2466/2180
+f 2472/2185 2466/2180 2471/2186
+f 2463/2176 2474/2187 351/768
+f 1356/776 351/768 2474/2187
+f 2473/2188 2474/2187 633/2182
+f 2470/2183 633/2182 2474/2187
+f 2478/2189 2475/2190 2476/2191
+f 635/2192 2476/2191 2475/2190
+f 1365/785 2478/2189 356/786
+f 2477/2193 356/786 2478/2189
+f 566/1810 2479/2194 2199/1817
+f 2481/2195 2199/1817 2479/2194
+f 2476/2191 635/2192 2481/2195
+f 2480/2196 2481/2195 635/2192
+f 631/2172 2482/2197 2467/2181
+f 2483/2198 2467/2181 2482/2197
+f 2193/1811 2483/2198 566/1810
+f 2479/2194 566/1810 2483/2198
+f 356/786 2477/2193 1371/794
+f 2484/2199 1371/794 2477/2193
+f 2482/2197 631/2172 2484/2199
+f 2461/2174 2484/2199 631/2172
+f 637/2200 2486/2201 2485/2202
+f 2488/2203 2485/2202 2486/2201
+f 2488/2203 2487/2204 1377/2205
+f 359/2206 1377/2205 2487/2204
+f 2489/2207 2491/2208 496/1494
+f 1934/1501 496/1494 2491/2208
+f 2490/2209 2491/2208 637/2200
+f 2486/2201 637/2200 2491/2208
+f 2492/2210 2493/2211 623/2111
+f 2426/2121 623/2111 2493/2211
+f 496/1494 1928/1495 2489/2207
+f 2493/2211 2489/2207 1928/1495
+f 2487/2204 2494/2212 359/2206
+f 1383/2213 359/2206 2494/2212
+f 2494/2212 2492/2210 2421/2110
+f 623/2111 2421/2110 2492/2210
+f 2495/2214 2497/2215 621/2105
+f 2419/2109 621/2105 2497/2215
+f 2496/2216 362/2217 2497/2215
+f 1389/2218 2497/2215 362/2217
+f 488/1460 2498/2219 1907/1467
+f 2499/2220 1907/1467 2498/2219
+f 2499/2220 2495/2214 2416/2103
+f 621/2105 2416/2103 2495/2214
+f 2501/2221 2502/2222 639/2223
+f 2500/2224 639/2223 2502/2222
+f 1902/1462 2502/2222 488/1460
+f 2498/2219 488/1460 2502/2222
+f 2504/2225 1395/2226 2496/2216
+f 362/2217 2496/2216 1395/2226
+f 639/2223 2503/2227 2501/2221
+f 2504/2225 2501/2221 2503/2227
+f 2505/2228 2507/2229 639/2223
+f 2503/2227 639/2223 2507/2229
+f 2506/2230 365/2231 2507/2229
+f 1401/2232 2507/2229 365/2231
+f 492/1475 2508/2233 1919/1482
+f 2509/2234 1919/1482 2508/2233
+f 639/2223 2500/2224 2505/2228
+f 2509/2234 2505/2228 2500/2224
+f 2511/2235 2512/2236 641/2237
+f 2510/2238 641/2237 2512/2236
+f 1914/1477 2512/2236 492/1475
+f 2508/2233 492/1475 2512/2236
+f 2514/2239 1407/2240 2506/2230
+f 365/2231 2506/2230 1407/2240
+f 641/2237 2513/2241 2511/2235
+f 2514/2239 2511/2235 2513/2241
+f 2515/2242 2517/2243 641/2237
+f 2513/2241 641/2237 2517/2243
+f 2516/2244 368/2245 2517/2243
+f 1413/2246 2517/2243 368/2245
+f 2519/2247 2149/1751 2518/2248
+f 555/1752 2518/2248 2149/1751
+f 641/2237 2510/2238 2515/2242
+f 2519/2247 2515/2242 2510/2238
+f 2522/2249 2520/2250 2521/2251
+f 643/2252 2521/2251 2520/2250
+f 555/1752 2158/1763 2518/2248
+f 2522/2249 2518/2248 2158/1763
+f 1419/2253 368/2245 2524/2254
+f 2516/2244 2524/2254 368/2245
+f 2521/2251 643/2252 2524/2254
+f 2523/2255 2524/2254 643/2252
+f 643/2256 2525/2257 2523/2258
+f 2527/2259 2523/2258 2525/2257
+f 371/855 1425/857 2526/2260
+f 2527/2259 2526/2260 1425/857
+f 563/1789 2528/2261 2177/1788
+f 2529/2262 2177/1788 2528/2261
+f 2520/2263 2529/2262 643/2256
+f 2525/2257 643/2256 2529/2262
+f 2531/2264 2532/2265 645/2266
+f 2530/2267 645/2266 2532/2265
+f 2186/1800 2532/2265 563/1789
+f 2528/2261 563/1789 2532/2265
+f 1431/866 371/855 2534/2268
+f 2526/2260 2534/2268 371/855
+f 645/2266 2533/2269 2531/2264
+f 2534/2268 2531/2264 2533/2269
+f 2535/2270 2537/2271 645/2266
+f 2533/2269 645/2266 2537/2271
+f 2536/2272 374/874 2537/2271
+f 1437/875 2537/2271 374/874
+f 562/1797 2538/2273 2187/1802
+f 2539/2274 2187/1802 2538/2273
+f 645/2266 2530/2267 2535/2270
+f 2539/2274 2535/2270 2530/2267
+f 2541/2275 2542/2276 647/2277
+f 2540/2278 647/2277 2542/2276
+f 2182/1798 2542/2276 562/1797
+f 2538/2273 562/1797 2542/2276
+f 2544/2279 1443/881 2536/2272
+f 374/874 2536/2272 1443/881
+f 647/2277 2543/2280 2541/2275
+f 2544/2279 2541/2275 2543/2280
+f 2545/2281 2547/2282 647/2277
+f 2543/2280 647/2277 2547/2282
+f 2547/2282 2546/2283 1449/887
+f 377/889 1449/887 2546/2283
+f 2548/2284 2549/2285 567/1815
+f 2201/1819 567/1815 2549/2285
+f 2540/2278 2549/2285 647/2277
+f 2545/2281 647/2277 2549/2285
+f 635/2192 2550/2286 2480/2196
+f 2551/2287 2480/2196 2550/2286
+f 567/1815 2197/1816 2548/2284
+f 2551/2287 2548/2284 2197/1816
+f 2546/2283 2552/2288 377/889
+f 1454/893 377/889 2552/2288
+f 2550/2286 635/2192 2552/2288
+f 2475/2190 2552/2288 635/2192
+f 2555/2289 1461/899 2553/2290
+f 380/900 2553/2290 1461/899
+f 2555/2289 2554/2291 2451/2157
+f 629/2155 2451/2157 2554/2291
+f 2557/2292 2558/2293 650/2294
+f 2556/2295 650/2294 2558/2293
+f 380/900 1470/916 2553/2290
+f 2558/2293 2553/2290 1470/916
+f 604/2296 2559/2297 2338/2298
+f 2561/2299 2338/2298 2559/2297
+f 650/2294 2560/2300 2557/2292
+f 2561/2299 2557/2292 2560/2300
+f 2562/2301 2563/2302 597/2303
+f 2330/2304 597/2303 2563/2302
+f 2348/2305 2563/2302 604/2296
+f 2559/2297 604/2296 2563/2302
+f 2554/2291 2564/2306 629/2155
+f 2456/2164 629/2155 2564/2306
+f 2564/2306 2562/2301 2325/2307
+f 597/2303 2325/2307 2562/2301
+f 2321/1974 599/1975 2567/2308
+f 2565/2309 2567/2308 599/1975
+f 2465/2177 2567/2308 632/2178
+f 2566/2310 632/2178 2567/2308
+f 652/2311 2569/2312 2568/2313
+f 2570/2314 2568/2313 2569/2312
+f 599/1975 2332/1989 2565/2309
+f 2570/2314 2565/2309 2332/1989
+f 2471/2186 632/2178 2572/2315
+f 2566/2310 2572/2315 632/2178
+f 2571/2316 2572/2315 652/2311
+f 2569/2312 652/2311 2572/2315
+f 654/2317 2574/2318 2573/2319
+f 2577/2320 2573/2319 2574/2318
+f 657/2321 2576/2322 2575/2323
+f 2577/2320 2575/2323 2576/2322
+f 2579/2324 2581/2325 655/2326
+f 2578/2327 655/2326 2581/2325
+f 2580/2328 2581/2325 654/2317
+f 2574/2318 654/2317 2581/2325
+f 656/2329 2583/2330 2582/2331
+f 2585/2332 2582/2331 2583/2330
+f 655/2326 2584/2333 2579/2324
+f 2585/2332 2579/2324 2584/2333
+f 2575/2323 2588/2334 657/2321
+f 2586/2335 657/2321 2588/2334
+f 2587/2336 2588/2334 656/2329
+f 2583/2330 656/2329 2588/2334
+f 659/2337 2590/2338 2589/2339
+f 2593/2340 2589/2339 2590/2338
+f 2593/2340 2591/2341 2592/2342
+f 662/2343 2592/2342 2591/2341
+f 660/2344 2595/2345 2594/2346
+f 2597/2347 2594/2346 2595/2345
+f 2590/2338 659/2337 2597/2347
+f 2596/2348 2597/2347 659/2337
+f 2601/2349 2598/2350 2599/2351
+f 661/2352 2599/2351 2598/2350
+f 2595/2345 660/2344 2601/2349
+f 2600/2353 2601/2349 660/2344
+f 2591/2341 2604/2354 662/2343
+f 2602/2355 662/2343 2604/2354
+f 2599/2351 661/2352 2604/2354
+f 2603/2356 2604/2354 661/2352
+f 2606/2357 2609/2358 664/2359
+f 2605/2360 664/2359 2609/2358
+f 2608/2361 2609/2358 667/2362
+f 2607/2363 667/2362 2609/2358
+f 2613/2364 2610/2365 2611/2366
+f 665/2367 2611/2366 2610/2365
+f 664/2359 2612/2368 2606/2357
+f 2613/2364 2606/2357 2612/2368
+f 2615/2369 2617/2370 666/2371
+f 2614/2372 666/2371 2617/2370
+f 2611/2366 665/2367 2617/2370
+f 2616/2373 2617/2370 665/2367
+f 2607/2363 2620/2374 667/2362
+f 2618/2375 667/2362 2620/2374
+f 2620/2374 2615/2369 2619/2376
+f 666/2371 2619/2376 2615/2369
+f 2603/2356 661/2352 2624/2377
+f 2621/2378 2624/2377 661/2352
+f 671/2379 2623/2380 2622/2381
+f 2624/2377 2622/2381 2623/2380
+f 2626/2382 2627/2383 669/2384
+f 2625/2385 669/2384 2627/2383
+f 2621/2378 661/2352 2627/2383
+f 2598/2350 2627/2383 661/2352
+f 670/2386 2629/2387 2628/2388
+f 2631/2389 2628/2388 2629/2387
+f 2631/2389 2626/2382 2630/2390
+f 669/2384 2630/2390 2626/2382
+f 671/2379 2622/2381 2632/2391
+f 2634/2392 2632/2391 2622/2381
+f 2629/2387 670/2386 2634/2392
+f 2633/2393 2634/2392 670/2386
+f 2638/2394 2633/2393 2635/2395
+f 670/2386 2635/2395 2633/2393
+f 674/2396 2637/2397 2636/2398
+f 2638/2394 2636/2398 2637/2397
+f 2641/2399 2639/2400 2640/2401
+f 673/2402 2640/2401 2639/2400
+f 2635/2395 670/2386 2641/2399
+f 2628/2388 2641/2399 670/2386
+f 2619/2376 666/2371 2644/2403
+f 2642/2404 2644/2403 666/2371
+f 2644/2403 2640/2401 2643/2405
+f 673/2402 2643/2405 2640/2401
+f 2646/2406 2645/2407 2636/2398
+f 674/2396 2636/2398 2645/2407
+f 666/2371 2614/2372 2642/2404
+f 2646/2406 2642/2404 2614/2372
+f 676/2408 2648/2409 2647/2410
+f 2650/2411 2647/2410 2648/2409
+f 2589/2339 2650/2411 659/2337
+f 2649/2412 659/2337 2650/2411
+f 2652/2413 2654/2414 677/2415
+f 2651/2416 677/2415 2654/2414
+f 2653/2417 2654/2414 676/2408
+f 2648/2409 676/2408 2654/2414
+f 2612/2368 664/2359 2657/2418
+f 2655/2419 2657/2418 664/2359
+f 677/2415 2656/2420 2652/2413
+f 2657/2418 2652/2413 2656/2420
+f 659/2337 2649/2412 2596/2348
+f 2658/2421 2596/2348 2649/2412
+f 664/2359 2605/2360 2655/2419
+f 2658/2421 2655/2419 2605/2360
+f 2660/2422 2662/2423 679/2424
+f 2659/2425 679/2424 2662/2423
+f 2661/2426 676/2427 2662/2423
+f 2647/2428 2662/2423 676/2427
+f 2663/2429 2665/2430 607/2022
+f 2361/2027 607/2022 2665/2430
+f 2664/2431 2665/2430 679/2424
+f 2660/2422 679/2424 2665/2430
+f 610/2031 2666/2432 2365/2030
+f 2667/2433 2365/2030 2666/2432
+f 607/2022 2357/2023 2663/2429
+f 2667/2433 2663/2429 2357/2023
+f 2669/2434 2670/2435 680/2436
+f 2668/2437 680/2436 2670/2435
+f 2372/2039 2670/2435 610/2031
+f 2666/2432 610/2031 2670/2435
+f 676/2427 2661/2426 2653/2438
+f 2672/2439 2653/2438 2661/2426
+f 680/2436 2671/2440 2669/2434
+f 2672/2439 2669/2434 2671/2440
+f 2656/2441 677/2442 2676/2443
+f 2673/2444 2676/2443 677/2442
+f 2675/2445 2676/2443 682/2446
+f 2674/2447 682/2446 2676/2443
+f 680/2436 2677/2448 2671/2440
+f 2678/2449 2671/2440 2677/2448
+f 677/2442 2651/2450 2673/2444
+f 2678/2449 2673/2444 2651/2450
+f 609/2037 2679/2451 2373/2041
+f 2680/2452 2373/2041 2679/2451
+f 2680/2452 2677/2448 2668/2437
+f 680/2436 2668/2437 2677/2448
+f 2682/2453 2342/2002 2681/2454
+f 601/1992 2681/2454 2342/2002
+f 609/2037 2369/2038 2679/2451
+f 2682/2453 2679/2451 2369/2038
+f 2684/2455 2683/2456 2674/2447
+f 682/2446 2674/2447 2683/2456
+f 2684/2455 2681/2454 2335/1991
+f 601/1992 2335/1991 2681/2454
+f 665/2457 2685/2458 2616/2459
+f 2688/2460 2616/2459 2685/2458
+f 684/2461 2687/2462 2686/2463
+f 2688/2460 2686/2463 2687/2462
+f 2689/2464 2690/2465 682/2466
+f 2675/2467 682/2466 2690/2465
+f 2610/2468 2690/2465 665/2457
+f 2685/2458 665/2457 2690/2465
+f 650/2294 2691/2469 2560/2300
+f 2692/2470 2560/2300 2691/2469
+f 2692/2470 2689/2464 2683/2471
+f 682/2466 2683/2471 2689/2464
+f 2693/2472 2694/2473 415/1075
+f 1602/1082 415/1075 2694/2473
+f 2556/2295 2694/2473 650/2294
+f 2691/2469 650/2294 2694/2473
+f 2686/2463 2696/2474 684/2461
+f 2695/2475 684/2461 2696/2474
+f 415/1075 1597/1077 2693/2472
+f 2696/2474 2693/2472 1597/1077
+f 2697/2476 2700/2477 419/1095
+f 1610/1096 419/1095 2700/2477
+f 2699/2478 2700/2477 687/2479
+f 2698/2480 687/2479 2700/2477
+f 633/2182 2701/2481 2473/2188
+f 2702/2482 2473/2188 2701/2481
+f 419/1095 1621/1110 2697/2476
+f 2702/2482 2697/2476 1621/1110
+f 2704/2483 2705/2484 686/2485
+f 2703/2486 686/2485 2705/2484
+f 2469/2184 2705/2484 633/2182
+f 2701/2481 633/2182 2705/2484
+f 2706/2487 2708/2488 671/2489
+f 2623/2490 671/2489 2708/2488
+f 686/2485 2707/2491 2704/2483
+f 2708/2488 2704/2483 2707/2491
+f 2710/2492 2709/2493 2698/2480
+f 687/2479 2698/2480 2709/2493
+f 2706/2487 671/2489 2710/2492
+f 2632/2494 2710/2492 671/2489
+f 1625/1114 421/1115 2713/2495
+f 2711/2496 2713/2495 421/1115
+f 684/2461 2695/2475 2712/2497
+f 2713/2495 2712/2497 2695/2475
+f 687/2498 2714/2499 2699/2500
+f 2715/2501 2699/2500 2714/2499
+f 2715/2501 2711/2496 1631/1127
+f 421/1115 1631/1127 2711/2496
+f 2716/2502 2717/2503 674/2504
+f 2637/2505 674/2504 2717/2503
+f 2714/2499 687/2498 2717/2503
+f 2709/2506 2717/2503 687/2498
+f 2712/2497 2718/2507 684/2461
+f 2687/2462 684/2461 2718/2507
+f 2645/2508 2718/2507 674/2504
+f 2716/2502 674/2504 2718/2507
+f 652/2311 2719/2509 2571/2316
+f 2721/2510 2571/2316 2719/2509
+f 686/2485 2703/2486 2720/2511
+f 2721/2510 2720/2511 2703/2486
+f 2664/2512 679/2513 2723/2514
+f 2722/2515 2723/2514 679/2513
+f 2568/2313 2723/2514 652/2311
+f 2719/2509 652/2311 2723/2514
+f 2724/2516 2725/2517 662/2518
+f 2592/2519 662/2518 2725/2517
+f 2659/2520 2725/2517 679/2513
+f 2722/2515 679/2513 2725/2517
+f 2720/2511 2726/2521 686/2485
+f 2707/2491 686/2485 2726/2521
+f 662/2518 2602/2522 2724/2516
+f 2726/2521 2724/2516 2602/2522
+f 660/2344 2727/2523 2600/2353
+f 2730/2524 2600/2353 2727/2523
+f 2730/2524 2728/2525 2729/2526
+f 692/2527 2729/2526 2728/2525
+f 2731/2528 691/2529 2733/2530
+f 2732/2531 2733/2530 691/2529
+f 2594/2346 2733/2530 660/2344
+f 2727/2523 660/2344 2733/2530
+f 657/2321 2734/2532 2576/2322
+f 2736/2533 2576/2322 2734/2532
+f 691/2529 2735/2534 2732/2531
+f 2736/2533 2732/2531 2735/2534
+f 692/2527 2728/2525 2737/2535
+f 2738/2536 2737/2535 2728/2525
+f 657/2321 2586/2335 2734/2532
+f 2738/2536 2734/2532 2586/2335
+f 2739/2537 2742/2538 669/2384
+f 2630/2390 669/2384 2742/2538
+f 2741/2539 2742/2538 694/2540
+f 2740/2541 694/2540 2742/2538
+f 2729/2526 692/2527 2744/2542
+f 2743/2543 2744/2542 692/2527
+f 669/2384 2625/2385 2739/2537
+f 2744/2542 2739/2537 2625/2385
+f 656/2329 2745/2544 2587/2336
+f 2746/2545 2587/2336 2745/2544
+f 692/2527 2737/2535 2743/2543
+f 2746/2545 2743/2543 2737/2535
+f 694/2540 2740/2541 2747/2546
+f 2748/2547 2747/2546 2740/2541
+f 656/2329 2582/2331 2745/2544
+f 2748/2547 2745/2544 2582/2331
+f 673/2402 2749/2548 2643/2405
+f 2752/2549 2643/2405 2749/2548
+f 2752/2549 2750/2550 2751/2551
+f 696/2552 2751/2551 2750/2550
+f 2754/2553 2741/2539 2753/2554
+f 694/2540 2753/2554 2741/2539
+f 2639/2400 2754/2553 673/2402
+f 2749/2548 673/2402 2754/2553
+f 2755/2555 2756/2556 655/2326
+f 2584/2333 655/2326 2756/2556
+f 2753/2554 694/2540 2756/2556
+f 2747/2546 2756/2556 694/2540
+f 696/2552 2750/2550 2757/2557
+f 2758/2558 2757/2557 2750/2550
+f 655/2326 2578/2327 2755/2555
+f 2758/2558 2755/2555 2578/2327
+f 2759/2559 2761/2560 667/2362
+f 2608/2361 667/2362 2761/2560
+f 2731/2528 2761/2560 691/2529
+f 2760/2561 691/2529 2761/2560
+f 2763/2562 2751/2551 2762/2563
+f 696/2552 2762/2563 2751/2551
+f 667/2362 2618/2375 2759/2559
+f 2763/2562 2759/2559 2618/2375
+f 654/2317 2764/2564 2580/2328
+f 2765/2565 2580/2328 2764/2564
+f 696/2552 2757/2557 2762/2563
+f 2765/2565 2762/2563 2757/2557
+f 691/2529 2760/2561 2735/2534
+f 2766/2566 2735/2534 2760/2561
+f 654/2317 2573/2319 2764/2564
+f 2766/2566 2764/2564 2573/2319
+f 2767/2567 699/2568 2770/2569
+f 2768/2570 2770/2569 699/2568
+f 2769/2571 482/2572 2770/2569
+f 1883/2573 2770/2569 482/2572
+f 2774/2574 2771/2575 2772/2576
+f 700/2577 2772/2576 2771/2575
+f 699/2568 2773/2578 2768/2570
+f 2774/2574 2768/2570 2773/2578
+f 2778/2579 2775/2580 2776/2581
+f 701/2582 2776/2581 2775/2580
+f 2772/2576 700/2577 2778/2579
+f 2777/2583 2778/2579 700/2577
+f 2780/2584 1888/2585 2769/2571
+f 482/2572 2769/2571 1888/2585
+f 2776/2581 701/2582 2780/2584
+f 2779/2586 2780/2584 701/2582
+f 701/2582 2781/2587 2779/2586
+f 2783/2588 2779/2586 2781/2587
+f 624/2589 2428/2590 2782/2591
+f 2783/2588 2782/2591 2428/2590
+f 2785/2592 2786/2593 703/2594
+f 2784/2595 703/2594 2786/2593
+f 2775/2580 2786/2593 701/2582
+f 2781/2587 701/2582 2786/2593
+f 436/1214 2787/2596 1703/1221
+f 2789/2597 1703/1221 2787/2596
+f 703/2594 2788/2598 2785/2592
+f 2789/2597 2785/2592 2788/2598
+f 2782/2591 2790/2599 624/2589
+f 2431/2600 624/2589 2790/2599
+f 1698/1215 2790/2599 436/1214
+f 2787/2596 436/1214 2790/2599
+f 2791/2601 2793/2602 441/1229
+f 1709/1228 441/1229 2793/2602
+f 2409/2603 2793/2602 620/2604
+f 2792/2605 620/2604 2793/2602
+f 705/2606 2795/2607 2794/2608
+f 2796/2609 2794/2608 2795/2607
+f 441/1229 1718/1241 2791/2601
+f 2796/2609 2791/2601 1718/1241
+f 2797/2610 706/2611 2800/2612
+f 2798/2613 2800/2612 706/2611
+f 2799/2614 2800/2612 705/2606
+f 2795/2607 705/2606 2800/2612
+f 620/2604 2792/2605 2414/2615
+f 2802/2616 2414/2615 2792/2605
+f 706/2611 2801/2617 2798/2613
+f 2802/2616 2798/2613 2801/2617
+f 2803/2618 2805/2619 706/2611
+f 2801/2617 706/2611 2805/2619
+f 2805/2619 2804/2620 1872/2621
+f 479/2622 1872/2621 2804/2620
+f 2807/2623 2808/2624 708/2625
+f 2806/2626 708/2625 2808/2624
+f 2808/2624 2803/2618 2797/2610
+f 706/2611 2797/2610 2803/2618
+f 699/2568 2809/2627 2773/2578
+f 2811/2628 2773/2578 2809/2627
+f 2811/2628 2807/2623 2810/2629
+f 708/2625 2810/2629 2807/2623
+f 2804/2620 2812/2630 479/2622
+f 1877/2631 479/2622 2812/2630
+f 2812/2630 2809/2627 2767/2567
+f 699/2568 2767/2567 2809/2627
+f 2813/2632 2815/2633 703/2594
+f 2788/2598 703/2594 2815/2633
+f 445/1258 1731/1260 2814/2634
+f 2815/2633 2814/2634 1731/1260
+f 700/2577 2816/2635 2777/2583
+f 2817/2636 2777/2583 2816/2635
+f 703/2594 2784/2595 2813/2632
+f 2817/2636 2813/2632 2784/2595
+f 2819/2637 2820/2638 710/2639
+f 2818/2640 710/2639 2820/2638
+f 2771/2575 2820/2638 700/2577
+f 2816/2635 700/2577 2820/2638
+f 2822/2641 1737/1267 2814/2634
+f 445/1258 2814/2634 1737/1267
+f 710/2639 2821/2642 2819/2637
+f 2822/2641 2819/2637 2821/2642
+f 710/2639 2823/2643 2821/2642
+f 2825/2644 2821/2642 2823/2643
+f 1743/1273 2825/2644 448/1275
+f 2824/2645 448/1275 2825/2644
+f 2826/2646 2827/2647 708/2625
+f 2810/2629 708/2625 2827/2647
+f 2818/2640 2827/2647 710/2639
+f 2823/2643 710/2639 2827/2647
+f 705/2606 2828/2648 2799/2614
+f 2829/2649 2799/2614 2828/2648
+f 708/2625 2806/2626 2826/2646
+f 2829/2649 2826/2646 2806/2626
+f 2824/2645 2830/2650 448/1275
+f 1748/1279 448/1275 2830/2650
+f 2794/2608 2830/2650 705/2606
+f 2828/2648 705/2606 2830/2650
+f 2832/2651 2835/2652 713/2653
+f 2831/2654 713/2653 2835/2652
+f 2834/2655 2835/2652 715/2656
+f 2833/2657 715/2656 2835/2652
+f 2836/2658 714/2659 2839/2660
+f 2837/2661 2839/2660 714/2659
+f 713/2653 2838/2662 2832/2651
+f 2839/2660 2832/2651 2838/2662
+f 715/2656 2833/2657 2840/2663
+f 2842/2664 2840/2663 2833/2657
+f 714/2659 2841/2665 2837/2661
+f 2842/2664 2837/2661 2841/2665
+f 2844/2666 2846/2667 717/2668
+f 2843/2669 717/2668 2846/2667
+f 713/2653 2831/2654 2845/2670
+f 2846/2667 2845/2670 2831/2654
+f 519/1586 2847/2671 2006/1585
+f 2849/2672 2006/1585 2847/2671
+f 2849/2672 2844/2666 2848/2673
+f 717/2668 2848/2673 2844/2666
+f 2850/2674 2851/2675 497/1499
+f 1937/1504 497/1499 2851/2675
+f 2015/1596 2851/2675 519/1586
+f 2847/2671 519/1586 2851/2675
+f 718/2676 2853/2677 2852/2678
+f 2854/2679 2852/2678 2853/2677
+f 497/1499 1932/1500 2850/2674
+f 2854/2679 2850/2674 1932/1500
+f 2845/2670 2856/2680 713/2653
+f 2838/2662 713/2653 2856/2680
+f 2855/2681 2856/2680 718/2676
+f 2853/2677 718/2676 2856/2680
+f 715/2656 2857/2682 2834/2655
+f 2859/2683 2834/2655 2857/2682
+f 717/2668 2843/2669 2858/2684
+f 2859/2683 2858/2684 2843/2669
+f 2861/2685 2862/2686 720/2687
+f 2860/2688 720/2687 2862/2686
+f 2840/2663 2862/2686 715/2656
+f 2857/2682 715/2656 2862/2686
+f 2863/2689 2865/2690 534/1657
+f 2074/1668 534/1657 2865/2690
+f 2864/2691 2865/2690 720/2687
+f 2861/2685 720/2687 2865/2690
+f 545/1713 2866/2692 2115/1716
+f 2867/2693 2115/1716 2866/2692
+f 2863/2689 534/1657 2867/2693
+f 2067/1659 2867/2693 534/1657
+f 2858/2684 2868/2694 717/2668
+f 2848/2673 717/2668 2868/2694
+f 2111/1711 2868/2694 545/1713
+f 2866/2692 545/1713 2868/2694
+f 2869/2695 2872/2696 460/2697
+f 1794/2698 460/2697 2872/2696
+f 2871/2699 2872/2696 722/2700
+f 2870/2701 722/2700 2872/2696
+f 618/2083 2873/2702 2407/2089
+f 2874/2703 2407/2089 2873/2702
+f 2874/2703 2869/2695 1801/2704
+f 460/2697 1801/2704 2869/2695
+f 2864/2691 720/2687 2876/2705
+f 2875/2706 2876/2705 720/2687
+f 2404/2085 2876/2705 618/2083
+f 2873/2702 618/2083 2876/2705
+f 722/2700 2870/2701 2877/2707
+f 2878/2708 2877/2707 2870/2701
+f 720/2687 2860/2688 2875/2706
+f 2878/2708 2875/2706 2860/2688
+f 463/2709 2879/2710 1805/2711
+f 2881/2712 1805/2711 2879/2710
+f 637/2200 2485/2202 2880/2713
+f 2881/2712 2880/2713 2485/2202
+f 2883/2714 2884/2715 724/2716
+f 2882/2717 724/2716 2884/2715
+f 1812/2718 2884/2715 463/2709
+f 2879/2710 463/2709 2884/2715
+f 718/2676 2885/2719 2855/2681
+f 2887/2720 2855/2681 2885/2719
+f 724/2716 2886/2721 2883/2714
+f 2887/2720 2883/2714 2886/2721
+f 2880/2713 2888/2722 637/2200
+f 2490/2209 637/2200 2888/2722
+f 2852/2678 2888/2722 718/2676
+f 2885/2719 718/2676 2888/2722
+f 726/2723 2890/2724 2889/2725
+f 2892/2726 2889/2725 2890/2724
+f 714/2659 2836/2658 2891/2727
+f 2892/2726 2891/2727 2836/2658
+f 2894/2728 2896/2729 727/2730
+f 2893/2731 727/2730 2896/2729
+f 2895/2732 2896/2729 726/2723
+f 2890/2724 726/2723 2896/2729
+f 2898/2733 2900/2734 728/2735
+f 2897/2736 728/2735 2900/2734
+f 2894/2728 727/2730 2900/2734
+f 2899/2737 2900/2734 727/2730
+f 2841/2665 714/2659 2902/2738
+f 2891/2727 2902/2738 714/2659
+f 2902/2738 2898/2733 2901/2739
+f 728/2735 2901/2739 2898/2733
+f 2901/2739 728/2735 2905/2740
+f 2903/2741 2905/2740 728/2735
+f 2877/2707 2905/2740 722/2700
+f 2904/2742 722/2700 2905/2740
+f 730/2743 2907/2744 2906/2745
+f 2908/2746 2906/2745 2907/2744
+f 728/2735 2897/2736 2903/2741
+f 2908/2746 2903/2741 2897/2736
+f 2909/2747 2911/2748 469/2749
+f 1837/2750 469/2749 2911/2748
+f 2910/2751 2911/2748 730/2743
+f 2907/2744 730/2743 2911/2748
+f 722/2700 2904/2742 2871/2699
+f 2912/2752 2871/2699 2904/2742
+f 469/2749 1832/2753 2909/2747
+f 2912/2752 2909/2747 1832/2753
+f 473/2754 2913/2755 1843/2756
+f 2915/2757 1843/2756 2913/2755
+f 2915/2757 2914/2758 2882/2717
+f 724/2716 2882/2717 2914/2758
+f 2917/2759 2918/2760 732/2761
+f 2916/2762 732/2761 2918/2760
+f 1850/2763 2918/2760 473/2754
+f 2913/2755 473/2754 2918/2760
+f 2921/2764 2895/2732 2919/2765
+f 726/2723 2919/2765 2895/2732
+f 732/2761 2920/2766 2917/2759
+f 2921/2764 2917/2759 2920/2766
+f 2914/2758 2922/2767 724/2716
+f 2886/2721 724/2716 2922/2767
+f 2919/2765 726/2723 2922/2767
+f 2889/2725 2922/2767 726/2723
+f 2923/2768 2925/2769 727/2730
+f 2899/2737 727/2730 2925/2769
+f 2906/2745 2925/2769 730/2743
+f 2924/2770 730/2743 2925/2769
+f 2926/2771 2927/2772 732/2761
+f 2920/2766 732/2761 2927/2772
+f 2923/2768 727/2730 2927/2772
+f 2893/2731 2927/2772 727/2730
+f 475/2773 2928/2774 1860/2775
+f 2929/2776 1860/2775 2928/2774
+f 732/2761 2916/2762 2926/2771
+f 2929/2776 2926/2771 2916/2762
+f 730/2743 2924/2770 2910/2751
+f 2930/2777 2910/2751 2924/2770
+f 2930/2777 2928/2774 1856/2778
+f 475/2773 1856/2778 2928/2774
+f 738/6 6/2779 739/1
+f 735/2 739/1 6/2779
+f 193/2780 737/7 736/3
+f 739/1 736/3 737/7
+f 743/11 742/12 740/10
+f 14/2781 740/10 742/12
+f 741/9 193/2780 743/11
+f 736/3 743/11 193/2780
+f 747/14 746/17 744/16
+f 10/2782 744/16 746/17
+f 193/2780 741/9 745/13
+f 747/14 745/13 741/9
+f 16/2783 748/18 749/20
+f 750/19 749/20 748/18
+f 745/13 750/19 193/2780
+f 737/7 193/2780 750/19
+f 754/22 735/2 751/24
+f 6/2779 751/24 735/2
+f 197/2784 753/25 752/21
+f 754/22 752/21 753/25
+f 18/2785 755/28 757/30
+f 758/29 757/30 755/28
+f 752/21 758/29 197/2784
+f 756/27 197/2784 758/29
+f 762/33 761/35 759/31
+f 13/2786 759/31 761/35
+f 197/2784 756/27 760/34
+f 762/33 760/34 756/27
+f 763/37 14/2781 764/36
+f 742/12 764/36 14/2781
+f 760/34 764/36 197/2784
+f 753/25 197/2784 764/36
+f 1/2787 765/39 768/42
+f 769/38 768/42 765/39
+f 767/44 769/38 202/2788
+f 766/40 202/2788 769/38
+f 12/2789 770/48 772/49
+f 773/46 772/49 770/48
+f 202/2788 766/40 771/45
+f 773/46 771/45 766/40
+f 774/52 777/53 11/2790
+f 776/54 11/2790 777/53
+f 771/45 777/53 202/2788
+f 775/51 202/2788 777/53
+f 24/2791 778/55 779/57
+f 780/56 779/57 778/55
+f 202/2788 775/51 767/44
+f 780/56 767/44 775/51
+f 7/2792 781/61 784/62
+f 785/59 784/62 781/61
+f 206/2793 783/64 782/58
+f 785/59 782/58 783/64
+f 779/57 788/66 24/2791
+f 787/67 24/2791 788/66
+f 782/58 788/66 206/2793
+f 786/65 206/2793 788/66
+f 11/2790 789/71 774/52
+f 791/69 774/52 789/71
+f 206/2793 786/65 790/68
+f 791/69 790/68 786/65
+f 792/72 794/73 23/2794
+f 793/74 23/2794 794/73
+f 790/68 794/73 206/2793
+f 783/64 206/2793 794/73
+f 2/2795 795/78 798/80
+f 799/76 798/80 795/78
+f 211/2796 797/81 796/75
+f 799/76 796/75 797/81
+f 800/84 803/85 27/2797
+f 802/86 27/2797 803/85
+f 796/75 803/85 211/2796
+f 801/83 211/2796 803/85
+f 804/89 807/90 19/2798
+f 806/91 19/2798 807/90
+f 211/2796 801/83 805/88
+f 807/90 805/88 801/83
+f 20/2799 808/93 809/94
+f 810/92 809/94 808/93
+f 810/92 797/81 805/88
+f 211/2796 805/88 797/81
+f 768/42 814/96 1/2787
+f 813/98 1/2787 814/96
+f 812/99 814/96 215/2800
+f 811/95 215/2800 814/96
+f 24/2791 815/103 778/55
+f 817/101 778/55 815/103
+f 215/2800 811/95 816/100
+f 817/101 816/100 811/95
+f 818/106 821/107 15/2801
+f 820/108 15/2801 821/107
+f 816/100 821/107 215/2800
+f 819/105 215/2800 821/107
+f 16/2783 822/110 823/111
+f 824/109 823/111 822/110
+f 215/2800 819/105 812/99
+f 824/109 812/99 819/105
+f 828/113 827/114 738/6
+f 6/2779 738/6 827/114
+f 218/2802 826/116 825/112
+f 828/113 825/112 826/116
+f 16/2783 823/111 748/18
+f 830/118 748/18 823/111
+f 825/112 830/118 218/2802
+f 829/117 218/2802 830/118
+f 15/2801 831/122 818/106
+f 833/120 818/106 831/122
+f 218/2802 829/117 832/119
+f 833/120 832/119 829/117
+f 834/123 836/124 32/2803
+f 835/125 32/2803 836/124
+f 832/119 836/124 218/2802
+f 826/116 218/2802 836/124
+f 841/129 840/132 837/128
+f 5/2804 837/128 840/132
+f 838/127 222/2805 841/129
+f 839/130 841/129 222/2805
+f 32/2803 835/125 843/135
+f 844/134 843/135 835/125
+f 844/134 842/133 838/127
+f 222/2805 838/127 842/133
+f 831/122 15/2801 847/136
+f 845/137 847/136 15/2801
+f 842/133 847/136 222/2805
+f 846/138 222/2805 847/136
+f 849/142 30/2806 850/140
+f 848/141 850/140 30/2806
+f 222/2805 846/138 839/130
+f 850/140 839/130 846/138
+f 851/145 854/146 7/2792
+f 781/61 7/2792 854/146
+f 853/147 854/146 224/2807
+f 852/144 224/2807 854/146
+f 857/149 856/150 849/142
+f 30/2806 849/142 856/150
+f 855/148 224/2807 857/149
+f 852/144 857/149 224/2807
+f 15/2801 820/108 845/137
+f 859/151 845/137 820/108
+f 859/151 858/152 855/148
+f 224/2807 855/148 858/152
+f 24/2791 787/67 815/103
+f 860/153 815/103 787/67
+f 858/152 860/153 224/2807
+f 853/147 224/2807 860/153
+f 861/156 864/157 2/2795
+f 795/78 2/2795 864/157
+f 863/158 864/157 228/2808
+f 862/155 228/2808 864/157
+f 18/2785 865/162 867/163
+f 868/160 867/163 865/162
+f 228/2808 862/155 866/159
+f 868/160 866/159 862/155
+f 869/166 872/167 17/2809
+f 871/168 17/2809 872/167
+f 866/159 872/167 228/2808
+f 870/165 228/2808 872/167
+f 27/2797 802/86 873/170
+f 874/169 873/170 802/86
+f 228/2808 870/165 863/158
+f 874/169 863/158 870/165
+f 879/172 878/177 875/174
+f 8/2810 875/174 878/177
+f 232/2811 877/175 876/171
+f 879/172 876/171 877/175
+f 873/170 882/179 27/2797
+f 881/180 27/2797 882/179
+f 876/171 882/179 232/2811
+f 880/178 232/2811 882/179
+f 17/2809 883/184 869/166
+f 885/182 869/166 883/184
+f 232/2811 880/178 884/181
+f 885/182 884/181 880/178
+f 887/187 29/2812 888/185
+f 886/186 888/185 29/2812
+f 884/181 888/185 232/2811
+f 877/175 232/2811 888/185
+f 837/128 5/2804 892/188
+f 889/189 892/188 5/2804
+f 891/192 892/188 235/2813
+f 890/190 235/2813 892/188
+f 29/2812 887/187 894/195
+f 895/194 894/195 887/187
+f 235/2813 890/190 893/193
+f 895/194 893/193 890/190
+f 896/198 898/199 17/2809
+f 883/184 17/2809 898/199
+f 893/193 898/199 235/2813
+f 897/197 235/2813 898/199
+f 899/201 32/2803 900/200
+f 843/135 900/200 32/2803
+f 235/2813 897/197 891/192
+f 900/200 891/192 897/197
+f 751/24 6/2779 903/203
+f 827/114 903/203 6/2779
+f 236/2814 902/204 901/202
+f 903/203 901/202 902/204
+f 32/2803 899/201 834/123
+f 905/206 834/123 899/201
+f 901/202 905/206 236/2814
+f 904/205 236/2814 905/206
+f 17/2809 871/168 896/198
+f 907/208 896/198 871/168
+f 236/2814 904/205 906/207
+f 907/208 906/207 904/205
+f 18/2785 757/30 865/162
+f 908/209 865/162 757/30
+f 906/207 908/209 236/2814
+f 902/204 236/2814 908/209
+f 913/211 912/214 909/213
+f 3/2815 909/213 912/214
+f 241/2816 911/216 910/210
+f 913/211 910/210 911/216
+f 22/2817 914/219 916/221
+f 917/220 916/221 914/219
+f 915/218 241/2816 917/220
+f 910/210 917/220 241/2816
+f 21/2818 918/222 920/226
+f 921/224 920/226 918/222
+f 921/224 919/225 915/218
+f 241/2816 915/218 919/225
+f 31/2819 922/227 923/229
+f 924/228 923/229 922/227
+f 919/225 924/228 241/2816
+f 911/216 241/2816 924/228
+f 925/233 929/231 9/2820
+f 928/235 9/2820 929/231
+f 245/2821 927/234 926/230
+f 929/231 926/230 927/234
+f 923/229 932/238 31/2819
+f 931/239 31/2819 932/238
+f 926/230 932/238 245/2821
+f 930/237 245/2821 932/238
+f 21/2818 933/243 918/222
+f 935/241 918/222 933/243
+f 245/2821 930/237 934/240
+f 935/241 934/240 930/237
+f 938/244 937/246 936/245
+f 33/2822 936/245 937/246
+f 934/240 938/244 245/2821
+f 927/234 245/2821 938/244
+f 939/249 943/250 4/2823
+f 942/252 4/2823 943/250
+f 941/253 943/250 250/2824
+f 940/248 250/2824 943/250
+f 28/2825 944/257 946/258
+f 947/255 946/258 944/257
+f 250/2824 940/248 945/254
+f 947/255 945/254 940/248
+f 948/260 951/259 25/2826
+f 950/263 25/2826 951/259
+f 945/254 951/259 250/2824
+f 949/261 250/2824 951/259
+f 26/2827 952/264 953/266
+f 954/265 953/266 952/264
+f 954/265 941/253 949/261
+f 250/2824 949/261 941/253
+f 9/2820 928/235 957/269
+f 958/268 957/269 928/235
+f 253/2828 956/271 955/267
+f 958/268 955/267 956/271
+f 959/274 961/275 33/2822
+f 936/245 33/2822 961/275
+f 955/267 961/275 253/2828
+f 960/273 253/2828 961/275
+f 25/2826 950/263 963/278
+f 964/277 963/278 950/263
+f 253/2828 960/273 962/276
+f 964/277 962/276 960/273
+f 965/279 966/280 28/2825
+f 944/257 28/2825 966/280
+f 962/276 966/280 253/2828
+f 956/271 253/2828 966/280
+f 840/132 970/282 5/2804
+f 969/284 5/2804 970/282
+f 968/285 970/282 256/2829
+f 967/281 256/2829 970/282
+f 30/2806 971/289 848/141
+f 973/287 848/141 971/289
+f 256/2829 967/281 972/286
+f 973/287 972/286 967/281
+f 975/292 31/2819 976/290
+f 931/239 976/290 31/2819
+f 972/286 976/290 256/2829
+f 974/291 256/2829 976/290
+f 9/2820 977/294 925/233
+f 978/293 925/233 977/294
+f 256/2829 974/291 968/285
+f 978/293 968/285 974/291
+f 29/2812 979/298 886/186
+f 982/296 886/186 979/298
+f 982/296 980/295 981/299
+f 259/2830 981/299 980/295
+f 946/258 985/300 28/2825
+f 984/302 28/2825 985/300
+f 980/295 985/300 259/2830
+f 983/301 259/2830 985/300
+f 986/305 988/306 4/2823
+f 939/249 4/2823 988/306
+f 259/2830 983/301 987/304
+f 988/306 987/304 983/301
+f 878/177 990/307 8/2810
+f 989/308 8/2810 990/307
+f 981/299 259/2830 990/307
+f 987/304 990/307 259/2830
+f 23/2794 991/309 792/72
+f 994/311 792/72 991/309
+f 994/311 992/312 993/313
+f 262/2831 993/313 992/312
+f 22/2817 916/221 996/316
+f 997/314 996/316 916/221
+f 992/312 997/314 262/2831
+f 995/315 262/2831 997/314
+f 998/320 1000/318 3/2815
+f 909/213 3/2815 1000/318
+f 262/2831 995/315 999/317
+f 1000/318 999/317 995/315
+f 784/62 1002/321 7/2792
+f 1001/322 7/2792 1002/321
+f 993/313 262/2831 1002/321
+f 999/317 1002/321 262/2831
+f 28/2825 984/302 965/279
+f 1005/324 965/279 984/302
+f 263/2832 1004/325 1003/323
+f 1005/324 1003/323 1004/325
+f 894/195 1007/327 29/2812
+f 979/298 29/2812 1007/327
+f 1003/323 1007/327 263/2832
+f 1006/326 263/2832 1007/327
+f 5/2804 969/284 889/189
+f 1009/329 889/189 969/284
+f 263/2832 1006/326 1008/328
+f 1009/329 1008/328 1006/326
+f 957/269 1010/330 9/2820
+f 977/294 9/2820 1010/330
+f 1008/328 1010/330 263/2832
+f 1004/325 263/2832 1010/330
+f 856/150 1013/332 30/2806
+f 971/289 30/2806 1013/332
+f 1012/333 1013/332 264/2833
+f 1011/331 264/2833 1013/332
+f 7/2792 1001/322 851/145
+f 1015/335 851/145 1001/322
+f 264/2833 1011/331 1014/334
+f 1015/335 1014/334 1011/331
+f 912/214 1017/337 3/2815
+f 998/320 3/2815 1017/337
+f 1014/334 1017/337 264/2833
+f 1016/336 264/2833 1017/337
+f 31/2819 975/292 922/227
+f 1018/338 922/227 975/292
+f 264/2833 1016/336 1012/333
+f 1018/338 1012/333 1016/336
+f 23/2794 1019/339 991/309
+f 1022/341 991/309 1019/339
+f 1020/342 268/2834 1022/341
+f 1021/343 1022/341 268/2834
+f 35/2835 1023/344 1025/348
+f 1026/346 1025/348 1023/344
+f 1026/346 1024/347 1020/342
+f 268/2834 1020/342 1024/347
+f 1029/353 34/2836 1030/349
+f 1027/350 1030/349 34/2836
+f 1024/347 1030/349 268/2834
+f 1028/351 268/2834 1030/349
+f 1032/354 1031/355 996/316
+f 22/2817 996/316 1031/355
+f 268/2834 1028/351 1021/343
+f 1032/354 1021/343 1028/351
+f 1033/358 1036/359 35/2837
+f 1023/361 35/2837 1036/359
+f 1035/360 1036/359 272/2838
+f 1034/357 272/2838 1036/359
+f 37/2839 1037/366 1039/367
+f 1040/364 1039/367 1037/366
+f 272/2838 1034/357 1038/363
+f 1040/364 1038/363 1034/357
+f 1043/372 36/2840 1044/368
+f 1041/369 1044/368 36/2840
+f 1038/363 1044/368 272/2838
+f 1042/370 272/2838 1044/368
+f 1046/373 1045/375 1029/374
+f 34/2841 1029/374 1045/375
+f 272/2838 1042/370 1035/360
+f 1046/373 1035/360 1042/370
+f 35/2837 1047/379 1033/358
+f 1050/377 1033/358 1047/379
+f 276/2842 1049/380 1048/376
+f 1050/377 1048/376 1049/380
+f 1051/383 1054/384 40/2843
+f 1053/385 40/2843 1054/384
+f 1048/376 1054/384 276/2842
+f 1052/382 276/2842 1054/384
+f 41/2844 1055/389 1057/390
+f 1058/387 1057/390 1055/389
+f 276/2842 1052/382 1056/386
+f 1058/387 1056/386 1052/382
+f 1039/367 1060/391 37/2839
+f 1059/392 37/2839 1060/391
+f 1056/386 1060/391 276/2842
+f 1049/380 276/2842 1060/391
+f 37/2839 1059/392 1063/395
+f 1064/394 1063/395 1059/392
+f 280/2845 1062/397 1061/393
+f 1064/394 1061/393 1062/397
+f 1065/400 1067/401 41/2844
+f 1055/389 41/2844 1067/401
+f 1061/393 1067/401 280/2845
+f 1066/399 280/2845 1067/401
+f 1071/405 1070/406 1068/404
+f 42/2846 1068/404 1070/406
+f 280/2845 1066/399 1069/403
+f 1071/405 1069/403 1066/399
+f 38/2847 1072/407 1073/409
+f 1074/408 1073/409 1072/407
+f 1074/408 1062/397 1069/403
+f 280/2845 1069/403 1062/397
+f 1079/411 1078/416 1075/413
+f 43/2848 1075/413 1078/416
+f 285/2849 1077/414 1076/410
+f 1079/411 1076/410 1077/414
+f 1080/420 1083/418 48/2850
+f 1082/421 48/2850 1083/418
+f 285/2849 1076/410 1081/417
+f 1083/418 1081/417 1076/410
+f 1084/424 1087/425 45/2851
+f 1086/426 45/2851 1087/425
+f 285/2849 1081/417 1085/423
+f 1087/425 1085/423 1081/417
+f 1088/427 1090/428 46/2852
+f 1089/429 46/2852 1090/428
+f 285/2849 1085/423 1077/414
+f 1090/428 1077/414 1085/423
+f 1091/433 1095/431 44/2853
+f 1094/435 44/2853 1095/431
+f 290/2854 1093/434 1092/430
+f 1095/431 1092/430 1093/434
+f 1099/438 1098/441 1096/440
+f 50/2855 1096/440 1098/441
+f 290/2854 1092/430 1097/437
+f 1099/438 1097/437 1092/430
+f 1100/444 1103/445 47/2856
+f 1102/446 47/2856 1103/445
+f 1097/437 1103/445 290/2854
+f 1101/443 290/2854 1103/445
+f 1104/447 1106/448 49/2857
+f 1105/449 49/2857 1106/448
+f 290/2854 1101/443 1093/434
+f 1106/448 1093/434 1101/443
+f 18/2858 867/452 1109/455
+f 1110/453 1109/455 867/452
+f 1107/451 293/2859 1110/453
+f 1108/456 1110/453 293/2859
+f 1113/458 861/461 1111/460
+f 2/2860 1111/460 861/461
+f 293/2859 1107/451 1112/457
+f 1113/458 1112/457 1107/451
+f 1094/435 1116/463 44/2853
+f 1115/464 44/2853 1116/463
+f 1112/457 1116/463 293/2859
+f 1114/462 293/2859 1116/463
+f 1117/466 1118/465 49/2857
+f 1104/447 49/2857 1118/465
+f 293/2859 1114/462 1108/456
+f 1118/465 1108/456 1114/462
+f 20/2861 1119/470 1122/471
+f 1123/468 1122/471 1119/470
+f 297/2862 1121/473 1120/467
+f 1123/468 1120/467 1121/473
+f 13/2863 1124/475 1126/478
+f 1127/474 1126/478 1124/475
+f 1120/467 1127/474 297/2862
+f 1125/476 297/2862 1127/474
+f 1102/446 1130/480 47/2856
+f 1129/481 47/2856 1130/480
+f 297/2862 1125/476 1128/479
+f 1130/480 1128/479 1125/476
+f 1131/482 1132/483 50/2855
+f 1096/440 50/2855 1132/483
+f 1128/479 1132/483 297/2862
+f 1121/473 297/2862 1132/483
+f 18/2858 1109/455 755/486
+f 1135/485 755/486 1109/455
+f 298/2864 1134/488 1133/484
+f 1135/485 1133/484 1134/488
+f 1105/449 1137/490 49/2857
+f 1117/466 49/2857 1137/490
+f 1133/484 1137/490 298/2864
+f 1136/489 298/2864 1137/490
+f 1129/481 1139/492 47/2856
+f 1100/444 47/2856 1139/492
+f 298/2864 1136/489 1138/491
+f 1139/492 1138/491 1136/489
+f 13/2863 761/493 1124/475
+f 1140/494 1124/475 761/493
+f 1138/491 1140/494 298/2864
+f 1134/488 298/2864 1140/494
+f 20/2861 1122/471 808/498
+f 1143/495 808/498 1122/471
+f 1142/497 1143/495 299/2865
+f 1141/496 299/2865 1143/495
+f 1098/441 1145/501 50/2855
+f 1131/482 50/2855 1145/501
+f 299/2865 1141/496 1144/500
+f 1145/501 1144/500 1141/496
+f 1115/464 1147/503 44/2853
+f 1091/433 44/2853 1147/503
+f 1144/500 1147/503 299/2865
+f 1146/502 299/2865 1147/503
+f 1148/504 1111/460 798/505
+f 2/2860 798/505 1111/460
+f 1148/504 1142/497 1146/502
+f 299/2865 1146/502 1142/497
+f 16/2866 1149/508 822/511
+f 1152/509 822/511 1149/508
+f 1151/512 1152/509 302/2867
+f 1150/507 302/2867 1152/509
+f 1082/421 1155/514 48/2850
+f 1154/515 48/2850 1155/514
+f 302/2867 1150/507 1153/513
+f 1155/514 1153/513 1150/507
+f 1156/518 1158/519 43/2848
+f 1075/413 43/2848 1158/519
+f 1153/513 1158/519 302/2867
+f 1157/517 302/2867 1158/519
+f 1160/520 1159/522 813/521
+f 1/2868 813/521 1159/522
+f 302/2867 1157/517 1151/512
+f 1160/520 1151/512 1157/517
+f 772/525 1164/526 12/2869
+f 1163/528 12/2869 1164/526
+f 1161/524 304/2870 1164/526
+f 1162/527 1164/526 304/2870
+f 1159/522 1166/531 1/2868
+f 765/532 1/2868 1166/531
+f 1166/531 1165/530 1161/524
+f 304/2870 1161/524 1165/530
+f 1078/416 1168/534 43/2848
+f 1156/518 43/2848 1168/534
+f 1165/530 1168/534 304/2870
+f 1167/533 304/2870 1168/534
+f 1169/536 1170/535 46/2852
+f 1088/427 46/2852 1170/535
+f 304/2870 1167/533 1162/527
+f 1170/535 1162/527 1167/533
+f 1171/539 1174/540 10/2871
+f 744/542 10/2871 1174/540
+f 1173/543 1174/540 306/2872
+f 1172/538 306/2872 1174/540
+f 1086/426 1177/545 45/2851
+f 1176/546 45/2851 1177/545
+f 306/2872 1172/538 1175/544
+f 1177/545 1175/544 1172/538
+f 1154/515 1179/548 48/2850
+f 1080/420 48/2850 1179/548
+f 1175/544 1179/548 306/2872
+f 1178/547 306/2872 1179/548
+f 16/2866 749/550 1149/508
+f 1180/549 1149/508 749/550
+f 306/2872 1178/547 1173/543
+f 1180/549 1173/543 1178/547
+f 12/2869 1163/528 1183/553
+f 1184/552 1183/553 1163/528
+f 308/2873 1182/555 1181/551
+f 1184/552 1181/551 1182/555
+f 1089/429 1186/557 46/2852
+f 1169/536 46/2852 1186/557
+f 1181/551 1186/557 308/2873
+f 1185/556 308/2873 1186/557
+f 1176/546 1188/559 45/2851
+f 1084/424 45/2851 1188/559
+f 308/2873 1185/556 1187/558
+f 1188/559 1187/558 1185/556
+f 10/2871 1189/560 1171/539
+f 1190/561 1171/539 1189/560
+f 1187/558 1190/561 308/2873
+f 1182/555 308/2873 1190/561
+f 38/2847 1191/565 1072/407
+f 1194/563 1072/407 1191/565
+f 312/2874 1193/566 1192/562
+f 1194/563 1192/562 1193/566
+f 51/2875 1195/567 1197/571
+f 1198/569 1197/571 1195/567
+f 312/2874 1192/562 1196/570
+f 1198/569 1196/570 1192/562
+f 1201/576 53/2876 1202/572
+f 1199/573 1202/572 53/2876
+f 312/2874 1196/570 1200/574
+f 1202/572 1200/574 1196/570
+f 1205/578 1204/579 1043/372
+f 36/2840 1043/372 1204/579
+f 312/2874 1200/574 1203/577
+f 1205/578 1203/577 1200/574
+f 1063/395 1206/580 37/2839
+f 1037/366 37/2839 1206/580
+f 1203/577 1206/580 312/2874
+f 1193/566 312/2874 1206/580
+f 52/2877 1207/584 1210/585
+f 1211/582 1210/585 1207/584
+f 1211/582 1208/581 1209/587
+f 317/2878 1209/587 1208/581
+f 1212/590 1215/591 53/2879
+f 1214/592 53/2879 1215/591
+f 1208/581 1215/591 317/2878
+f 1213/589 317/2878 1215/591
+f 1216/595 1219/596 57/2880
+f 1218/597 57/2880 1219/596
+f 1217/594 317/2878 1219/596
+f 1213/589 1219/596 317/2878
+f 1222/598 1221/600 1220/599
+f 56/2881 1220/599 1221/600
+f 317/2878 1217/594 1209/587
+f 1222/598 1209/587 1217/594
+f 1225/602 1212/590 1201/604
+f 53/2879 1201/604 1212/590
+f 320/2882 1224/605 1223/601
+f 1225/602 1223/601 1224/605
+f 51/2883 1226/608 1195/610
+f 1228/609 1195/610 1226/608
+f 1227/607 320/2882 1228/609
+f 1223/601 1228/609 320/2882
+f 1232/612 1231/615 1229/614
+f 54/2884 1229/614 1231/615
+f 320/2882 1227/607 1230/611
+f 1232/612 1230/611 1227/607
+f 1218/597 1234/616 57/2880
+f 1233/617 57/2880 1234/616
+f 1230/611 1234/616 320/2882
+f 1224/605 320/2882 1234/616
+f 1238/619 1229/614 1235/621
+f 54/2884 1235/621 1229/614
+f 323/2885 1237/622 1236/618
+f 1238/619 1236/618 1237/622
+f 1239/625 1242/626 55/2886
+f 1241/627 55/2886 1242/626
+f 1236/618 1242/626 323/2885
+f 1240/624 323/2885 1242/626
+f 1221/600 1245/629 56/2881
+f 1244/630 56/2881 1245/629
+f 323/2885 1240/624 1243/628
+f 1245/629 1243/628 1240/624
+f 1233/617 1246/631 57/2880
+f 1216/595 57/2880 1246/631
+f 1243/628 1246/631 323/2885
+f 1237/622 323/2885 1246/631
+f 1247/634 1251/635 69/2887
+f 1250/637 69/2887 1251/635
+f 1249/638 1251/635 327/2888
+f 1248/633 327/2888 1251/635
+f 76/2889 1252/642 1254/643
+f 1255/640 1254/643 1252/642
+f 327/2888 1248/633 1253/639
+f 1255/640 1253/639 1248/633
+f 920/226 1258/645 21/2818
+f 1257/646 21/2818 1258/645
+f 1253/639 1258/645 327/2888
+f 1256/644 327/2888 1258/645
+f 22/2817 1259/648 914/219
+f 1260/647 914/219 1259/648
+f 327/2888 1256/644 1249/638
+f 1260/647 1249/638 1256/644
+f 1264/654 77/2890 1265/649
+f 1261/650 1265/649 77/2890
+f 1263/655 1265/649 331/2891
+f 1262/651 331/2891 1265/649
+f 1269/657 1268/660 1266/659
+f 80/2892 1266/659 1268/660
+f 331/2891 1262/651 1267/656
+f 1269/657 1267/656 1262/651
+f 963/278 1272/662 25/2826
+f 1271/663 25/2826 1272/662
+f 1270/661 331/2891 1272/662
+f 1267/656 1272/662 331/2891
+f 33/2822 1273/665 959/274
+f 1274/664 959/274 1273/665
+f 331/2891 1270/661 1263/655
+f 1274/664 1263/655 1270/661
+f 1266/659 80/2892 1278/666
+f 1275/667 1278/666 80/2892
+f 1277/670 1278/666 334/2893
+f 1276/668 334/2893 1278/666
+f 1282/672 1281/675 1279/674
+f 81/2894 1279/674 1281/675
+f 334/2893 1276/668 1280/671
+f 1282/672 1280/671 1276/668
+f 1284/678 26/2827 1285/676
+f 953/266 1285/676 26/2827
+f 1280/671 1285/676 334/2893
+f 1283/677 334/2893 1285/676
+f 25/2826 1271/663 948/260
+f 1286/679 948/260 1271/663
+f 1286/679 1277/670 1283/677
+f 334/2893 1283/677 1277/670
+f 1287/682 1291/683 67/2895
+f 1290/685 67/2895 1291/683
+f 1289/686 1291/683 338/2896
+f 1288/681 338/2896 1291/683
+f 1295/688 1294/691 1292/690
+f 68/2897 1292/690 1294/691
+f 338/2896 1288/681 1293/687
+f 1295/688 1293/687 1288/681
+f 1297/696 12/2789 1298/692
+f 1183/693 1298/692 12/2789
+f 1293/687 1298/692 338/2896
+f 1296/694 338/2896 1298/692
+f 1300/697 1189/699 1299/698
+f 10/2782 1299/698 1189/699
+f 338/2896 1296/694 1289/686
+f 1300/697 1289/686 1296/694
+f 78/2898 1301/703 1304/704
+f 1305/701 1304/704 1301/703
+f 1305/701 1302/700 1303/706
+f 342/2899 1303/706 1302/700
+f 1306/709 1309/710 74/2900
+f 1308/711 74/2900 1309/710
+f 1307/708 342/2899 1309/710
+f 1302/700 1309/710 342/2899
+f 1312/713 1311/716 1126/715
+f 13/2786 1126/715 1311/716
+f 342/2899 1307/708 1310/712
+f 1312/713 1310/712 1307/708
+f 1313/717 1314/718 20/2799
+f 1119/719 20/2799 1314/718
+f 1303/706 342/2899 1314/718
+f 1310/712 1314/718 342/2899
+f 1315/721 1318/720 76/2889
+f 1252/642 76/2889 1318/720
+f 1317/724 1318/720 344/2901
+f 1316/722 344/2901 1318/720
+f 1321/726 1320/727 1264/654
+f 77/2890 1264/654 1320/727
+f 344/2901 1316/722 1319/725
+f 1321/726 1319/725 1316/722
+f 937/246 1323/729 33/2822
+f 1273/665 33/2822 1323/729
+f 1322/728 344/2901 1323/729
+f 1319/725 1323/729 344/2901
+f 21/2818 1257/646 933/243
+f 1324/730 933/243 1257/646
+f 344/2901 1322/728 1317/724
+f 1324/730 1317/724 1322/728
+f 1328/736 70/2902 1329/734
+f 1325/733 1329/734 70/2902
+f 1326/732 347/2903 1329/734
+f 1327/737 1329/734 347/2903
+f 69/2887 1250/637 1331/740
+f 1332/739 1331/740 1250/637
+f 347/2903 1326/732 1330/738
+f 1332/739 1330/738 1326/732
+f 1031/355 1334/742 22/2817
+f 1259/648 22/2817 1334/742
+f 1330/738 1334/742 347/2903
+f 1333/741 347/2903 1334/742
+f 34/2836 1335/743 1027/350
+f 1336/744 1027/350 1335/743
+f 347/2903 1333/741 1327/737
+f 1336/744 1327/737 1333/741
+f 1340/750 71/2904 1341/745
+f 1337/746 1341/745 71/2904
+f 1339/751 1341/745 350/2905
+f 1338/747 350/2905 1341/745
+f 1344/753 1343/756 1328/755
+f 70/2906 1328/755 1343/756
+f 350/2905 1338/747 1342/752
+f 1344/753 1342/752 1338/747
+f 1045/759 1346/760 34/2907
+f 1335/761 34/2907 1346/760
+f 1345/758 350/2905 1346/760
+f 1342/752 1346/760 350/2905
+f 36/2908 1347/762 1041/764
+f 1348/763 1041/764 1347/762
+f 1348/763 1339/751 1345/758
+f 350/2905 1345/758 1339/751
+f 59/2909 1349/766 1352/770
+f 1353/765 1352/770 1349/766
+f 1351/771 1353/765 355/2910
+f 1350/767 355/2910 1353/765
+f 65/2911 1354/775 1356/776
+f 1357/773 1356/776 1354/775
+f 355/2910 1350/767 1355/772
+f 1357/773 1355/772 1350/767
+f 1358/779 1361/780 39/2912
+f 1360/781 39/2912 1361/780
+f 1355/772 1361/780 355/2910
+f 1359/778 355/2910 1361/780
+f 38/2847 1362/783 1363/784
+f 1364/782 1363/784 1362/783
+f 355/2910 1359/778 1351/771
+f 1364/782 1351/771 1359/778
+f 1368/790 60/2913 1369/787
+f 1365/785 1369/787 60/2913
+f 358/2914 1367/789 1366/788
+f 1369/787 1366/788 1367/789
+f 1372/793 1371/794 1352/770
+f 59/2909 1352/770 1371/794
+f 1370/792 358/2914 1372/793
+f 1366/788 1372/793 358/2914
+f 38/2847 1073/409 1362/783
+f 1374/795 1362/783 1073/409
+f 1374/795 1373/796 1370/792
+f 358/2914 1370/792 1373/796
+f 1375/798 1376/797 42/2846
+f 1068/404 42/2846 1376/797
+f 1373/796 1376/797 358/2914
+f 1367/789 358/2914 1376/797
+f 1380/804 79/2915 1381/799
+f 1377/800 1381/799 79/2915
+f 1379/805 1381/799 361/2916
+f 1378/801 361/2916 1381/799
+f 1383/808 78/2898 1384/806
+f 1304/704 1384/806 78/2898
+f 361/2916 1378/801 1382/807
+f 1384/806 1382/807 1378/801
+f 20/2799 809/94 1313/717
+f 1386/810 1313/717 809/94
+f 1385/809 361/2916 1386/810
+f 1382/807 1386/810 361/2916
+f 19/2798 1387/812 804/89
+f 1388/811 804/89 1387/812
+f 1388/811 1379/805 1385/809
+f 361/2916 1385/809 1379/805
+f 68/2897 1389/816 1292/690
+f 1392/814 1292/690 1389/816
+f 364/2917 1391/817 1390/813
+f 1392/814 1390/813 1391/817
+f 1393/820 1396/821 58/2918
+f 1395/822 58/2918 1396/821
+f 1394/819 364/2917 1396/821
+f 1390/813 1396/821 364/2917
+f 11/2790 776/54 1398/825
+f 1399/824 1398/825 776/54
+f 364/2917 1394/819 1397/823
+f 1399/824 1397/823 1394/819
+f 770/48 12/2789 1400/826
+f 1297/696 1400/826 12/2789
+f 1397/823 1400/826 364/2917
+f 1391/817 364/2917 1400/826
+f 1404/828 1393/820 1401/830
+f 58/2918 1401/830 1393/820
+f 367/2919 1403/831 1402/827
+f 1404/828 1402/827 1403/831
+f 1405/834 1408/835 61/2920
+f 1407/836 61/2920 1408/835
+f 1406/833 367/2919 1408/835
+f 1402/827 1408/835 367/2919
+f 23/2794 793/74 1410/839
+f 1411/838 1410/839 793/74
+f 367/2919 1406/833 1409/837
+f 1411/838 1409/837 1406/833
+f 1398/825 1412/840 11/2790
+f 789/71 11/2790 1412/840
+f 1409/837 1412/840 367/2919
+f 1403/831 367/2919 1412/840
+f 1413/844 1416/842 61/2920
+f 1405/834 61/2920 1416/842
+f 370/2921 1415/845 1414/841
+f 1416/842 1414/841 1415/845
+f 1420/847 1419/850 1417/849
+f 62/2922 1417/849 1419/850
+f 370/2921 1414/841 1418/846
+f 1420/847 1418/846 1414/841
+f 35/2835 1025/348 1422/853
+f 1423/852 1422/853 1025/348
+f 1421/851 370/2921 1423/852
+f 1418/846 1423/852 370/2921
+f 1410/839 1424/854 23/2794
+f 1019/339 23/2794 1424/854
+f 1421/851 1424/854 370/2921
+f 1415/845 370/2921 1424/854
+f 1425/857 1428/858 62/2923
+f 1417/860 62/2923 1428/858
+f 1427/861 1428/858 373/2924
+f 1426/856 373/2924 1428/858
+f 1429/864 1432/865 63/2925
+f 1431/866 63/2925 1432/865
+f 373/2924 1426/856 1430/863
+f 1432/865 1430/863 1426/856
+f 1435/868 1434/869 1053/385
+f 40/2843 1053/385 1434/869
+f 1433/867 373/2924 1435/868
+f 1430/863 1435/868 373/2924
+f 35/2837 1422/871 1047/379
+f 1436/870 1047/379 1422/871
+f 1436/870 1427/861 1433/867
+f 373/2924 1433/867 1427/861
+f 1440/873 1429/864 1437/875
+f 63/2925 1437/875 1429/864
+f 376/2926 1439/876 1438/872
+f 1440/873 1438/872 1439/876
+f 1441/879 1444/880 64/2927
+f 1443/881 64/2927 1444/880
+f 1438/872 1444/880 376/2926
+f 1442/878 376/2926 1444/880
+f 41/2844 1057/390 1446/884
+f 1447/883 1446/884 1057/390
+f 376/2926 1442/878 1445/882
+f 1447/883 1445/882 1442/878
+f 1434/869 1448/885 40/2843
+f 1051/383 40/2843 1448/885
+f 1445/882 1448/885 376/2926
+f 1439/876 376/2926 1448/885
+f 1452/886 1441/879 1449/887
+f 64/2927 1449/887 1441/879
+f 1451/890 1452/886 378/2928
+f 1450/888 378/2928 1452/886
+f 1455/892 1454/893 1368/790
+f 60/2913 1368/790 1454/893
+f 378/2928 1450/888 1453/891
+f 1455/892 1453/891 1450/888
+f 1070/406 1457/895 42/2846
+f 1375/798 42/2846 1457/895
+f 1453/891 1457/895 378/2928
+f 1456/894 378/2928 1457/895
+f 41/2844 1446/884 1065/400
+f 1458/896 1065/400 1446/884
+f 1458/896 1451/890 1456/894
+f 378/2928 1456/894 1451/890
+f 1462/897 1461/899 1340/750
+f 71/2904 1340/750 1461/899
+f 381/2929 1460/901 1459/898
+f 1462/897 1459/898 1460/901
+f 1347/762 36/2908 1464/902
+f 1204/903 1464/902 36/2908
+f 381/2929 1459/898 1463/904
+f 1464/902 1463/904 1459/898
+f 53/2930 1214/909 1199/910
+f 1466/907 1199/910 1214/909
+f 381/2929 1463/904 1465/906
+f 1466/907 1465/906 1463/904
+f 1467/913 1469/914 52/2931
+f 1207/915 52/2931 1469/914
+f 381/2929 1465/906 1468/912
+f 1469/914 1468/912 1465/906
+f 1470/916 1472/917 72/2932
+f 1471/918 72/2932 1472/917
+f 381/2929 1468/912 1460/901
+f 1472/917 1460/901 1468/912
+f 38/2847 1363/784 1191/565
+f 1475/919 1191/565 1363/784
+f 1475/919 1473/920 1474/921
+f 383/2933 1474/921 1473/920
+f 39/2912 1476/925 1358/779
+f 1478/923 1358/779 1476/925
+f 1478/923 1477/922 1473/920
+f 383/2933 1473/920 1477/922
+f 1197/571 1480/926 51/2875
+f 1479/927 51/2875 1480/926
+f 1477/922 1480/926 383/2933
+f 1474/921 383/2933 1480/926
+f 1485/931 1484/933 1481/930
+f 85/2934 1481/930 1484/933
+f 1483/934 1485/931 388/2935
+f 1482/929 388/2935 1485/931
+f 1489/936 1488/939 1486/938
+f 83/2936 1486/938 1488/939
+f 388/2935 1482/929 1487/935
+f 1489/936 1487/935 1482/929
+f 1490/942 1493/943 84/2937
+f 1492/944 84/2937 1493/943
+f 1487/935 1493/943 388/2935
+f 1491/941 388/2935 1493/943
+f 1496/945 1495/947 1494/946
+f 82/2938 1494/946 1495/947
+f 388/2935 1491/941 1483/934
+f 1496/945 1483/934 1491/941
+f 1497/949 1501/948 92/2939
+f 1500/953 92/2939 1501/948
+f 1499/954 1501/948 393/2940
+f 1498/950 393/2940 1501/948
+f 1505/956 1504/959 1502/958
+f 89/2941 1502/958 1504/959
+f 393/2940 1498/950 1503/955
+f 1505/956 1503/955 1498/950
+f 1509/961 1508/964 1506/963
+f 88/2942 1506/963 1508/964
+f 1507/960 393/2940 1509/961
+f 1503/955 1509/961 393/2940
+f 93/2943 1510/966 1511/967
+f 1512/965 1511/967 1510/966
+f 1512/965 1499/954 1507/960
+f 393/2940 1507/960 1499/954
+f 93/2943 1513/968 1516/972
+f 1517/970 1516/972 1513/968
+f 1517/970 1514/971 1515/974
+f 398/2944 1515/974 1514/971
+f 1521/975 1520/979 1518/976
+f 86/2945 1518/976 1520/979
+f 1514/971 1521/975 398/2944
+f 1519/977 398/2944 1521/975
+f 1525/981 1524/984 1522/983
+f 87/2946 1522/983 1524/984
+f 398/2944 1519/977 1523/980
+f 1525/981 1523/980 1519/977
+f 1526/985 1528/986 94/2947
+f 1527/987 94/2947 1528/986
+f 1515/974 398/2944 1528/986
+f 1523/980 1528/986 398/2944
+f 89/2941 1529/990 1502/958
+f 1532/991 1502/958 1529/990
+f 1530/989 402/2948 1532/991
+f 1531/992 1532/991 402/2948
+f 1535/997 90/2949 1536/994
+f 1533/996 1536/994 90/2949
+f 1530/989 1536/994 402/2948
+f 1534/993 402/2948 1536/994
+f 1539/1002 91/2950 1540/998
+f 1537/999 1540/998 91/2950
+f 1534/993 1540/998 402/2948
+f 1538/1000 402/2948 1540/998
+f 1542/1003 1541/1004 1508/964
+f 88/2942 1508/964 1541/1004
+f 402/2948 1538/1000 1531/992
+f 1542/1003 1531/992 1538/1000
+f 1543/1007 1546/1008 90/2949
+f 1533/996 90/2949 1546/1008
+f 1544/1006 405/2951 1546/1008
+f 1545/1009 1546/1008 405/2951
+f 1524/984 1549/1011 87/2946
+f 1548/1012 87/2946 1549/1011
+f 1547/1010 405/2951 1549/1011
+f 1544/1006 1549/1011 405/2951
+f 1518/976 86/2945 1552/1014
+f 1550/1016 1552/1014 86/2945
+f 405/2951 1547/1010 1551/1013
+f 1552/1014 1551/1013 1547/1010
+f 1554/1017 1553/1018 1539/1002
+f 91/2950 1539/1002 1553/1018
+f 1545/1009 405/2951 1554/1017
+f 1551/1013 1554/1017 405/2951
+f 92/2939 1500/953 1557/1022
+f 1558/1020 1557/1022 1500/953
+f 408/2952 1556/1023 1555/1019
+f 1558/1020 1555/1019 1556/1023
+f 93/2943 1516/972 1510/966
+f 1560/1025 1510/966 1516/972
+f 1555/1019 1560/1025 408/2952
+f 1559/1024 408/2952 1560/1025
+f 1561/1028 1563/1029 94/2947
+f 1526/985 94/2947 1563/1029
+f 1562/1027 408/2952 1563/1029
+f 1559/1024 1563/1029 408/2952
+f 95/2953 1564/1031 1565/1032
+f 1566/1030 1565/1032 1564/1031
+f 408/2952 1562/1027 1556/1023
+f 1566/1030 1556/1023 1562/1027
+f 1570/1034 1569/1039 1557/1036
+f 92/2954 1557/1036 1569/1039
+f 411/2955 1568/1037 1567/1033
+f 1570/1034 1567/1033 1568/1037
+f 1571/1042 1573/1043 95/2956
+f 1564/1044 95/2956 1573/1043
+f 411/2955 1567/1033 1572/1041
+f 1573/1043 1572/1041 1567/1033
+f 55/2886 1241/627 1575/1047
+f 1576/1046 1575/1047 1241/627
+f 411/2955 1572/1041 1574/1045
+f 1576/1046 1574/1045 1572/1041
+f 1231/615 1578/1049 54/2884
+f 1235/621 54/2884 1578/1049
+f 1574/1045 1578/1049 411/2955
+f 1577/1048 411/2955 1578/1049
+f 51/2883 1579/1051 1226/608
+f 1580/1050 1226/608 1579/1051
+f 411/2955 1577/1048 1568/1037
+f 1580/1050 1568/1037 1577/1048
+f 94/2957 1581/1052 1561/1056
+f 1584/1054 1561/1056 1581/1052
+f 1584/1054 1582/1055 1583/1058
+f 413/2958 1583/1058 1582/1055
+f 1586/1061 52/2877 1587/1059
+f 1210/585 1587/1059 52/2877
+f 1585/1060 413/2958 1587/1059
+f 1582/1055 1587/1059 413/2958
+f 1244/630 1589/1063 56/2881
+f 1220/599 56/2881 1589/1063
+f 413/2958 1585/1060 1588/1062
+f 1589/1063 1588/1062 1585/1060
+f 1575/1047 1591/1064 55/2886
+f 1239/625 55/2886 1591/1064
+f 413/2958 1588/1062 1590/1065
+f 1591/1064 1590/1065 1588/1062
+f 1571/1042 95/2956 1592/1067
+f 1565/1066 1592/1067 95/2956
+f 1590/1065 1592/1067 413/2958
+f 1583/1058 413/2958 1592/1067
+f 1593/1070 1596/1071 87/2959
+f 1522/1073 87/2959 1596/1071
+f 416/2960 1595/1074 1594/1069
+f 1596/1071 1594/1069 1595/1074
+f 1597/1077 1600/1078 73/2961
+f 1599/1079 73/2961 1600/1078
+f 416/2960 1594/1069 1598/1076
+f 1600/1078 1598/1076 1594/1069
+f 72/2932 1471/918 1602/1082
+f 1603/1081 1602/1082 1471/918
+f 416/2960 1598/1076 1601/1080
+f 1603/1081 1601/1080 1598/1076
+f 1605/1083 1467/913 1586/1084
+f 52/2931 1586/1084 1467/913
+f 416/2960 1601/1080 1604/1085
+f 1605/1083 1604/1085 1601/1080
+f 94/2962 1527/1087 1581/1089
+f 1606/1088 1581/1089 1527/1087
+f 1606/1088 1595/1074 1604/1085
+f 416/2960 1604/1085 1595/1074
+f 1611/1091 1610/1096 1607/1093
+f 66/2963 1607/1093 1610/1096
+f 420/2964 1609/1094 1608/1090
+f 1611/1091 1608/1090 1609/1094
+f 1614/1098 1613/1101 1535/1100
+f 90/2965 1535/1100 1613/1101
+f 420/2964 1608/1090 1612/1097
+f 1614/1098 1612/1097 1608/1090
+f 1615/1104 1617/1105 89/2966
+f 1529/1106 89/2966 1617/1105
+f 420/2964 1612/1097 1616/1103
+f 1617/1105 1616/1103 1612/1097
+f 39/2912 1360/781 1619/1109
+f 1620/1108 1619/1109 1360/781
+f 420/2964 1616/1103 1618/1107
+f 1620/1108 1618/1107 1616/1103
+f 1621/1110 1622/1111 65/2911
+f 1354/775 65/2911 1622/1111
+f 1618/1107 1622/1111 420/2964
+f 1609/1094 420/2964 1622/1111
+f 73/2961 1599/1079 1625/1114
+f 1626/1113 1625/1114 1599/1079
+f 1626/1113 1623/1112 1624/1116
+f 422/2967 1624/1116 1623/1112
+f 87/2959 1548/1117 1593/1070
+f 1628/1119 1593/1070 1548/1117
+f 422/2967 1623/1112 1627/1120
+f 1628/1119 1627/1120 1623/1112
+f 1630/1122 1543/1125 1613/1124
+f 90/2968 1613/1124 1543/1125
+f 422/2967 1627/1120 1629/1121
+f 1630/1122 1629/1121 1627/1120
+f 66/2969 1631/1127 1607/1128
+f 1632/1126 1607/1128 1631/1127
+f 1624/1116 422/2967 1632/1126
+f 1629/1121 1632/1126 422/2967
+f 1619/1109 1635/1130 39/2912
+f 1476/925 39/2912 1635/1130
+f 1634/1131 1635/1130 423/2970
+f 1633/1129 423/2970 1635/1130
+f 1637/1135 1615/1104 1504/1134
+f 89/2966 1504/1134 1615/1104
+f 423/2970 1633/1129 1636/1133
+f 1637/1135 1636/1133 1633/1129
+f 92/2971 1569/1139 1497/1140
+f 1639/1137 1497/1140 1569/1139
+f 423/2970 1636/1133 1638/1136
+f 1639/1137 1638/1136 1636/1133
+f 51/2875 1479/927 1579/1142
+f 1640/1141 1579/1142 1479/927
+f 423/2970 1638/1136 1634/1131
+f 1640/1141 1634/1131 1638/1136
+f 1506/963 88/2942 1644/1143
+f 1641/1144 1644/1143 88/2942
+f 1643/1147 1644/1143 426/2972
+f 1642/1145 426/2972 1644/1143
+f 1488/939 1647/1149 83/2936
+f 1646/1150 83/2936 1647/1149
+f 1642/1145 1647/1149 426/2972
+f 1645/1148 426/2972 1647/1149
+f 1648/1153 1650/1154 85/2934
+f 1481/930 85/2934 1650/1154
+f 1649/1152 426/2972 1650/1154
+f 1645/1148 1650/1154 426/2972
+f 93/2943 1511/967 1651/1156
+f 1652/1155 1651/1156 1511/967
+f 426/2972 1649/1152 1643/1147
+f 1652/1155 1643/1147 1649/1152
+f 91/2950 1653/1157 1537/999
+f 1656/1159 1537/999 1653/1157
+f 1656/1159 1654/1160 1655/1161
+f 428/2973 1655/1161 1654/1160
+f 1492/944 1659/1163 84/2937
+f 1658/1164 84/2937 1659/1163
+f 1654/1160 1659/1163 428/2973
+f 1657/1162 428/2973 1659/1163
+f 1646/1150 1661/1166 83/2936
+f 1486/938 83/2936 1661/1166
+f 1657/1162 1661/1166 428/2973
+f 1660/1165 428/2973 1661/1166
+f 1541/1004 1662/1167 88/2942
+f 1641/1144 88/2942 1662/1167
+f 1662/1167 1655/1161 1660/1165
+f 428/2973 1660/1165 1655/1161
+f 86/2945 1663/1169 1550/1016
+f 1666/1168 1550/1016 1663/1169
+f 1665/1172 1666/1168 430/2974
+f 1664/1170 430/2974 1666/1168
+f 1668/1175 82/2938 1669/1174
+f 1495/947 1669/1174 82/2938
+f 1664/1170 1669/1174 430/2974
+f 1667/1173 430/2974 1669/1174
+f 1658/1164 1671/1177 84/2937
+f 1490/942 84/2937 1671/1177
+f 1670/1176 430/2974 1671/1177
+f 1667/1173 1671/1177 430/2974
+f 91/2950 1553/1018 1653/1157
+f 1672/1178 1653/1157 1553/1018
+f 1672/1178 1665/1172 1670/1176
+f 430/2974 1670/1176 1665/1172
+f 93/2943 1651/1156 1513/968
+f 1675/1179 1513/968 1651/1156
+f 431/2975 1674/1181 1673/1180
+f 1675/1179 1673/1180 1674/1181
+f 1484/933 1677/1183 85/2934
+f 1648/1153 85/2934 1677/1183
+f 1673/1180 1677/1183 431/2975
+f 1676/1182 431/2975 1677/1183
+f 1668/1175 1679/1185 82/2938
+f 1494/946 82/2938 1679/1185
+f 1678/1184 431/2975 1679/1185
+f 1676/1182 1679/1185 431/2975
+f 1520/979 1680/1186 86/2945
+f 1663/1169 86/2945 1680/1186
+f 1674/1181 431/2975 1680/1186
+f 1678/1184 1680/1186 431/2975
+f 14/2976 763/1190 1683/1191
+f 1684/1188 1683/1191 763/1190
+f 1684/1188 1681/1187 1682/1193
+f 435/2977 1682/1193 1681/1187
+f 1687/1195 759/1198 1685/1197
+f 13/2978 1685/1197 759/1198
+f 435/2977 1681/1187 1686/1194
+f 1687/1195 1686/1194 1681/1187
+f 1688/1202 1691/1200 97/2979
+f 1690/1203 97/2979 1691/1200
+f 1689/1199 435/2977 1691/1200
+f 1686/1194 1691/1200 435/2977
+f 1692/1204 1694/1205 98/2980
+f 1693/1206 98/2980 1694/1205
+f 1682/1193 435/2977 1694/1205
+f 1689/1199 1694/1205 435/2977
+f 1685/1197 13/2978 1697/1210
+f 1311/1209 1697/1210 13/2978
+f 1696/1211 1697/1210 438/2981
+f 1695/1208 438/2981 1697/1210
+f 1700/1213 1306/1216 1698/1215
+f 74/2982 1698/1215 1306/1216
+f 438/2981 1695/1208 1699/1212
+f 1700/1213 1699/1212 1695/1208
+f 1701/1219 1704/1220 100/2983
+f 1703/1221 100/2983 1704/1220
+f 1699/1212 1704/1220 438/2981
+f 1702/1218 438/2981 1704/1220
+f 97/2979 1690/1203 1705/1223
+f 1706/1222 1705/1223 1690/1203
+f 438/2981 1702/1218 1696/1211
+f 1706/1222 1696/1211 1702/1218
+f 67/2984 1290/1227 1709/1228
+f 1710/1225 1709/1228 1290/1227
+f 442/2985 1708/1230 1707/1224
+f 1710/1225 1707/1224 1708/1230
+f 1711/1233 1713/1234 10/2986
+f 1299/1235 10/2986 1713/1234
+f 1707/1224 1713/1234 442/2985
+f 1712/1232 442/2985 1713/1234
+f 96/2987 1714/1239 1716/1240
+f 1717/1237 1716/1240 1714/1239
+f 442/2985 1712/1232 1715/1236
+f 1717/1237 1715/1236 1712/1232
+f 1718/1241 1720/1242 99/2988
+f 1719/1243 99/2988 1720/1242
+f 1715/1236 1720/1242 442/2985
+f 1708/1230 442/2985 1720/1242
+f 10/2986 746/1245 1711/1233
+f 1723/1244 1711/1233 746/1245
+f 1721/1246 444/2989 1723/1244
+f 1722/1248 1723/1244 444/2989
+f 740/1251 14/2976 1725/1249
+f 1683/1191 1725/1249 14/2976
+f 1725/1249 1724/1250 1721/1246
+f 444/2989 1721/1246 1724/1250
+f 1692/1204 98/2980 1728/1252
+f 1726/1253 1728/1252 98/2980
+f 1724/1250 1728/1252 444/2989
+f 1727/1254 444/2989 1728/1252
+f 1729/1257 96/2987 1730/1256
+f 1716/1240 1730/1256 96/2987
+f 444/2989 1727/1254 1722/1248
+f 1730/1256 1722/1248 1727/1254
+f 1734/1261 1701/1219 1731/1260
+f 100/2983 1731/1260 1701/1219
+f 447/2990 1733/1262 1732/1259
+f 1734/1261 1732/1259 1733/1262
+f 75/2991 1735/1265 1737/1267
+f 1738/1266 1737/1267 1735/1265
+f 1732/1259 1738/1266 447/2990
+f 1736/1264 447/2990 1738/1266
+f 98/2980 1693/1206 1740/1270
+f 1741/1269 1740/1270 1693/1206
+f 447/2990 1736/1264 1739/1268
+f 1741/1269 1739/1268 1736/1264
+f 1688/1202 97/2979 1742/1271
+f 1705/1223 1742/1271 97/2979
+f 1739/1268 1742/1271 447/2990
+f 1733/1262 447/2990 1742/1271
+f 75/2991 1743/1273 1735/1265
+f 1746/1272 1735/1265 1743/1273
+f 1745/1276 1746/1272 449/2992
+f 1744/1274 449/2992 1746/1272
+f 1749/1278 1748/1279 1719/1243
+f 99/2988 1719/1243 1748/1279
+f 449/2992 1744/1274 1747/1277
+f 1749/1278 1747/1277 1744/1274
+f 1729/1257 1751/1281 96/2987
+f 1714/1239 96/2987 1751/1281
+f 1747/1277 1751/1281 449/2992
+f 1750/1280 449/2992 1751/1281
+f 98/2980 1740/1270 1726/1253
+f 1752/1282 1726/1253 1740/1270
+f 449/2992 1750/1280 1745/1276
+f 1752/1282 1745/1276 1750/1280
+f 1757/1284 1756/1287 1753/1286
+f 101/2993 1753/1286 1756/1287
+f 1757/1284 1754/1283 1755/1289
+f 453/2994 1755/1289 1754/1283
+f 1758/1292 1761/1293 102/2995
+f 1760/1294 102/2995 1761/1293
+f 1754/1283 1761/1293 453/2994
+f 1759/1291 453/2994 1761/1293
+f 1762/1295 1764/1296 103/2996
+f 1763/1297 103/2996 1764/1296
+f 1759/1291 1764/1296 453/2994
+f 1755/1289 453/2994 1764/1296
+f 1768/1299 1767/1302 1756/1287
+f 101/2993 1756/1287 1767/1302
+f 456/2997 1766/1300 1765/1298
+f 1768/1299 1765/1298 1766/1300
+f 103/2996 1769/1306 1762/1295
+f 1771/1304 1762/1295 1769/1306
+f 456/2997 1765/1298 1770/1303
+f 1771/1304 1770/1303 1765/1298
+f 806/91 1774/1308 19/2798
+f 1773/1309 19/2798 1774/1308
+f 1770/1303 1774/1308 456/2997
+f 1772/1307 456/2997 1774/1308
+f 27/2797 881/180 800/84
+f 1776/1311 800/84 881/180
+f 456/2997 1772/1307 1775/1310
+f 1776/1311 1775/1310 1772/1307
+f 875/174 8/2810 1778/1312
+f 1777/1313 1778/1312 8/2810
+f 456/2997 1775/1310 1766/1300
+f 1778/1312 1766/1300 1775/1310
+f 1767/1302 1781/1315 101/2993
+f 1753/1286 101/2993 1781/1315
+f 458/2998 1780/1316 1779/1314
+f 1781/1315 1779/1314 1780/1316
+f 8/2810 989/308 1777/1313
+f 1783/1318 1777/1313 989/308
+f 458/2998 1779/1314 1782/1317
+f 1783/1318 1782/1317 1779/1314
+f 1785/1320 986/305 942/252
+f 4/2823 942/252 986/305
+f 458/2998 1782/1317 1784/1319
+f 1785/1320 1784/1319 1782/1317
+f 26/2827 1786/1321 952/264
+f 1788/1323 952/264 1786/1321
+f 458/2998 1784/1319 1787/1324
+f 1788/1323 1787/1324 1784/1319
+f 102/2995 1760/1294 1789/1326
+f 1790/1325 1789/1326 1760/1294
+f 458/2998 1787/1324 1780/1316
+f 1790/1325 1780/1316 1787/1324
+f 1795/1328 1794/1333 1791/1330
+f 104/2999 1791/1330 1794/1333
+f 461/3000 1793/1331 1792/1327
+f 1795/1328 1792/1327 1793/1331
+f 1789/1326 1798/1335 102/2995
+f 1797/1336 102/2995 1798/1335
+f 1792/1327 1798/1335 461/3000
+f 1796/1334 461/3000 1798/1335
+f 1786/1321 26/2827 1800/1337
+f 1284/678 1800/1337 26/2827
+f 1800/1337 1799/1338 1796/1334
+f 461/3000 1796/1334 1799/1338
+f 1279/674 81/2894 1802/1339
+f 1801/1340 1802/1339 81/2894
+f 1799/1338 1802/1339 461/3000
+f 1793/1331 461/3000 1802/1339
+f 1805/1344 79/2915 1806/1342
+f 1380/804 1806/1342 79/2915
+f 1804/1345 1806/1342 464/3001
+f 1803/1341 464/3001 1806/1342
+f 19/2798 1773/1309 1387/812
+f 1808/1347 1387/812 1773/1309
+f 464/3001 1803/1341 1807/1346
+f 1808/1347 1807/1346 1803/1341
+f 1809/1350 1811/1351 103/2996
+f 1769/1306 103/2996 1811/1351
+f 1807/1346 1811/1351 464/3001
+f 1810/1349 464/3001 1811/1351
+f 1814/1352 1813/1354 1812/1353
+f 105/3002 1812/1353 1813/1354
+f 464/3001 1810/1349 1804/1345
+f 1814/1352 1804/1345 1810/1349
+f 1763/1297 1818/1356 103/2996
+f 1817/1358 103/2996 1818/1356
+f 1815/1355 468/3003 1818/1356
+f 1816/1359 1818/1356 468/3003
+f 102/2995 1819/1361 1758/1292
+f 1821/1360 1758/1292 1819/1361
+f 1821/1360 1820/1362 1815/1355
+f 468/3003 1815/1355 1820/1362
+f 1825/1365 1824/1368 1822/1367
+f 106/3004 1822/1367 1824/1368
+f 468/3003 1820/1362 1823/1364
+f 1825/1365 1823/1364 1820/1362
+f 1828/1370 1827/1371 1826/1369
+f 107/3005 1826/1369 1827/1371
+f 1828/1370 1816/1359 1823/1364
+f 468/3003 1823/1364 1816/1359
+f 102/2995 1797/1336 1819/1361
+f 1831/1373 1819/1361 1797/1336
+f 471/3006 1830/1374 1829/1372
+f 1831/1373 1829/1372 1830/1374
+f 1832/1377 1834/1378 104/2999
+f 1791/1330 104/2999 1834/1378
+f 1829/1372 1834/1378 471/3006
+f 1833/1376 471/3006 1834/1378
+f 108/3007 1835/1382 1837/1383
+f 1838/1380 1837/1383 1835/1382
+f 471/3006 1833/1376 1836/1379
+f 1838/1380 1836/1379 1833/1376
+f 1824/1368 1840/1384 106/3004
+f 1839/1385 106/3004 1840/1384
+f 1836/1379 1840/1384 471/3006
+f 1830/1374 471/3006 1840/1384
+f 1843/1389 105/3002 1844/1386
+f 1813/1354 1844/1386 105/3002
+f 1842/1390 1844/1386 474/3008
+f 1841/1387 474/3008 1844/1386
+f 1846/1392 1809/1350 1817/1358
+f 103/2996 1817/1358 1809/1350
+f 474/3008 1841/1387 1845/1391
+f 1846/1392 1845/1391 1841/1387
+f 1847/1395 1849/1396 107/3005
+f 1826/1369 107/3005 1849/1396
+f 1848/1394 474/3008 1849/1396
+f 1845/1391 1849/1396 474/3008
+f 109/3009 1850/1398 1851/1399
+f 1852/1397 1851/1399 1850/1398
+f 1852/1397 1842/1390 1848/1394
+f 474/3008 1848/1394 1842/1390
+f 1855/1401 1822/1367 1839/1385
+f 106/3004 1839/1385 1822/1367
+f 476/3010 1854/1402 1853/1400
+f 1855/1401 1853/1400 1854/1402
+f 1856/1404 1858/1403 108/3007
+f 1835/1382 108/3007 1858/1403
+f 1853/1400 1858/1403 476/3010
+f 1857/1405 476/3010 1858/1403
+f 1851/1399 1861/1408 109/3009
+f 1860/1409 109/3009 1861/1408
+f 1857/1405 1861/1408 476/3010
+f 1859/1407 476/3010 1861/1408
+f 1862/1410 1847/1395 1827/1371
+f 107/3005 1827/1371 1847/1395
+f 476/3010 1859/1407 1854/1402
+f 1862/1410 1854/1402 1859/1407
+f 1863/1413 1867/1414 115/3011
+f 1866/1416 115/3011 1867/1414
+f 481/3012 1865/1415 1864/1412
+f 1867/1414 1864/1412 1865/1415
+f 125/3013 1868/1420 1870/1422
+f 1871/1421 1870/1422 1868/1420
+f 1869/1419 481/3012 1871/1421
+f 1864/1412 1871/1421 481/3012
+f 1875/1425 1874/1427 1872/1423
+f 119/3014 1872/1423 1874/1427
+f 481/3012 1869/1419 1873/1426
+f 1875/1425 1873/1426 1869/1419
+f 1878/1428 1877/1430 1876/1429
+f 123/3015 1876/1429 1877/1430
+f 1873/1426 1878/1428 481/3012
+f 1865/1415 481/3012 1878/1428
+f 1882/1432 1881/1435 1866/1416
+f 115/3011 1866/1416 1881/1435
+f 485/3016 1880/1433 1879/1431
+f 1882/1432 1879/1431 1880/1433
+f 1883/1438 1885/1439 123/3015
+f 1876/1429 123/3015 1885/1439
+f 1884/1437 485/3016 1885/1439
+f 1879/1431 1885/1439 485/3016
+f 1889/1441 1888/1444 1886/1443
+f 122/3017 1886/1443 1888/1444
+f 485/3016 1884/1437 1887/1440
+f 1889/1441 1887/1440 1884/1437
+f 127/3018 1890/1445 1891/1447
+f 1892/1446 1891/1447 1890/1445
+f 1887/1440 1892/1446 485/3016
+f 1880/1433 485/3016 1892/1446
+f 110/3019 1893/1451 1896/1453
+f 1897/1449 1896/1453 1893/1451
+f 1895/1452 1897/1449 490/3020
+f 1894/1448 490/3020 1897/1449
+f 133/3021 1898/1458 1900/1459
+f 1901/1456 1900/1459 1898/1458
+f 490/3020 1894/1448 1899/1455
+f 1901/1456 1899/1455 1894/1448
+f 1902/1462 1905/1463 120/3022
+f 1904/1464 120/3022 1905/1463
+f 1899/1455 1905/1463 490/3020
+f 1903/1461 490/3020 1905/1463
+f 121/3023 1906/1466 1907/1467
+f 1908/1465 1907/1467 1906/1466
+f 490/3020 1903/1461 1895/1452
+f 1908/1465 1895/1452 1903/1461
+f 116/3024 1909/1471 1912/1472
+f 1913/1469 1912/1472 1909/1471
+f 494/3025 1911/1474 1910/1468
+f 1913/1469 1910/1468 1911/1474
+f 1914/1477 1917/1478 132/3026
+f 1916/1479 132/3026 1917/1478
+f 1910/1468 1917/1478 494/3025
+f 1915/1476 494/3025 1917/1478
+f 120/3022 1904/1464 1919/1482
+f 1920/1481 1919/1482 1904/1464
+f 494/3025 1915/1476 1918/1480
+f 1920/1481 1918/1480 1915/1476
+f 1921/1483 1922/1484 133/3021
+f 1898/1458 133/3021 1922/1484
+f 1918/1480 1922/1484 494/3025
+f 1911/1474 494/3025 1922/1484
+f 111/3027 1923/1487 1926/1489
+f 1927/1488 1926/1489 1923/1487
+f 499/3028 1925/1491 1924/1486
+f 1927/1488 1924/1486 1925/1491
+f 129/3029 1928/1495 1930/1496
+f 1931/1493 1930/1496 1928/1495
+f 1931/1493 1929/1492 1924/1486
+f 499/3028 1924/1486 1929/1492
+f 1935/1498 1934/1501 1932/1500
+f 128/3030 1932/1500 1934/1501
+f 499/3028 1929/1492 1933/1497
+f 1935/1498 1933/1497 1929/1492
+f 1936/1502 1938/1503 136/3031
+f 1937/1504 136/3031 1938/1503
+f 1933/1497 1938/1503 499/3028
+f 1925/1491 499/3028 1938/1503
+f 1939/1507 1942/1508 110/3019
+f 1893/1451 110/3019 1942/1508
+f 1941/1509 1942/1508 503/3032
+f 1940/1506 503/3032 1942/1508
+f 125/3013 1943/1513 1945/1514
+f 1946/1511 1945/1514 1943/1513
+f 503/3032 1940/1506 1944/1510
+f 1946/1511 1944/1510 1940/1506
+f 1947/1517 1950/1518 124/3033
+f 1949/1519 124/3033 1950/1518
+f 1944/1510 1950/1518 503/3032
+f 1948/1516 503/3032 1950/1518
+f 133/3021 1900/1459 1951/1521
+f 1952/1520 1951/1521 1900/1459
+f 503/3032 1948/1516 1941/1509
+f 1952/1520 1941/1509 1948/1516
+f 1956/1523 1863/1413 1953/1525
+f 115/3011 1953/1525 1863/1413
+f 506/3034 1955/1526 1954/1522
+f 1956/1523 1954/1522 1955/1526
+f 1957/1529 1960/1530 141/3035
+f 1959/1531 141/3035 1960/1530
+f 1954/1522 1960/1530 506/3034
+f 1958/1528 506/3034 1960/1530
+f 124/3033 1949/1519 1962/1534
+f 1963/1533 1962/1534 1949/1519
+f 506/3034 1958/1528 1961/1532
+f 1963/1533 1961/1532 1958/1528
+f 125/3013 1870/1422 1943/1513
+f 1964/1535 1943/1513 1870/1422
+f 1961/1532 1964/1535 506/3034
+f 1955/1526 506/3034 1964/1535
+f 1969/1537 1968/1541 1965/1539
+f 114/3036 1965/1539 1968/1541
+f 1967/1542 1969/1537 510/3037
+f 1966/1536 510/3037 1969/1537
+f 1970/1546 1973/1544 139/3038
+f 1972/1547 139/3038 1973/1544
+f 510/3037 1966/1536 1971/1543
+f 1973/1544 1971/1543 1966/1536
+f 1962/1534 1976/1549 124/3033
+f 1975/1550 124/3033 1976/1549
+f 1974/1548 510/3037 1976/1549
+f 1971/1543 1976/1549 510/3037
+f 141/3035 1977/1551 1957/1529
+f 1978/1552 1957/1529 1977/1551
+f 1978/1552 1967/1542 1974/1548
+f 510/3037 1974/1548 1967/1542
+f 1912/1472 1982/1554 116/3024
+f 1981/1556 116/3024 1982/1554
+f 1979/1553 512/3039 1982/1554
+f 1980/1557 1982/1554 512/3039
+f 133/3021 1951/1521 1921/1483
+f 1984/1559 1921/1483 1951/1521
+f 1983/1558 512/3039 1984/1559
+f 1979/1553 1984/1559 512/3039
+f 124/3033 1975/1550 1947/1517
+f 1986/1560 1947/1517 1975/1550
+f 1986/1560 1985/1561 1983/1558
+f 512/3039 1983/1558 1985/1561
+f 1988/1562 1970/1546 1987/1563
+f 139/3038 1987/1563 1970/1546
+f 1985/1561 1988/1562 512/3039
+f 1980/1557 512/3039 1988/1562
+f 1926/1489 1992/1565 111/3027
+f 1991/1567 111/3027 1992/1565
+f 1990/1568 1992/1565 516/3040
+f 1989/1564 516/3040 1992/1565
+f 136/3031 1993/1572 1936/1502
+f 1995/1570 1936/1502 1993/1572
+f 516/3040 1989/1564 1994/1569
+f 1995/1570 1994/1569 1989/1564
+f 1996/1575 1999/1576 126/3041
+f 1998/1577 126/3041 1999/1576
+f 1994/1569 1999/1576 516/3040
+f 1997/1574 516/3040 1999/1576
+f 127/3018 2000/1579 2001/1580
+f 2002/1578 2001/1580 2000/1579
+f 516/3040 1997/1574 1990/1568
+f 2002/1578 1990/1568 1997/1574
+f 2007/1582 2006/1585 2003/1584
+f 117/3042 2003/1584 2006/1585
+f 520/3043 2005/1587 2004/1581
+f 2007/1582 2004/1581 2005/1587
+f 2008/1590 2011/1591 138/3044
+f 2010/1592 138/3044 2011/1591
+f 2009/1589 520/3043 2011/1591
+f 2004/1581 2011/1591 520/3043
+f 126/3041 1998/1577 2013/1595
+f 2014/1594 2013/1595 1998/1577
+f 520/3043 2009/1589 2012/1593
+f 2014/1594 2012/1593 2009/1589
+f 2015/1596 2016/1597 136/3031
+f 1993/1572 136/3031 2016/1597
+f 2012/1593 2016/1597 520/3043
+f 2005/1587 520/3043 2016/1597
+f 1968/1541 2020/1598 114/3036
+f 2019/1601 114/3036 2020/1598
+f 2017/1599 523/3045 2020/1598
+f 2018/1600 2020/1598 523/3045
+f 2021/1603 2023/1605 141/3035
+f 1977/1551 141/3035 2023/1605
+f 523/3045 2017/1599 2022/1606
+f 2023/1605 2022/1606 2017/1599
+f 2013/1595 2026/1608 126/3041
+f 2025/1609 126/3041 2026/1608
+f 2022/1606 2026/1608 523/3045
+f 2024/1607 523/3045 2026/1608
+f 138/3044 2027/1611 2008/1590
+f 2028/1610 2008/1590 2027/1611
+f 523/3045 2024/1607 2018/1600
+f 2028/1610 2018/1600 2024/1607
+f 1881/1435 2031/1613 115/3011
+f 1953/1525 115/3011 2031/1613
+f 524/3046 2030/1614 2029/1612
+f 2031/1613 2029/1612 2030/1614
+f 127/3018 2001/1580 1890/1445
+f 2033/1616 1890/1445 2001/1580
+f 2032/1615 524/3046 2033/1616
+f 2029/1612 2033/1616 524/3046
+f 126/3041 2025/1609 1996/1575
+f 2035/1618 1996/1575 2025/1609
+f 524/3046 2032/1615 2034/1617
+f 2035/1618 2034/1617 2032/1615
+f 141/3035 1959/1531 2021/1603
+f 2036/1619 2021/1603 1959/1531
+f 2034/1617 2036/1619 524/3046
+f 2030/1614 524/3046 2036/1619
+f 2041/1621 2040/1624 2037/1623
+f 112/3047 2037/1623 2040/1624
+f 529/3048 2039/1626 2038/1620
+f 2041/1621 2038/1620 2039/1626
+f 140/3049 2042/1629 2044/1631
+f 2045/1630 2044/1631 2042/1629
+f 2043/1628 529/3048 2045/1630
+f 2038/1620 2045/1630 529/3048
+f 130/3050 2046/1635 2048/1636
+f 2049/1633 2048/1636 2046/1635
+f 2049/1633 2047/1632 2043/1628
+f 529/3048 2043/1628 2047/1632
+f 131/3051 2050/1638 2051/1639
+f 2052/1637 2051/1639 2050/1638
+f 2047/1632 2052/1637 529/3048
+f 2039/1626 529/3048 2052/1637
+f 2056/1644 118/3052 2057/1640
+f 2053/1641 2057/1640 118/3052
+f 533/3053 2055/1646 2054/1642
+f 2057/1640 2054/1642 2055/1646
+f 2061/1650 2060/1651 2058/1649
+f 142/3054 2058/1649 2060/1651
+f 2054/1642 2061/1650 533/3053
+f 2059/1648 533/3053 2061/1650
+f 130/3050 2048/1636 2063/1654
+f 2064/1653 2063/1654 2048/1636
+f 533/3053 2059/1648 2062/1652
+f 2064/1653 2062/1652 2059/1648
+f 2065/1655 2066/1656 140/3049
+f 2042/1629 140/3049 2066/1656
+f 2062/1652 2066/1656 533/3053
+f 2055/1646 533/3053 2066/1656
+f 2067/1659 2071/1660 113/3055
+f 2070/1662 113/3055 2071/1660
+f 2069/1663 2071/1660 538/3056
+f 2068/1658 538/3056 2071/1660
+f 135/3057 2072/1664 2074/1668
+f 2075/1666 2074/1668 2072/1664
+f 2075/1666 2073/1667 2068/1658
+f 538/3056 2068/1658 2073/1667
+f 2076/1671 2079/1672 134/3058
+f 2078/1673 134/3058 2079/1672
+f 2073/1667 2079/1672 538/3056
+f 2077/1670 538/3056 2079/1672
+f 137/3059 2080/1674 2081/1676
+f 2082/1675 2081/1676 2080/1674
+f 538/3056 2077/1670 2069/1663
+f 2082/1675 2069/1663 2077/1670
+f 118/3052 2083/1680 2053/1641
+f 2086/1678 2053/1641 2083/1680
+f 541/3060 2085/1681 2084/1677
+f 2086/1678 2084/1677 2085/1681
+f 2081/1676 2089/1683 137/3059
+f 2088/1684 137/3059 2089/1683
+f 2084/1677 2089/1683 541/3060
+f 2087/1682 541/3060 2089/1683
+f 134/3058 2090/1688 2076/1671
+f 2092/1686 2076/1671 2090/1688
+f 541/3060 2087/1682 2091/1685
+f 2092/1686 2091/1685 2087/1682
+f 2060/1651 2094/1689 142/3054
+f 2093/1690 142/3054 2094/1689
+f 2091/1685 2094/1689 541/3060
+f 2085/1681 541/3060 2094/1689
+f 2095/1693 2098/1694 114/3036
+f 1965/1539 114/3036 2098/1694
+f 2097/1695 2098/1694 544/3061
+f 2096/1692 544/3061 2098/1694
+f 118/3052 2056/1644 2100/1698
+f 2101/1697 2100/1698 2056/1644
+f 544/3061 2096/1692 2099/1696
+f 2101/1697 2099/1696 2096/1692
+f 2102/1701 2104/1702 140/3049
+f 2065/1655 140/3049 2104/1702
+f 2099/1696 2104/1702 544/3061
+f 2103/1700 544/3061 2104/1702
+f 139/3038 1972/1547 2105/1704
+f 2106/1703 2105/1704 1972/1547
+f 544/3061 2103/1700 2097/1695
+f 2106/1703 2097/1695 2103/1700
+f 138/3044 2010/1592 2109/1707
+f 2110/1705 2109/1707 2010/1592
+f 2110/1705 2107/1706 2108/1709
+f 547/3062 2108/1709 2107/1706
+f 2111/1711 2113/1710 117/3042
+f 2003/1584 117/3042 2113/1710
+f 2107/1706 2113/1710 547/3062
+f 2112/1712 547/3062 2113/1710
+f 2070/1662 2116/1715 113/3055
+f 2115/1716 113/3055 2116/1715
+f 547/3062 2112/1712 2114/1714
+f 2116/1715 2114/1714 2112/1712
+f 2117/1717 2118/1718 137/3059
+f 2080/1674 137/3059 2118/1718
+f 2108/1709 547/3062 2118/1718
+f 2114/1714 2118/1718 547/3062
+f 132/3026 1916/1479 2121/1721
+f 2122/1720 2121/1721 1916/1479
+f 2122/1720 2119/1719 2120/1723
+f 550/3063 2120/1723 2119/1719
+f 2123/1725 2125/1724 116/3024
+f 1909/1471 116/3024 2125/1724
+f 2119/1719 2125/1724 550/3063
+f 2124/1726 550/3063 2125/1724
+f 2128/1729 2127/1730 2040/1624
+f 112/3047 2040/1624 2127/1730
+f 550/3063 2124/1726 2126/1728
+f 2128/1729 2126/1728 2124/1726
+f 131/3051 2129/1731 2050/1638
+f 2130/1732 2050/1638 2129/1731
+f 2120/1723 550/3063 2130/1732
+f 2126/1728 2130/1732 550/3063
+f 137/3059 2088/1684 2117/1717
+f 2133/1734 2117/1717 2088/1684
+f 551/3064 2132/1735 2131/1733
+f 2133/1734 2131/1733 2132/1735
+f 2100/1698 2135/1737 118/3052
+f 2083/1680 118/3052 2135/1737
+f 2131/1733 2135/1737 551/3064
+f 2134/1736 551/3064 2135/1737
+f 114/3036 2019/1601 2095/1693
+f 2137/1739 2095/1693 2019/1601
+f 551/3064 2134/1736 2136/1738
+f 2137/1739 2136/1738 2134/1736
+f 2109/1707 2138/1740 138/3044
+f 2027/1611 138/3044 2138/1740
+f 2136/1738 2138/1740 551/3064
+f 2132/1735 551/3064 2138/1740
+f 2105/1704 2141/1742 139/3038
+f 1987/1563 139/3038 2141/1742
+f 2140/1743 2141/1742 552/3065
+f 2139/1741 552/3065 2141/1742
+f 140/3049 2044/1631 2102/1701
+f 2143/1745 2102/1701 2044/1631
+f 552/3065 2139/1741 2142/1744
+f 2143/1745 2142/1744 2139/1741
+f 2127/1730 2145/1747 112/3047
+f 2037/1623 112/3047 2145/1747
+f 2142/1744 2145/1747 552/3065
+f 2144/1746 552/3065 2145/1747
+f 116/3024 1981/1556 2123/1725
+f 2146/1748 2123/1725 1981/1556
+f 552/3065 2144/1746 2140/1743
+f 2146/1748 2140/1743 2144/1746
+f 132/3026 2121/1721 2149/1751
+f 2150/1749 2149/1751 2121/1721
+f 2148/1753 2150/1749 556/3066
+f 2147/1750 556/3066 2150/1749
+f 2153/1755 2129/1731 2151/1757
+f 131/3051 2151/1757 2129/1731
+f 556/3066 2147/1750 2152/1754
+f 2153/1755 2152/1754 2147/1750
+f 2154/1760 2157/1761 143/3067
+f 2156/1762 143/3067 2157/1761
+f 2155/1759 556/3066 2157/1761
+f 2152/1754 2157/1761 556/3066
+f 144/3068 2158/1763 2159/1765
+f 2160/1764 2159/1765 2158/1763
+f 2160/1764 2148/1753 2155/1759
+f 556/3066 2155/1759 2148/1753
+f 2159/1767 2164/1766 144/3069
+f 2163/1771 144/3069 2164/1766
+f 2162/1772 2164/1766 560/3070
+f 2161/1768 560/3070 2164/1766
+f 2167/1774 2154/1777 2165/1776
+f 143/3071 2165/1776 2154/1777
+f 560/3070 2161/1768 2166/1773
+f 2167/1774 2166/1773 2161/1768
+f 2168/1780 2171/1781 145/3072
+f 2170/1782 145/3072 2171/1781
+f 2169/1779 560/3070 2171/1781
+f 2166/1773 2171/1781 560/3070
+f 146/3073 2172/1784 2173/1785
+f 2174/1783 2173/1785 2172/1784
+f 560/3070 2169/1779 2162/1772
+f 2174/1783 2162/1772 2169/1779
+f 144/3069 2163/1771 2177/1788
+f 2178/1787 2177/1788 2163/1771
+f 564/3074 2176/1790 2175/1786
+f 2178/1787 2175/1786 2176/1790
+f 2179/1793 2181/1794 146/3073
+f 2172/1784 146/3073 2181/1794
+f 2175/1786 2181/1794 564/3074
+f 2180/1792 564/3074 2181/1794
+f 150/3075 2182/1798 2184/1799
+f 2185/1796 2184/1799 2182/1798
+f 564/3074 2180/1792 2183/1795
+f 2185/1796 2183/1795 2180/1792
+f 2186/1800 2188/1801 149/3076
+f 2187/1802 149/3076 2188/1801
+f 2183/1795 2188/1801 564/3074
+f 2176/1790 564/3074 2188/1801
+f 146/3073 2189/1806 2179/1793
+f 2192/1804 2179/1793 2189/1806
+f 568/3077 2191/1807 2190/1803
+f 2192/1804 2190/1803 2191/1807
+f 147/3078 2193/1811 2195/1812
+f 2196/1809 2195/1812 2193/1811
+f 2190/1803 2196/1809 568/3077
+f 2194/1808 568/3077 2196/1809
+f 2200/1814 2199/1817 2197/1816
+f 151/3079 2197/1816 2199/1817
+f 568/3077 2194/1808 2198/1813
+f 2200/1814 2198/1813 2194/1808
+f 2184/1799 2202/1818 150/3075
+f 2201/1819 150/3075 2202/1818
+f 2198/1813 2202/1818 568/3077
+f 2191/1807 568/3077 2202/1818
+f 2207/1821 2206/1826 2203/1823
+f 152/3080 2203/1823 2206/1826
+f 573/3081 2205/1824 2204/1820
+f 2207/1821 2204/1820 2205/1824
+f 2208/1830 2211/1828 155/3082
+f 2210/1831 155/3082 2211/1828
+f 573/3081 2204/1820 2209/1827
+f 2211/1828 2209/1827 2204/1820
+f 2212/1834 2215/1835 154/3083
+f 2214/1836 154/3083 2215/1835
+f 573/3081 2209/1827 2213/1833
+f 2215/1835 2213/1833 2209/1827
+f 2216/1837 2218/1838 157/3084
+f 2217/1839 157/3084 2218/1838
+f 573/3081 2213/1833 2205/1824
+f 2218/1838 2205/1824 2213/1833
+f 2223/1841 2222/1846 2219/1843
+f 153/3085 2219/1843 2222/1846
+f 578/3086 2221/1844 2220/1840
+f 2223/1841 2220/1840 2221/1844
+f 2224/1850 2227/1848 158/3087
+f 2226/1851 158/3087 2227/1848
+f 578/3086 2220/1840 2225/1847
+f 2227/1848 2225/1847 2220/1840
+f 2228/1854 2231/1855 156/3088
+f 2230/1856 156/3088 2231/1855
+f 2225/1847 2231/1855 578/3086
+f 2229/1853 578/3086 2231/1855
+f 2232/1857 2234/1858 159/3089
+f 2233/1859 159/3089 2234/1858
+f 578/3086 2229/1853 2221/1844
+f 2234/1858 2221/1844 2229/1853
+f 127/3090 2235/1862 2000/1865
+f 2238/1863 2000/1865 2235/1862
+f 2237/1866 2238/1863 581/3091
+f 2236/1861 581/3091 2238/1863
+f 2226/1851 2241/1868 158/3087
+f 2240/1869 158/3087 2241/1868
+f 581/3091 2236/1861 2239/1867
+f 2241/1868 2239/1867 2236/1861
+f 2242/1872 2244/1873 153/3085
+f 2219/1843 153/3085 2244/1873
+f 2239/1867 2244/1873 581/3091
+f 2243/1871 581/3091 2244/1873
+f 2246/1874 2245/1876 1991/1875
+f 111/3092 1991/1875 2245/1876
+f 581/3091 2243/1871 2237/1866
+f 2246/1874 2237/1866 2243/1871
+f 129/3093 2247/1880 2250/1881
+f 2251/1878 2250/1881 2247/1880
+f 585/3094 2249/1883 2248/1877
+f 2251/1878 2248/1877 2249/1883
+f 2233/1859 2254/1885 159/3089
+f 2253/1886 159/3089 2254/1885
+f 2248/1877 2254/1885 585/3094
+f 2252/1884 585/3094 2254/1885
+f 2255/1889 2257/1890 156/3088
+f 2228/1854 156/3088 2257/1890
+f 585/3094 2252/1884 2256/1888
+f 2257/1890 2256/1888 2252/1884
+f 122/3095 2258/1891 2259/1893
+f 2260/1892 2259/1893 2258/1891
+f 2256/1888 2260/1892 585/3094
+f 2249/1883 585/3094 2260/1892
+f 127/3090 1891/1897 2235/1862
+f 2263/1895 2235/1862 1891/1897
+f 586/3096 2262/1898 2261/1894
+f 2263/1895 2261/1894 2262/1898
+f 122/3095 2259/1893 1886/1901
+f 2265/1900 1886/1901 2259/1893
+f 2261/1894 2265/1900 586/3096
+f 2264/1899 586/3096 2265/1900
+f 2230/1856 2267/1903 156/3088
+f 2255/1889 156/3088 2267/1903
+f 586/3096 2264/1899 2266/1902
+f 2267/1903 2266/1902 2264/1899
+f 2240/1869 2268/1904 158/3087
+f 2224/1850 158/3087 2268/1904
+f 2266/1902 2268/1904 586/3096
+f 2262/1898 586/3096 2268/1904
+f 129/3093 1930/1906 2247/1880
+f 2271/1905 2247/1880 1930/1906
+f 2269/1907 587/3097 2271/1905
+f 2270/1909 2271/1905 587/3097
+f 2273/1911 1923/1912 2245/1876
+f 111/3092 2245/1876 1923/1912
+f 2273/1911 2272/1910 2269/1907
+f 587/3097 2269/1907 2272/1910
+f 2222/1846 2275/1914 153/3085
+f 2242/1872 153/3085 2275/1914
+f 2272/1910 2275/1914 587/3097
+f 2274/1913 587/3097 2275/1914
+f 2253/1886 2276/1915 159/3089
+f 2232/1857 159/3089 2276/1915
+f 587/3097 2274/1913 2270/1909
+f 2276/1915 2270/1909 2274/1913
+f 125/3098 1945/1918 2279/1921
+f 2280/1919 2279/1921 1945/1918
+f 2278/1922 2280/1919 590/3099
+f 2277/1917 590/3099 2280/1919
+f 2283/1924 1939/1927 2281/1926
+f 110/3100 2281/1926 1939/1927
+f 590/3099 2277/1917 2282/1923
+f 2283/1924 2282/1923 2277/1917
+f 2206/1826 2286/1929 152/3080
+f 2285/1930 152/3080 2286/1929
+f 2282/1923 2286/1929 590/3099
+f 2284/1928 590/3099 2286/1929
+f 2287/1932 2288/1931 157/3084
+f 2216/1837 157/3084 2288/1931
+f 590/3099 2284/1928 2278/1922
+f 2288/1931 2278/1922 2284/1928
+f 2289/1934 2292/1933 121/3101
+f 1906/1938 121/3101 2292/1933
+f 2291/1939 2292/1933 592/3102
+f 2290/1935 592/3102 2292/1933
+f 2210/1831 2295/1941 155/3082
+f 2294/1942 155/3082 2295/1941
+f 592/3102 2290/1935 2293/1940
+f 2295/1941 2293/1940 2290/1935
+f 2285/1930 2297/1944 152/3080
+f 2203/1823 152/3080 2297/1944
+f 2293/1940 2297/1944 592/3102
+f 2296/1943 592/3102 2297/1944
+f 2281/1926 110/3100 2298/1946
+f 1896/1945 2298/1946 110/3100
+f 2298/1946 2291/1939 2296/1943
+f 592/3102 2296/1943 2291/1939
+f 1874/1949 2302/1950 119/3103
+f 2301/1952 119/3103 2302/1950
+f 2300/1953 2302/1950 594/3104
+f 2299/1948 594/3104 2302/1950
+f 125/3098 2279/1921 1868/1956
+f 2304/1955 1868/1956 2279/1921
+f 594/3104 2299/1948 2303/1954
+f 2304/1955 2303/1954 2299/1948
+f 2217/1839 2306/1958 157/3084
+f 2287/1932 157/3084 2306/1958
+f 2303/1954 2306/1958 594/3104
+f 2305/1957 594/3104 2306/1958
+f 2308/1959 2212/1834 2307/1960
+f 154/3083 2307/1960 2212/1834
+f 594/3104 2305/1957 2300/1953
+f 2308/1959 2300/1953 2305/1957
+f 121/3101 2309/1964 2289/1934
+f 2312/1962 2289/1934 2309/1964
+f 596/3105 2311/1965 2310/1961
+f 2312/1962 2310/1961 2311/1965
+f 119/3103 2301/1952 2314/1968
+f 2315/1967 2314/1968 2301/1952
+f 2310/1961 2315/1967 596/3105
+f 2313/1966 596/3105 2315/1967
+f 2214/1836 2317/1970 154/3083
+f 2307/1960 154/3083 2317/1970
+f 596/3105 2313/1966 2316/1969
+f 2317/1970 2316/1969 2313/1966
+f 2294/1942 2318/1971 155/3082
+f 2208/1830 155/3082 2318/1971
+f 2316/1969 2318/1971 596/3105
+f 2311/1965 596/3105 2318/1971
+f 147/3078 2195/1812 2321/1974
+f 2322/1973 2321/1974 2195/1812
+f 600/3106 2320/1976 2319/1972
+f 2322/1973 2319/1972 2320/1976
+f 2173/1785 2324/1978 146/3073
+f 2189/1806 146/3073 2324/1978
+f 2319/1972 2324/1978 600/3106
+f 2323/1977 600/3106 2324/1978
+f 2327/1980 2168/1780 2325/1982
+f 145/3072 2325/1982 2168/1780
+f 600/3106 2323/1977 2326/1979
+f 2327/1980 2326/1979 2323/1977
+f 2328/1985 2331/1986 162/3107
+f 2330/1987 162/3107 2331/1986
+f 600/3106 2326/1979 2329/1984
+f 2331/1986 2329/1984 2326/1979
+f 160/3108 2332/1989 2333/1990
+f 2334/1988 2333/1990 2332/1989
+f 600/3106 2329/1984 2320/1976
+f 2334/1988 2320/1976 2329/1984
+f 161/3109 2335/1991 2338/1995
+f 2339/1993 2338/1995 2335/1991
+f 2339/1993 2336/1994 2337/1997
+f 605/3110 2337/1997 2336/1994
+f 2343/1999 2342/2002 2340/2001
+f 165/3111 2340/2001 2342/2002
+f 605/3110 2336/1994 2341/1998
+f 2343/1999 2341/1998 2336/1994
+f 2344/2006 2347/2004 166/3112
+f 2346/2007 166/3112 2347/2004
+f 2341/1998 2347/2004 605/3110
+f 2345/2003 605/3110 2347/2004
+f 2349/2010 162/3113 2350/2009
+f 2348/2008 2350/2009 162/3113
+f 2337/1997 605/3110 2350/2009
+f 2345/2003 2350/2009 605/3110
+f 2353/2012 2328/2015 2349/2010
+f 162/3113 2349/2010 2328/2015
+f 608/3114 2352/2013 2351/2011
+f 2353/2012 2351/2011 2352/2013
+f 2354/2018 2356/2019 166/3112
+f 2344/2006 166/3112 2356/2019
+f 2351/2011 2356/2019 608/3114
+f 2355/2017 608/3114 2356/2019
+f 2360/2021 2359/2024 2357/2023
+f 163/3115 2357/2023 2359/2024
+f 608/3114 2355/2017 2358/2020
+f 2360/2021 2358/2020 2355/2017
+f 160/3116 2333/2026 2361/2027
+f 2362/2025 2361/2027 2333/2026
+f 2358/2020 2362/2025 608/3114
+f 2352/2013 608/3114 2362/2025
+f 2366/2029 2365/2030 2359/2024
+f 163/3115 2359/2024 2365/2030
+f 611/3117 2364/2032 2363/2028
+f 2366/2029 2363/2028 2364/2032
+f 2346/2007 2368/2034 166/3112
+f 2354/2018 166/3112 2368/2034
+f 2363/2028 2368/2034 611/3117
+f 2367/2033 611/3117 2368/2034
+f 2369/2038 2371/2036 165/3111
+f 2340/2001 165/3111 2371/2036
+f 611/3117 2367/2033 2370/2035
+f 2371/2036 2370/2035 2367/2033
+f 2373/2041 164/3118 2374/2040
+f 2372/2039 2374/2040 164/3118
+f 2370/2035 2374/2040 611/3117
+f 2364/2032 611/3117 2374/2040
+f 2375/2044 2378/2045 69/3119
+f 1247/2047 69/3119 2378/2045
+f 2377/2048 2378/2045 614/3120
+f 2376/2043 614/3120 2378/2045
+f 131/3051 2051/1639 2380/2051
+f 2381/2050 2380/2051 2051/1639
+f 614/3120 2376/2043 2379/2049
+f 2381/2050 2379/2049 2376/2043
+f 2382/2054 2384/2055 130/3050
+f 2046/1635 130/3050 2384/2055
+f 2379/2049 2384/2055 614/3120
+f 2383/2053 614/3120 2384/2055
+f 76/3121 1254/2057 2385/2058
+f 2386/2056 2385/2058 1254/2057
+f 614/3120 2383/2053 2377/2048
+f 2386/2056 2377/2048 2383/2053
+f 2387/2061 2390/2062 77/3122
+f 1261/2064 77/3122 2390/2062
+f 2388/2060 617/3123 2390/2062
+f 2389/2063 2390/2062 617/3123
+f 142/3054 2093/1690 2392/2068
+f 2393/2067 2392/2068 2093/1690
+f 617/3123 2388/2060 2391/2066
+f 2393/2067 2391/2066 2388/2060
+f 2394/2070 2396/2069 134/3058
+f 2090/1688 134/3058 2396/2069
+f 2391/2066 2396/2069 617/3123
+f 2395/2071 617/3123 2396/2069
+f 2398/2073 2397/2075 1268/2074
+f 80/3124 1268/2074 2397/2075
+f 617/3123 2395/2071 2389/2063
+f 2398/2073 2389/2063 2395/2071
+f 2397/2075 2401/2077 80/3124
+f 1275/2079 80/3124 2401/2077
+f 2399/2076 619/3125 2401/2077
+f 2400/2078 2401/2077 619/3125
+f 134/3058 2078/1673 2394/2070
+f 2403/2081 2394/2070 2078/1673
+f 2403/2081 2402/2082 2399/2076
+f 619/3125 2399/2076 2402/2082
+f 2404/2085 2406/2086 135/3057
+f 2072/1664 135/3057 2406/2086
+f 2405/2084 619/3125 2406/2086
+f 2402/2082 2406/2086 619/3125
+f 2408/2087 2407/2089 1281/2088
+f 81/3126 1281/2088 2407/2089
+f 619/3125 2405/2084 2400/2078
+f 2408/2087 2400/2078 2405/2084
+f 2409/2092 2412/2093 67/3127
+f 1287/2095 67/3127 2412/2093
+f 2411/2096 2412/2093 622/3128
+f 2410/2091 622/3128 2412/2093
+f 2415/2098 2414/2101 2314/2100
+f 119/3014 2314/2100 2414/2101
+f 622/3128 2410/2091 2413/2097
+f 2415/2098 2413/2097 2410/2091
+f 2416/2103 2418/2102 121/3023
+f 2309/2106 121/3023 2418/2102
+f 2417/2104 622/3128 2418/2102
+f 2413/2097 2418/2102 622/3128
+f 2420/2107 2419/2109 1294/2108
+f 68/3129 1294/2108 2419/2109
+f 622/3128 2417/2104 2411/2096
+f 2420/2107 2411/2096 2417/2104
+f 78/3130 2421/2110 1301/2116
+f 2424/2112 1301/2116 2421/2110
+f 2424/2112 2422/2113 2423/2114
+f 625/3131 2423/2114 2422/2113
+f 2250/2119 2427/2120 129/3029
+f 2426/2121 129/3029 2427/2120
+f 2422/2113 2427/2120 625/3131
+f 2425/2118 625/3131 2427/2120
+f 2430/2123 2258/2126 2428/2125
+f 122/3017 2428/2125 2258/2126
+f 625/3131 2425/2118 2429/2122
+f 2430/2123 2429/2122 2425/2118
+f 2431/2129 74/3132 2432/2127
+f 1308/2128 2432/2127 74/3132
+f 2429/2122 2432/2127 625/3131
+f 2423/2114 625/3131 2432/2127
+f 2385/2058 2435/2131 76/3121
+f 1315/2133 76/3121 2435/2131
+f 2434/2132 2435/2131 626/3133
+f 2433/2130 626/3133 2435/2131
+f 130/3050 2063/1654 2382/2054
+f 2437/2135 2382/2054 2063/1654
+f 626/3133 2433/2130 2436/2136
+f 2437/2135 2436/2136 2433/2130
+f 2058/1649 142/3054 2439/2137
+f 2392/2068 2439/2137 142/3054
+f 2436/2136 2439/2137 626/3133
+f 2438/2138 626/3133 2439/2137
+f 2440/2139 2387/2061 1320/2140
+f 77/3122 1320/2140 2387/2061
+f 626/3133 2438/2138 2434/2132
+f 2440/2139 2434/2132 2438/2138
+f 2441/2143 2444/2144 70/3134
+f 1325/2146 70/3134 2444/2144
+f 2443/2147 2444/2144 628/3135
+f 2442/2142 628/3135 2444/2144
+f 143/3067 2156/1762 2446/2150
+f 2447/2148 2446/2150 2156/1762
+f 628/3135 2442/2142 2445/2149
+f 2447/2148 2445/2149 2442/2142
+f 2380/2051 2449/2152 131/3051
+f 2151/1757 131/3051 2449/2152
+f 2445/2149 2449/2152 628/3135
+f 2448/2151 628/3135 2449/2152
+f 69/3119 1331/2153 2375/2044
+f 2450/2154 2375/2044 1331/2153
+f 628/3135 2448/2151 2443/2147
+f 2450/2154 2443/2147 2448/2151
+f 2451/2157 2454/2158 71/2904
+f 1337/746 71/2904 2454/2158
+f 2452/2156 630/3136 2454/2158
+f 2453/2159 2454/2158 630/3136
+f 145/3137 2170/2160 2456/2164
+f 2457/2162 2456/2164 2170/2160
+f 2457/2162 2455/2163 2452/2156
+f 630/3136 2452/2156 2455/2163
+f 2165/2169 143/3138 2459/2165
+f 2446/2166 2459/2165 143/3138
+f 2455/2163 2459/2165 630/3136
+f 2458/2167 630/3136 2459/2165
+f 2460/2170 2441/2171 1343/756
+f 70/2906 1343/756 2441/2171
+f 630/3136 2458/2167 2453/2159
+f 2460/2170 2453/2159 2458/2167
+f 59/2909 2461/2174 1349/766
+f 2464/2175 1349/766 2461/2174
+f 2463/2176 2464/2175 634/3139
+f 2462/2173 634/3139 2464/2175
+f 147/3078 2465/2177 2467/2181
+f 2468/2179 2467/2181 2465/2177
+f 634/3139 2462/2173 2466/2180
+f 2468/2179 2466/2180 2462/2173
+f 2469/2184 2472/2185 148/3140
+f 2471/2186 148/3140 2472/2185
+f 2466/2180 2472/2185 634/3139
+f 2470/2183 634/3139 2472/2185
+f 65/2911 1356/776 2473/2188
+f 2474/2187 2473/2188 1356/776
+f 634/3139 2470/2183 2463/2176
+f 2474/2187 2463/2176 2470/2183
+f 2475/2190 2478/2189 60/2913
+f 1365/785 60/2913 2478/2189
+f 636/3141 2477/2193 2476/2191
+f 2478/2189 2476/2191 2477/2193
+f 2199/1817 2481/2195 151/3079
+f 2480/2196 151/3079 2481/2195
+f 2479/2194 636/3141 2481/2195
+f 2476/2191 2481/2195 636/3141
+f 147/3078 2467/2181 2193/1811
+f 2483/2198 2193/1811 2467/2181
+f 2483/2198 2482/2197 2479/2194
+f 636/3141 2479/2194 2482/2197
+f 2484/2199 2461/2174 1371/794
+f 59/2909 1371/794 2461/2174
+f 2482/2197 2484/2199 636/3141
+f 2477/2193 636/3141 2484/2199
+f 2485/2202 2488/2203 79/3142
+f 1377/2205 79/3142 2488/2203
+f 2486/2201 638/3143 2488/2203
+f 2487/2204 2488/2203 638/3143
+f 128/3030 1934/1501 2490/2209
+f 2491/2208 2490/2209 1934/1501
+f 2491/2208 2489/2207 2486/2201
+f 638/3143 2486/2201 2489/2207
+f 129/3029 2426/2121 1928/1495
+f 2493/2211 1928/1495 2426/2121
+f 2489/2207 2493/2211 638/3143
+f 2492/2210 638/3143 2493/2211
+f 1383/2213 2494/2212 78/3130
+f 2421/2110 78/3130 2494/2212
+f 638/3143 2492/2210 2487/2204
+f 2494/2212 2487/2204 2492/2210
+f 68/3129 2419/2109 1389/2218
+f 2497/2215 1389/2218 2419/2109
+f 640/3144 2496/2216 2495/2214
+f 2497/2215 2495/2214 2496/2216
+f 1907/1467 2499/2220 121/3023
+f 2416/2103 121/3023 2499/2220
+f 2495/2214 2499/2220 640/3144
+f 2498/2219 640/3144 2499/2220
+f 120/3022 2500/2224 1902/1462
+f 2502/2222 1902/1462 2500/2224
+f 640/3144 2498/2219 2501/2221
+f 2502/2222 2501/2221 2498/2219
+f 2503/2227 58/3145 2504/2225
+f 1395/2226 2504/2225 58/3145
+f 2501/2221 2504/2225 640/3144
+f 2496/2216 640/3144 2504/2225
+f 2507/2229 1401/2232 2503/2227
+f 58/3145 2503/2227 1401/2232
+f 642/3146 2506/2230 2505/2228
+f 2507/2229 2505/2228 2506/2230
+f 1919/1482 2509/2234 120/3022
+f 2500/2224 120/3022 2509/2234
+f 2505/2228 2509/2234 642/3146
+f 2508/2233 642/3146 2509/2234
+f 132/3026 2510/2238 1914/1477
+f 2512/2236 1914/1477 2510/2238
+f 642/3146 2508/2233 2511/2235
+f 2512/2236 2511/2235 2508/2233
+f 2513/2241 61/3147 2514/2239
+f 1407/2240 2514/2239 61/3147
+f 2511/2235 2514/2239 642/3146
+f 2506/2230 642/3146 2514/2239
+f 2517/2243 1413/2246 2513/2241
+f 61/3147 2513/2241 1413/2246
+f 644/3148 2516/2244 2515/2242
+f 2517/2243 2515/2242 2516/2244
+f 2149/1751 2519/2247 132/3026
+f 2510/2238 132/3026 2519/2247
+f 2518/2248 644/3148 2519/2247
+f 2515/2242 2519/2247 644/3148
+f 144/3068 2520/2250 2158/1763
+f 2522/2249 2158/1763 2520/2250
+f 2518/2248 2522/2249 644/3148
+f 2521/2251 644/3148 2522/2249
+f 2524/2254 2523/2255 1419/2253
+f 62/3149 1419/2253 2523/2255
+f 644/3148 2521/2251 2516/2244
+f 2524/2254 2516/2244 2521/2251
+f 1425/857 62/2923 2527/2259
+f 2523/2258 2527/2259 62/2923
+f 2526/2260 2527/2259 646/3150
+f 2525/2257 646/3150 2527/2259
+f 144/3069 2177/1788 2520/2263
+f 2529/2262 2520/2263 2177/1788
+f 2529/2262 2528/2261 2525/2257
+f 646/3150 2525/2257 2528/2261
+f 2532/2265 2186/1800 2530/2267
+f 149/3076 2530/2267 2186/1800
+f 2528/2261 2532/2265 646/3150
+f 2531/2264 646/3150 2532/2265
+f 2533/2269 63/2925 2534/2268
+f 1431/866 2534/2268 63/2925
+f 646/3150 2531/2264 2526/2260
+f 2534/2268 2526/2260 2531/2264
+f 2537/2271 1437/875 2533/2269
+f 63/2925 2533/2269 1437/875
+f 648/3151 2536/2272 2535/2270
+f 2537/2271 2535/2270 2536/2272
+f 2187/1802 2539/2274 149/3076
+f 2530/2267 149/3076 2539/2274
+f 2535/2270 2539/2274 648/3151
+f 2538/2273 648/3151 2539/2274
+f 150/3075 2540/2278 2182/1798
+f 2542/2276 2182/1798 2540/2278
+f 648/3151 2538/2273 2541/2275
+f 2542/2276 2541/2275 2538/2273
+f 2543/2280 64/2927 2544/2279
+f 1443/881 2544/2279 64/2927
+f 2541/2275 2544/2279 648/3151
+f 2536/2272 648/3151 2544/2279
+f 2547/2282 1449/887 2543/2280
+f 64/2927 2543/2280 1449/887
+f 2545/2281 649/3152 2547/2282
+f 2546/2283 2547/2282 649/3152
+f 150/3075 2201/1819 2540/2278
+f 2549/2285 2540/2278 2201/1819
+f 2549/2285 2548/2284 2545/2281
+f 649/3152 2545/2281 2548/2284
+f 2480/2196 2551/2287 151/3079
+f 2197/1816 151/3079 2551/2287
+f 2548/2284 2551/2287 649/3152
+f 2550/2286 649/3152 2551/2287
+f 2552/2288 2475/2190 1454/893
+f 60/2913 1454/893 2475/2190
+f 649/3152 2550/2286 2546/2283
+f 2552/2288 2546/2283 2550/2286
+f 1461/899 2555/2289 71/2904
+f 2451/2157 71/2904 2555/2289
+f 651/3153 2554/2291 2553/2290
+f 2555/2289 2553/2290 2554/2291
+f 2558/2293 1470/916 2556/2295
+f 72/2932 2556/2295 1470/916
+f 651/3153 2553/2290 2557/2292
+f 2558/2293 2557/2292 2553/2290
+f 2338/2298 2561/2299 161/3154
+f 2560/2300 161/3154 2561/2299
+f 651/3153 2557/2292 2559/2297
+f 2561/2299 2559/2297 2557/2292
+f 162/3155 2330/2304 2348/2305
+f 2563/2302 2348/2305 2330/2304
+f 651/3153 2559/2297 2562/2301
+f 2563/2302 2562/2301 2559/2297
+f 2456/2164 2564/2306 145/3137
+f 2325/2307 145/3137 2564/2306
+f 651/3153 2562/2301 2554/2291
+f 2564/2306 2554/2291 2562/2301
+f 147/3078 2321/1974 2465/2177
+f 2567/2308 2465/2177 2321/1974
+f 2567/2308 2565/2309 2566/2310
+f 653/3156 2566/2310 2565/2309
+f 2568/2313 2570/2314 160/3108
+f 2332/1989 160/3108 2570/2314
+f 2565/2309 2570/2314 653/3156
+f 2569/2312 653/3156 2570/2314
+f 148/3140 2471/2186 2571/2316
+f 2572/2315 2571/2316 2471/2186
+f 2572/2315 2566/2310 2569/2312
+f 653/3156 2569/2312 2566/2310
+f 2577/2320 2576/2322 2573/2319
+f 170/3157 2573/2319 2576/2322
+f 2575/2323 2577/2320 658/3158
+f 2574/2318 658/3158 2577/2320
+f 2581/2325 2580/2328 2578/2327
+f 167/3159 2578/2327 2580/2328
+f 658/3158 2574/2318 2579/2324
+f 2581/2325 2579/2324 2574/2318
+f 2582/2331 2585/2332 169/3160
+f 2584/2333 169/3160 2585/2332
+f 2579/2324 2585/2332 658/3158
+f 2583/2330 658/3158 2585/2332
+f 2588/2334 2587/2336 2586/2335
+f 168/3161 2586/2335 2587/2336
+f 658/3158 2583/2330 2575/2323
+f 2588/2334 2575/2323 2583/2330
+f 2589/2339 2593/2340 177/3162
+f 2592/2342 177/3162 2593/2340
+f 2590/2338 663/3163 2593/2340
+f 2591/2341 2593/2340 663/3163
+f 178/3164 2594/2346 2596/2348
+f 2597/2347 2596/2348 2594/2346
+f 2597/2347 2595/2345 2590/2338
+f 663/3163 2590/2338 2595/2345
+f 2601/2349 2600/2353 2598/2350
+f 173/3165 2598/2350 2600/2353
+f 2595/2345 2601/2349 663/3163
+f 2599/2351 663/3163 2601/2349
+f 2604/2354 2603/2356 2602/2355
+f 174/3166 2602/2355 2603/2356
+f 663/3163 2599/2351 2591/2341
+f 2604/2354 2591/2341 2599/2351
+f 178/3164 2605/2360 2608/2361
+f 2609/2358 2608/2361 2605/2360
+f 2609/2358 2606/2357 2607/2363
+f 668/3167 2607/2363 2606/2357
+f 2612/2368 179/3168 2613/2364
+f 2610/2365 2613/2364 179/3168
+f 2606/2357 2613/2364 668/3167
+f 2611/2366 668/3167 2613/2364
+f 2617/2370 2616/2373 2614/2372
+f 172/3169 2614/2372 2616/2373
+f 668/3167 2611/2366 2615/2369
+f 2617/2370 2615/2369 2611/2366
+f 2620/2374 2619/2376 2618/2375
+f 171/3170 2618/2375 2619/2376
+f 2607/2363 668/3167 2620/2374
+f 2615/2369 2620/2374 668/3167
+f 174/3166 2603/2356 2623/2380
+f 2624/2377 2623/2380 2603/2356
+f 2622/2381 2624/2377 672/3171
+f 2621/2378 672/3171 2624/2377
+f 2627/2383 2598/2350 2625/2385
+f 173/3165 2625/2385 2598/2350
+f 672/3171 2621/2378 2626/2382
+f 2627/2383 2626/2382 2621/2378
+f 2628/2388 2631/2389 176/3172
+f 2630/2390 176/3172 2631/2389
+f 2629/2387 672/3171 2631/2389
+f 2626/2382 2631/2389 672/3171
+f 2632/2391 2634/2392 175/3173
+f 2633/2393 175/3173 2634/2392
+f 2622/2381 672/3171 2634/2392
+f 2629/2387 2634/2392 672/3171
+f 2637/2397 175/3173 2638/2394
+f 2633/2393 2638/2394 175/3173
+f 2636/2398 2638/2394 675/3174
+f 2635/2395 675/3174 2638/2394
+f 2641/2399 2628/2388 2639/2400
+f 176/3172 2639/2400 2628/2388
+f 2635/2395 2641/2399 675/3174
+f 2640/2401 675/3174 2641/2399
+f 2619/2376 2644/2403 171/3170
+f 2643/2405 171/3170 2644/2403
+f 675/3174 2640/2401 2642/2404
+f 2644/2403 2642/2404 2640/2401
+f 2614/2372 172/3169 2646/2406
+f 2645/2407 2646/2406 172/3169
+f 2642/2404 2646/2406 675/3174
+f 2636/2398 675/3174 2646/2406
+f 177/3162 2647/2410 2589/2339
+f 2650/2411 2589/2339 2647/2410
+f 678/3175 2649/2412 2648/2409
+f 2650/2411 2648/2409 2649/2412
+f 180/3176 2651/2416 2653/2417
+f 2654/2414 2653/2417 2651/2416
+f 678/3175 2648/2409 2652/2413
+f 2654/2414 2652/2413 2648/2409
+f 2656/2420 179/3168 2657/2418
+f 2612/2368 2657/2418 179/3168
+f 2652/2413 2657/2418 678/3175
+f 2655/2419 678/3175 2657/2418
+f 178/3164 2596/2348 2605/2360
+f 2658/2421 2605/2360 2596/2348
+f 2655/2419 2658/2421 678/3175
+f 2649/2412 678/3175 2658/2421
+f 2662/2423 2647/2428 2659/2425
+f 177/3177 2659/2425 2647/2428
+f 681/3178 2661/2426 2660/2422
+f 2662/2423 2660/2422 2661/2426
+f 160/3116 2361/2027 2664/2431
+f 2665/2430 2664/2431 2361/2027
+f 681/3178 2660/2422 2663/2429
+f 2665/2430 2663/2429 2660/2422
+f 2365/2030 2667/2433 163/3115
+f 2357/2023 163/3115 2667/2433
+f 2663/2429 2667/2433 681/3178
+f 2666/2432 681/3178 2667/2433
+f 164/3118 2668/2437 2372/2039
+f 2670/2435 2372/2039 2668/2437
+f 681/3178 2666/2432 2669/2434
+f 2670/2435 2669/2434 2666/2432
+f 2671/2440 180/3179 2672/2439
+f 2653/2438 2672/2439 180/3179
+f 681/3178 2669/2434 2661/2426
+f 2672/2439 2661/2426 2669/2434
+f 179/3180 2656/2441 2675/2445
+f 2676/2443 2675/2445 2656/2441
+f 2676/2443 2673/2444 2674/2447
+f 683/3181 2674/2447 2673/2444
+f 2671/2440 2678/2449 180/3179
+f 2651/2450 180/3179 2678/2449
+f 2677/2448 683/3181 2678/2449
+f 2673/2444 2678/2449 683/3181
+f 2373/2041 2680/2452 164/3118
+f 2668/2437 164/3118 2680/2452
+f 683/3181 2677/2448 2679/2451
+f 2680/2452 2679/2451 2677/2448
+f 2342/2002 2682/2453 165/3111
+f 2369/2038 165/3111 2682/2453
+f 683/3181 2679/2451 2681/2454
+f 2682/2453 2681/2454 2679/2451
+f 2683/2456 2684/2455 161/3109
+f 2335/1991 161/3109 2684/2455
+f 2681/2454 2684/2455 683/3181
+f 2674/2447 683/3181 2684/2455
+f 2616/2459 2688/2460 172/3182
+f 2687/2462 172/3182 2688/2460
+f 685/3183 2686/2463 2685/2458
+f 2688/2460 2685/2458 2686/2463
+f 179/3184 2675/2467 2610/2468
+f 2690/2465 2610/2468 2675/2467
+f 2690/2465 2689/2464 2685/2458
+f 685/3183 2685/2458 2689/2464
+f 2692/2470 2683/2471 2560/2300
+f 161/3154 2560/2300 2683/2471
+f 685/3183 2689/2464 2691/2469
+f 2692/2470 2691/2469 2689/2464
+f 72/2932 1602/1082 2556/2295
+f 2694/2473 2556/2295 1602/1082
+f 685/3183 2691/2469 2693/2472
+f 2694/2473 2693/2472 2691/2469
+f 2696/2474 1597/1077 2695/2475
+f 73/2961 2695/2475 1597/1077
+f 685/3183 2693/2472 2686/2463
+f 2696/2474 2686/2463 2693/2472
+f 1610/1096 2700/2477 66/2963
+f 2699/2478 66/2963 2700/2477
+f 688/3185 2698/2480 2697/2476
+f 2700/2477 2697/2476 2698/2480
+f 2473/2188 2702/2482 65/2911
+f 1621/1110 65/2911 2702/2482
+f 2697/2476 2702/2482 688/3185
+f 2701/2481 688/3185 2702/2482
+f 148/3140 2703/2486 2469/2184
+f 2705/2484 2469/2184 2703/2486
+f 688/3185 2701/2481 2704/2483
+f 2705/2484 2704/2483 2701/2481
+f 2707/2491 174/3186 2708/2488
+f 2623/2490 2708/2488 174/3186
+f 688/3185 2704/2483 2706/2487
+f 2708/2488 2706/2487 2704/2483
+f 2710/2492 2632/2494 2709/2493
+f 175/3187 2709/2493 2632/2494
+f 688/3185 2706/2487 2698/2480
+f 2710/2492 2698/2480 2706/2487
+f 73/2961 1625/1114 2695/2475
+f 2713/2495 2695/2475 1625/1114
+f 2713/2495 2711/2496 2712/2497
+f 689/3188 2712/2497 2711/2496
+f 66/2969 2699/2500 1631/1127
+f 2715/2501 1631/1127 2699/2500
+f 2711/2496 2715/2501 689/3188
+f 2714/2499 689/3188 2715/2501
+f 2717/2503 2709/2506 2637/2505
+f 175/3189 2637/2505 2709/2506
+f 689/3188 2714/2499 2716/2502
+f 2717/2503 2716/2502 2714/2499
+f 172/3182 2687/2462 2645/2508
+f 2718/2507 2645/2508 2687/2462
+f 689/3188 2716/2502 2712/2497
+f 2718/2507 2712/2497 2716/2502
+f 2571/2316 2721/2510 148/3140
+f 2703/2486 148/3140 2721/2510
+f 2720/2511 2721/2510 690/3190
+f 2719/2509 690/3190 2721/2510
+f 160/3108 2664/2512 2568/2313
+f 2723/2514 2568/2313 2664/2512
+f 690/3190 2719/2509 2722/2515
+f 2723/2514 2722/2515 2719/2509
+f 177/3191 2592/2519 2659/2520
+f 2725/2517 2659/2520 2592/2519
+f 690/3190 2722/2515 2724/2516
+f 2725/2517 2724/2516 2722/2515
+f 2726/2521 2602/2522 2707/2491
+f 174/3186 2707/2491 2602/2522
+f 690/3190 2724/2516 2720/2511
+f 2726/2521 2720/2511 2724/2516
+f 2600/2353 2730/2524 173/3165
+f 2729/2526 173/3165 2730/2524
+f 2727/2523 693/3192 2730/2524
+f 2728/2525 2730/2524 693/3192
+f 178/3164 2731/2528 2594/2346
+f 2733/2530 2594/2346 2731/2528
+f 693/3192 2727/2523 2732/2531
+f 2733/2530 2732/2531 2727/2523
+f 2576/2322 2736/2533 170/3157
+f 2735/2534 170/3157 2736/2533
+f 2732/2531 2736/2533 693/3192
+f 2734/2532 693/3192 2736/2533
+f 2737/2535 2738/2536 168/3161
+f 2586/2335 168/3161 2738/2536
+f 2728/2525 693/3192 2738/2536
+f 2734/2532 2738/2536 693/3192
+f 176/3172 2630/2390 2741/2539
+f 2742/2538 2741/2539 2630/2390
+f 2742/2538 2739/2537 2740/2541
+f 695/3193 2740/2541 2739/2537
+f 2625/2385 173/3165 2744/2542
+f 2729/2526 2744/2542 173/3165
+f 2744/2542 2743/2543 2739/2537
+f 695/3193 2739/2537 2743/2543
+f 2587/2336 2746/2545 168/3161
+f 2737/2535 168/3161 2746/2545
+f 2743/2543 2746/2545 695/3193
+f 2745/2544 695/3193 2746/2545
+f 2747/2546 2748/2547 169/3160
+f 2582/2331 169/3160 2748/2547
+f 2740/2541 695/3193 2748/2547
+f 2745/2544 2748/2547 695/3193
+f 171/3170 2643/2405 2751/2551
+f 2752/2549 2751/2551 2643/2405
+f 2749/2548 697/3194 2752/2549
+f 2750/2550 2752/2549 697/3194
+f 176/3172 2741/2539 2639/2400
+f 2754/2553 2639/2400 2741/2539
+f 2754/2553 2753/2554 2749/2548
+f 697/3194 2749/2548 2753/2554
+f 2584/2333 2756/2556 169/3160
+f 2747/2546 169/3160 2756/2556
+f 2753/2554 2756/2556 697/3194
+f 2755/2555 697/3194 2756/2556
+f 2757/2557 2758/2558 167/3159
+f 2578/2327 167/3159 2758/2558
+f 2750/2550 697/3194 2758/2558
+f 2755/2555 2758/2558 697/3194
+f 178/3164 2608/2361 2731/2528
+f 2761/2560 2731/2528 2608/2361
+f 698/3195 2760/2561 2759/2559
+f 2761/2560 2759/2559 2760/2561
+f 2751/2551 2763/2562 171/3170
+f 2618/2375 171/3170 2763/2562
+f 2759/2559 2763/2562 698/3195
+f 2762/2563 698/3195 2763/2562
+f 2580/2328 2765/2565 167/3159
+f 2757/2557 167/3159 2765/2565
+f 2762/2563 2765/2565 698/3195
+f 2764/2564 698/3195 2765/2565
+f 2735/2534 2766/2566 170/3157
+f 2573/2319 170/3157 2766/2566
+f 2764/2564 2766/2566 698/3195
+f 2760/2561 698/3195 2766/2566
+f 123/3196 2767/2567 1883/2573
+f 2770/2569 1883/2573 2767/2567
+f 2770/2569 2768/2570 2769/2571
+f 702/3197 2769/2571 2768/2570
+f 2773/2578 183/3198 2774/2574
+f 2771/2575 2774/2574 183/3198
+f 2768/2570 2774/2574 702/3197
+f 2772/2576 702/3197 2774/2574
+f 2777/2583 182/3199 2778/2579
+f 2775/2580 2778/2579 182/3199
+f 2772/2576 2778/2579 702/3197
+f 2776/2581 702/3197 2778/2579
+f 2780/2584 2779/2586 1888/2585
+f 122/3200 1888/2585 2779/2586
+f 702/3197 2776/2581 2769/2571
+f 2780/2584 2769/2571 2776/2581
+f 2779/2586 2783/2588 122/3200
+f 2428/2590 122/3200 2783/2588
+f 2782/2591 2783/2588 704/3201
+f 2781/2587 704/3201 2783/2588
+f 182/3199 2784/2595 2775/2580
+f 2786/2593 2775/2580 2784/2595
+f 704/3201 2781/2587 2785/2592
+f 2786/2593 2785/2592 2781/2587
+f 1703/1221 2789/2597 100/2983
+f 2788/2598 100/2983 2789/2597
+f 2785/2592 2789/2597 704/3201
+f 2787/2596 704/3201 2789/2597
+f 2790/2599 1698/1215 2431/2600
+f 74/2982 2431/2600 1698/1215
+f 704/3201 2787/2596 2782/2591
+f 2790/2599 2782/2591 2787/2596
+f 67/2984 1709/1228 2409/2603
+f 2793/2602 2409/2603 1709/1228
+f 707/3202 2792/2605 2791/2601
+f 2793/2602 2791/2601 2792/2605
+f 2794/2608 2796/2609 99/2988
+f 1718/1241 99/2988 2796/2609
+f 2791/2601 2796/2609 707/3202
+f 2795/2607 707/3202 2796/2609
+f 181/3203 2797/2610 2799/2614
+f 2800/2612 2799/2614 2797/2610
+f 707/3202 2795/2607 2798/2613
+f 2800/2612 2798/2613 2795/2607
+f 2801/2617 119/3204 2802/2616
+f 2414/2615 2802/2616 119/3204
+f 2798/2613 2802/2616 707/3202
+f 2792/2605 707/3202 2802/2616
+f 119/3204 2801/2617 1872/2621
+f 2805/2619 1872/2621 2801/2617
+f 2804/2620 2805/2619 709/3205
+f 2803/2618 709/3205 2805/2619
+f 2806/2626 2808/2624 181/3203
+f 2797/2610 181/3203 2808/2624
+f 709/3205 2803/2618 2807/2623
+f 2808/2624 2807/2623 2803/2618
+f 2773/2578 2811/2628 183/3198
+f 2810/2629 183/3198 2811/2628
+f 2809/2627 709/3205 2811/2628
+f 2807/2623 2811/2628 709/3205
+f 1877/2631 2812/2630 123/3196
+f 2767/2567 123/3196 2812/2630
+f 2812/2630 2804/2620 2809/2627
+f 709/3205 2809/2627 2804/2620
+f 2815/2633 1731/1260 2788/2598
+f 100/2983 2788/2598 1731/1260
+f 711/3206 2814/2634 2813/2632
+f 2815/2633 2813/2632 2814/2634
+f 2777/2583 2817/2636 182/3199
+f 2784/2595 182/3199 2817/2636
+f 2813/2632 2817/2636 711/3206
+f 2816/2635 711/3206 2817/2636
+f 183/3198 2818/2640 2771/2575
+f 2820/2638 2771/2575 2818/2640
+f 711/3206 2816/2635 2819/2637
+f 2820/2638 2819/2637 2816/2635
+f 75/2991 1737/1267 2821/2642
+f 2822/2641 2821/2642 1737/1267
+f 2819/2637 2822/2641 711/3206
+f 2814/2634 711/3206 2822/2641
+f 75/2991 2821/2642 1743/1273
+f 2825/2644 1743/1273 2821/2642
+f 2824/2645 2825/2644 712/3207
+f 2823/2643 712/3207 2825/2644
+f 183/3198 2810/2629 2818/2640
+f 2827/2647 2818/2640 2810/2629
+f 712/3207 2823/2643 2826/2646
+f 2827/2647 2826/2646 2823/2643
+f 2806/2626 181/3203 2829/2649
+f 2799/2614 2829/2649 181/3203
+f 2826/2646 2829/2649 712/3207
+f 2828/2648 712/3207 2829/2649
+f 2830/2650 2794/2608 1748/1279
+f 99/2988 1748/1279 2794/2608
+f 712/3207 2828/2648 2824/2645
+f 2830/2650 2824/2645 2828/2648
+f 2835/2652 2834/2655 2831/2654
+f 184/3208 2831/2654 2834/2655
+f 2835/2652 2832/2651 2833/2657
+f 716/3209 2833/2657 2832/2651
+f 2838/2662 186/3210 2839/2660
+f 2836/2658 2839/2660 186/3210
+f 2832/2651 2839/2660 716/3209
+f 2837/2661 716/3209 2839/2660
+f 2840/2663 2842/2664 185/3211
+f 2841/2665 185/3211 2842/2664
+f 2837/2661 2842/2664 716/3209
+f 2833/2657 716/3209 2842/2664
+f 2843/2669 2846/2667 184/3208
+f 2831/2654 184/3208 2846/2667
+f 719/3212 2845/2670 2844/2666
+f 2846/2667 2844/2666 2845/2670
+f 2006/1585 2849/2672 117/3042
+f 2848/2673 117/3042 2849/2672
+f 719/3212 2844/2666 2847/2671
+f 2849/2672 2847/2671 2844/2666
+f 136/3031 1937/1504 2015/1596
+f 2851/2675 2015/1596 1937/1504
+f 719/3212 2847/2671 2850/2674
+f 2851/2675 2850/2674 2847/2671
+f 2852/2678 2854/2679 128/3030
+f 1932/1500 128/3030 2854/2679
+f 2850/2674 2854/2679 719/3212
+f 2853/2677 719/3212 2854/2679
+f 186/3210 2838/2662 2855/2681
+f 2856/2680 2855/2681 2838/2662
+f 719/3212 2853/2677 2845/2670
+f 2856/2680 2845/2670 2853/2677
+f 2834/2655 2859/2683 184/3208
+f 2843/2669 184/3208 2859/2683
+f 721/3213 2858/2684 2857/2682
+f 2859/2683 2857/2682 2858/2684
+f 185/3211 2860/2688 2840/2663
+f 2862/2686 2840/2663 2860/2688
+f 721/3213 2857/2682 2861/2685
+f 2862/2686 2861/2685 2857/2682
+f 135/3057 2074/1668 2864/2691
+f 2865/2690 2864/2691 2074/1668
+f 721/3213 2861/2685 2863/2689
+f 2865/2690 2863/2689 2861/2685
+f 2115/1716 2867/2693 113/3055
+f 2067/1659 113/3055 2867/2693
+f 721/3213 2863/2689 2866/2692
+f 2867/2693 2866/2692 2863/2689
+f 117/3042 2848/2673 2111/1711
+f 2868/2694 2111/1711 2848/2673
+f 721/3213 2866/2692 2858/2684
+f 2868/2694 2858/2684 2866/2692
+f 2872/2696 2871/2699 1794/2698
+f 104/3214 1794/2698 2871/2699
+f 723/3215 2870/2701 2869/2695
+f 2872/2696 2869/2695 2870/2701
+f 2407/2089 2874/2703 81/3126
+f 1801/2704 81/3126 2874/2703
+f 2873/2702 723/3215 2874/2703
+f 2869/2695 2874/2703 723/3215
+f 2864/2691 2876/2705 135/3057
+f 2404/2085 135/3057 2876/2705
+f 2876/2705 2875/2706 2873/2702
+f 723/3215 2873/2702 2875/2706
+f 2877/2707 2878/2708 185/3211
+f 2860/2688 185/3211 2878/2708
+f 2875/2706 2878/2708 723/3215
+f 2870/2701 723/3215 2878/2708
+f 1805/2711 2881/2712 79/3142
+f 2485/2202 79/3142 2881/2712
+f 2880/2713 2881/2712 725/3216
+f 2879/2710 725/3216 2881/2712
+f 2884/2715 1812/2718 2882/2717
+f 105/3217 2882/2717 1812/2718
+f 725/3216 2879/2710 2883/2714
+f 2884/2715 2883/2714 2879/2710
+f 2855/2681 2887/2720 186/3210
+f 2886/2721 186/3210 2887/2720
+f 2883/2714 2887/2720 725/3216
+f 2885/2719 725/3216 2887/2720
+f 128/3030 2490/2209 2852/2678
+f 2888/2722 2852/2678 2490/2209
+f 725/3216 2885/2719 2880/2713
+f 2888/2722 2880/2713 2885/2719
+f 2836/2658 186/3210 2892/2726
+f 2889/2725 2892/2726 186/3210
+f 2891/2727 2892/2726 729/3218
+f 2890/2724 729/3218 2892/2726
+f 2896/2729 2895/2732 2893/2731
+f 188/3219 2893/2731 2895/2732
+f 2896/2729 2894/2728 2890/2724
+f 729/3218 2890/2724 2894/2728
+f 2900/2734 2899/2737 2897/2736
+f 187/3220 2897/2736 2899/2737
+f 729/3218 2894/2728 2898/2733
+f 2900/2734 2898/2733 2894/2728
+f 185/3211 2841/2665 2901/2739
+f 2902/2738 2901/2739 2841/2665
+f 2902/2738 2891/2727 2898/2733
+f 729/3218 2898/2733 2891/2727
+f 185/3211 2901/2739 2877/2707
+f 2905/2740 2877/2707 2901/2739
+f 731/3221 2904/2742 2903/2741
+f 2905/2740 2903/2741 2904/2742
+f 2906/2745 2908/2746 187/3220
+f 2897/2736 187/3220 2908/2746
+f 2903/2741 2908/2746 731/3221
+f 2907/2744 731/3221 2908/2746
+f 108/3222 1837/2750 2910/2751
+f 2911/2748 2910/2751 1837/2750
+f 731/3221 2907/2744 2909/2747
+f 2911/2748 2909/2747 2907/2744
+f 2871/2699 2912/2752 104/3214
+f 1832/2753 104/3214 2912/2752
+f 2909/2747 2912/2752 731/3221
+f 2904/2742 731/3221 2912/2752
+f 1843/2756 2915/2757 105/3217
+f 2882/2717 105/3217 2915/2757
+f 2913/2755 733/3223 2915/2757
+f 2914/2758 2915/2757 733/3223
+f 109/3224 2916/2762 1850/2763
+f 2918/2760 1850/2763 2916/2762
+f 2918/2760 2917/2759 2913/2755
+f 733/3223 2913/2755 2917/2759
+f 2920/2766 188/3219 2921/2764
+f 2895/2732 2921/2764 188/3219
+f 2917/2759 2921/2764 733/3223
+f 2919/2765 733/3223 2921/2764
+f 2922/2767 2889/2725 2886/2721
+f 186/3210 2886/2721 2889/2725
+f 733/3223 2919/2765 2914/2758
+f 2922/2767 2914/2758 2919/2765
+f 2925/2769 2906/2745 2899/2737
+f 187/3220 2899/2737 2906/2745
+f 734/3225 2924/2770 2923/2768
+f 2925/2769 2923/2768 2924/2770
+f 2927/2772 2893/2731 2920/2766
+f 188/3219 2920/2766 2893/2731
+f 734/3225 2923/2768 2926/2771
+f 2927/2772 2926/2771 2923/2768
+f 1860/2775 2929/2776 109/3224
+f 2916/2762 109/3224 2929/2776
+f 2926/2771 2929/2776 734/3225
+f 2928/2774 734/3225 2929/2776
+f 2910/2751 2930/2777 108/3222
+f 1856/2778 108/3222 2930/2777
+f 2928/2774 2930/2777 734/3225
+f 2924/2770 734/3225 2930/2777
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/obj_mtl_no_image/model.mtl b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/obj_mtl_no_image/model.mtl
new file mode 100644
index 0000000000000000000000000000000000000000..ddb3197ce4c4b4f685381f296cc6cc44fd19a3ad
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/obj_mtl_no_image/model.mtl
@@ -0,0 +1,7 @@
+# Material Count: 1
+
+newmtl material_1
+Ns 96.078431
+Ka 0.000000 0.000000 0.000000
+Kd 0.500000 0.000000 0.000000
+Ks 0.500000 0.500000 0.500000
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/obj_mtl_no_image/model.obj b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/obj_mtl_no_image/model.obj
new file mode 100644
index 0000000000000000000000000000000000000000..cf411442fd4d6e5364e0e49aea0e3bbd570d6b06
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/data/obj_mtl_no_image/model.obj
@@ -0,0 +1,10 @@
+
+mtllib model.mtl
+
+v 0.1 0.2 0.3
+v 0.2 0.3 0.4
+v 0.3 0.4 0.5
+v 0.4 0.5 0.6
+usemtl material_1
+f 1 2 3
+f 1 2 4
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e41cd717f6a439a9c08d76a9d0e4a54e190fc5a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/common_resources.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/common_resources.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8da89f62aefc3f586a86456b0891e09a9fe14fe
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/common_resources.py
@@ -0,0 +1,161 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import contextlib
+import logging
+import os
+import tempfile
+import unittest
+from pathlib import Path
+from typing import Generator, Tuple
+from zipfile import ZipFile
+
+from iopath.common.file_io import PathManager
+
+
+CO3D_MANIFOLD_PATH: str = "manifold://co3d/tree/extracted"
+CO3DV2_MANIFOLD_PATH: str = "manifold://co3d/tree/v2/extracted"
+
+INSIDE_RE_WORKER: bool = os.environ.get("INSIDE_RE_WORKER", False)
+
+
+def get_path_manager(silence_logs: bool = False) -> PathManager:
+ """
+ Returns a path manager which can access manifold internally.
+
+ Args:
+ silence_logs: Whether to reduce log output from iopath library.
+ """
+ if silence_logs:
+ logging.getLogger("iopath.fb.manifold").setLevel(logging.CRITICAL)
+ logging.getLogger("iopath.common.file_io").setLevel(logging.CRITICAL)
+
+ if INSIDE_RE_WORKER:
+ raise ValueError("Cannot get to manifold from RE")
+
+ path_manager = PathManager()
+
+ if os.environ.get("FB_TEST", False):
+ from iopath.fb.manifold import ManifoldPathHandler
+
+ path_manager.register_handler(ManifoldPathHandler())
+
+ return path_manager
+
+
+@contextlib.contextmanager
+def get_skateboard_data(
+ avoid_manifold: bool = False, silence_logs: bool = False
+) -> Generator[Tuple[str, PathManager], None, None]:
+ """
+ Context manager for accessing Co3D dataset by tests, at least for
+ the first 5 skateboards. Internally, we want this to exercise the
+ normal way to access the data directly manifold, but on an RE
+ worker this is impossible so we use a workaround.
+
+ Args:
+ avoid_manifold: Use the method used by RE workers even locally.
+ silence_logs: Whether to reduce log output from iopath library.
+
+ Yields:
+ dataset_root: (str) path to dataset root.
+ path_manager: path_manager to access it with.
+ """
+ if silence_logs:
+ logging.getLogger("iopath.fb.manifold").setLevel(logging.CRITICAL)
+ logging.getLogger("iopath.common.file_io").setLevel(logging.CRITICAL)
+
+ if not os.environ.get("FB_TEST", False):
+ if os.getenv("FAIR_ENV_CLUSTER", "") == "":
+ raise unittest.SkipTest("Unknown environment. Data not available.")
+ yield "/datasets01/co3d/081922", PathManager()
+
+ elif avoid_manifold or INSIDE_RE_WORKER:
+ from libfb.py.parutil import get_file_path
+
+ par_path = "skateboard_first_5"
+ source = get_file_path(par_path)
+ assert Path(source).is_file()
+ with tempfile.TemporaryDirectory() as dest:
+ with ZipFile(source) as f:
+ f.extractall(dest)
+ yield os.path.join(dest, "extracted"), PathManager()
+ else:
+ yield CO3D_MANIFOLD_PATH, get_path_manager()
+
+
+def _provide_torchvision_weights(par_path: str, filename: str) -> None:
+ """
+ Ensure the weights files are available for a torchvision model.
+ """
+ # In OSS, torchvision looks for vgg16 weights in
+ # https://download.pytorch.org/models/vgg16-397923af.pth
+ # Inside fbcode, this is replaced by asking iopath for
+ # manifold://torchvision/tree/models/vgg16-397923af.pth
+ # (the code for this replacement is in
+ # fbcode/pytorch/vision/fb/_internally_replaced_utils.py )
+ #
+ # iopath does this by looking for the file at the cache location
+ # and if it is not there getting it from manifold.
+ # (the code for this is in
+ # fbcode/fair_infra/data/iopath/iopath/fb/manifold.py )
+ #
+ # On the remote execution worker, manifold is inaccessible.
+ # We solve this by making the cached file available before iopath
+ # looks.
+ #
+ # By default the cache location is
+ # ~/.torch/iopath_cache/manifold_cache/tree/models/vgg16-397923af.pth
+ # But we can't write to the home directory on the RE worker.
+ # We define FVCORE_CACHE to change the cache location to
+ # iopath_cache/manifold_cache/tree/models/vgg16-397923af.pth
+ # (Without it, manifold caches in unstable temporary locations on RE.)
+ #
+ # The file we want has been copied from
+ # tree/models/vgg16-397923af.pth in the torchvision bucket
+ # to
+ # tree/testing/vgg16-397923af.pth in the co3d bucket
+ # and the TARGETS file copies it somewhere in the PAR which we
+ # recover with get_file_path.
+ # (It can't copy straight to a nested location, see
+ # https://fb.workplace.com/groups/askbuck/posts/2644615728920359/)
+ # Here we symlink it to the new cache location.
+ if INSIDE_RE_WORKER:
+ from libfb.py.parutil import get_file_path
+
+ os.environ["FVCORE_CACHE"] = "iopath_cache"
+
+ source = Path(get_file_path(par_path))
+ assert source.is_file()
+
+ dest = Path("iopath_cache/manifold_cache/tree/models")
+ if not dest.exists():
+ dest.mkdir(parents=True)
+
+ if not (dest / filename).is_symlink():
+ try:
+ (dest / filename).symlink_to(source)
+ except FileExistsError:
+ print("FileExistsError: no symlink created.")
+
+
+def provide_lpips_vgg() -> None:
+ """
+ Ensure the weights files are available for lpips.LPIPS(net="vgg")
+ to be called. Specifically, torchvision's vgg16.
+ """
+ _provide_torchvision_weights("vgg_weights_for_lpips", "vgg16-397923af.pth")
+
+
+def provide_resnet34() -> None:
+ """
+ Ensure the weights files are available for
+
+ torchvision.models.resnet34(pretrained=True)
+
+ to be called.
+ """
+ _provide_torchvision_weights("resnet34_weights", "resnet34-b627a593.pth")
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/data/data_source.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/data/data_source.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a444309cd99bf7300d07f9d39b37cc800105ecbf
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/data/data_source.yaml
@@ -0,0 +1,131 @@
+dataset_map_provider_class_type: ???
+data_loader_map_provider_class_type: SequenceDataLoaderMapProvider
+dataset_map_provider_BlenderDatasetMapProvider_args:
+ base_dir: ???
+ object_name: ???
+ path_manager_factory_class_type: PathManagerFactory
+ n_known_frames_for_test: null
+ path_manager_factory_PathManagerFactory_args:
+ silence_logs: true
+dataset_map_provider_JsonIndexDatasetMapProvider_args:
+ category: ???
+ task_str: singlesequence
+ dataset_root: ''
+ n_frames_per_sequence: -1
+ test_on_train: false
+ restrict_sequence_name: []
+ test_restrict_sequence_id: -1
+ assert_single_seq: false
+ only_test_set: false
+ dataset_class_type: JsonIndexDataset
+ path_manager_factory_class_type: PathManagerFactory
+ dataset_JsonIndexDataset_args:
+ limit_to: 0
+ limit_sequences_to: 0
+ exclude_sequence: []
+ limit_category_to: []
+ load_images: true
+ load_depths: true
+ load_depth_masks: true
+ load_masks: true
+ load_point_clouds: false
+ max_points: 0
+ mask_images: false
+ mask_depths: false
+ image_height: 800
+ image_width: 800
+ box_crop: true
+ box_crop_mask_thr: 0.4
+ box_crop_context: 0.3
+ remove_empty_masks: true
+ seed: 0
+ sort_frames: false
+ path_manager_factory_PathManagerFactory_args:
+ silence_logs: true
+dataset_map_provider_JsonIndexDatasetMapProviderV2_args:
+ category: ???
+ subset_name: ???
+ dataset_root: ''
+ test_on_train: false
+ only_test_set: false
+ load_eval_batches: true
+ num_load_workers: 4
+ n_known_frames_for_test: 0
+ dataset_class_type: JsonIndexDataset
+ path_manager_factory_class_type: PathManagerFactory
+ dataset_JsonIndexDataset_args:
+ limit_to: 0
+ limit_sequences_to: 0
+ pick_sequence: []
+ exclude_sequence: []
+ limit_category_to: []
+ load_images: true
+ load_depths: true
+ load_depth_masks: true
+ load_masks: true
+ load_point_clouds: false
+ max_points: 0
+ mask_images: false
+ mask_depths: false
+ image_height: 800
+ image_width: 800
+ box_crop: true
+ box_crop_mask_thr: 0.4
+ box_crop_context: 0.3
+ remove_empty_masks: true
+ n_frames_per_sequence: -1
+ seed: 0
+ sort_frames: false
+ path_manager_factory_PathManagerFactory_args:
+ silence_logs: true
+dataset_map_provider_LlffDatasetMapProvider_args:
+ base_dir: ???
+ object_name: ???
+ path_manager_factory_class_type: PathManagerFactory
+ n_known_frames_for_test: null
+ path_manager_factory_PathManagerFactory_args:
+ silence_logs: true
+ downscale_factor: 4
+dataset_map_provider_RenderedMeshDatasetMapProvider_args:
+ num_views: 40
+ data_file: null
+ azimuth_range: 180.0
+ distance: 2.7
+ resolution: 128
+ use_point_light: true
+ gpu_idx: 0
+ path_manager_factory_class_type: PathManagerFactory
+ path_manager_factory_PathManagerFactory_args:
+ silence_logs: true
+data_loader_map_provider_SequenceDataLoaderMapProvider_args:
+ batch_size: 1
+ num_workers: 0
+ dataset_length_train: 0
+ dataset_length_val: 0
+ dataset_length_test: 0
+ train_conditioning_type: SAME
+ val_conditioning_type: SAME
+ test_conditioning_type: KNOWN
+ images_per_seq_options: []
+ sample_consecutive_frames: false
+ consecutive_frames_max_gap: 0
+ consecutive_frames_max_gap_seconds: 0.1
+data_loader_map_provider_SimpleDataLoaderMapProvider_args:
+ batch_size: 1
+ num_workers: 0
+ dataset_length_train: 0
+ dataset_length_val: 0
+ dataset_length_test: 0
+data_loader_map_provider_TrainEvalDataLoaderMapProvider_args:
+ batch_size: 1
+ num_workers: 0
+ dataset_length_train: 0
+ dataset_length_val: 0
+ dataset_length_test: 0
+ train_conditioning_type: SAME
+ val_conditioning_type: SAME
+ test_conditioning_type: KNOWN
+ images_per_seq_options: []
+ sample_consecutive_frames: false
+ consecutive_frames_max_gap: 0
+ consecutive_frames_max_gap_seconds: 0.1
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/data/overrides.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/data/overrides.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..95899d898b9d05aada3ed22b87058c9ade4e93fc
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/data/overrides.yaml
@@ -0,0 +1,126 @@
+log_vars:
+- loss_rgb_psnr_fg
+- loss_rgb_psnr
+- loss_rgb_mse
+- loss_rgb_huber
+- loss_depth_abs
+- loss_depth_abs_fg
+- loss_mask_neg_iou
+- loss_mask_bce
+- loss_mask_beta_prior
+- loss_eikonal
+- loss_density_tv
+- loss_depth_neg_penalty
+- loss_autodecoder_norm
+- loss_prev_stage_rgb_mse
+- loss_prev_stage_rgb_psnr_fg
+- loss_prev_stage_rgb_psnr
+- loss_prev_stage_mask_bce
+- objective
+- epoch
+- sec/it
+mask_images: true
+mask_depths: true
+render_image_width: 400
+render_image_height: 400
+mask_threshold: 0.5
+output_rasterized_mc: false
+bg_color:
+- 0.0
+- 0.0
+- 0.0
+num_passes: 1
+chunk_size_grid: 4096
+render_features_dimensions: 3
+tqdm_trigger_threshold: 16
+n_train_target_views: 1
+sampling_mode_training: mask_sample
+sampling_mode_evaluation: full_grid
+global_encoder_class_type: SequenceAutodecoder
+raysampler_class_type: AdaptiveRaySampler
+renderer_class_type: LSTMRenderer
+image_feature_extractor_class_type: ResNetFeatureExtractor
+view_pooler_enabled: true
+implicit_function_class_type: IdrFeatureField
+view_metrics_class_type: ViewMetrics
+regularization_metrics_class_type: RegularizationMetrics
+loss_weights:
+ loss_rgb_mse: 1.0
+ loss_prev_stage_rgb_mse: 1.0
+ loss_mask_bce: 0.0
+ loss_prev_stage_mask_bce: 0.0
+global_encoder_SequenceAutodecoder_args:
+ autodecoder_args:
+ encoding_dim: 0
+ n_instances: 1
+ init_scale: 1.0
+ ignore_input: false
+raysampler_AdaptiveRaySampler_args:
+ n_pts_per_ray_training: 64
+ n_pts_per_ray_evaluation: 64
+ n_rays_per_image_sampled_from_mask: 1024
+ n_rays_total_training: null
+ stratified_point_sampling_training: true
+ stratified_point_sampling_evaluation: false
+ cast_ray_bundle_as_cone: false
+ scene_extent: 8.0
+ scene_center:
+ - 0.0
+ - 0.0
+ - 0.0
+renderer_LSTMRenderer_args:
+ num_raymarch_steps: 10
+ init_depth: 17.0
+ init_depth_noise_std: 0.0005
+ hidden_size: 16
+ n_feature_channels: 256
+ bg_color: null
+ verbose: false
+image_feature_extractor_ResNetFeatureExtractor_args:
+ name: resnet34
+ pretrained: true
+ stages:
+ - 1
+ - 2
+ - 3
+ - 4
+ normalize_image: true
+ image_rescale: 0.16
+ first_max_pool: true
+ proj_dim: 32
+ l2_norm: true
+ add_masks: true
+ add_images: true
+ global_average_pool: false
+ feature_rescale: 1.0
+view_pooler_args:
+ feature_aggregator_class_type: AngleWeightedIdentityFeatureAggregator
+ view_sampler_args:
+ masked_sampling: false
+ sampling_mode: bilinear
+ feature_aggregator_AngleWeightedIdentityFeatureAggregator_args:
+ exclude_target_view: true
+ exclude_target_view_mask_features: true
+ concatenate_output: true
+ weight_by_ray_angle_gamma: 1.0
+ min_ray_angle_weight: 0.1
+implicit_function_IdrFeatureField_args:
+ d_in: 3
+ d_out: 1
+ dims:
+ - 512
+ - 512
+ - 512
+ - 512
+ - 512
+ - 512
+ - 512
+ - 512
+ geometric_init: true
+ bias: 1.0
+ skip_in: []
+ weight_norm: true
+ n_harmonic_functions_xyz: 1729
+ pooled_feature_dim: 0
+view_metrics_ViewMetrics_args: {}
+regularization_metrics_RegularizationMetrics_args: {}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/data/sql_dataset/set_lists_100.json b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/data/sql_dataset/set_lists_100.json
new file mode 100644
index 0000000000000000000000000000000000000000..96dbe2b4010e4db93f3d7fd0bf84145691c9cee0
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/data/sql_dataset/set_lists_100.json
@@ -0,0 +1 @@
+{"train": [["cat0_seq0", 0, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq0/frame000000.jpg"], ["cat0_seq0", 2, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq0/frame000002.jpg"], ["cat0_seq0", 4, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq0/frame000004.jpg"], ["cat0_seq0", 6, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq0/frame000006.jpg"], ["cat0_seq0", 8, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq0/frame000008.jpg"], ["cat0_seq1", 0, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq1/frame000000.jpg"], ["cat0_seq1", 2, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq1/frame000002.jpg"], ["cat0_seq1", 4, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq1/frame000004.jpg"], ["cat0_seq1", 6, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq1/frame000006.jpg"], ["cat0_seq1", 8, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq1/frame000008.jpg"], ["cat0_seq2", 0, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq2/frame000000.jpg"], ["cat0_seq2", 2, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq2/frame000002.jpg"], ["cat0_seq2", 4, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq2/frame000004.jpg"], ["cat0_seq2", 6, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq2/frame000006.jpg"], ["cat0_seq2", 8, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq2/frame000008.jpg"], ["cat0_seq3", 0, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq3/frame000000.jpg"], ["cat0_seq3", 2, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq3/frame000002.jpg"], ["cat0_seq3", 4, 
"kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq3/frame000004.jpg"], ["cat0_seq3", 6, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq3/frame000006.jpg"], ["cat0_seq3", 8, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq3/frame000008.jpg"], ["cat0_seq4", 0, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq4/frame000000.jpg"], ["cat0_seq4", 2, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq4/frame000002.jpg"], ["cat0_seq4", 4, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq4/frame000004.jpg"], ["cat0_seq4", 6, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq4/frame000006.jpg"], ["cat0_seq4", 8, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq4/frame000008.jpg"], ["cat1_seq0", 0, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq0/frame000000.jpg"], ["cat1_seq0", 2, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq0/frame000002.jpg"], ["cat1_seq0", 4, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq0/frame000004.jpg"], ["cat1_seq0", 6, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq0/frame000006.jpg"], ["cat1_seq0", 8, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq0/frame000008.jpg"], ["cat1_seq1", 0, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq1/frame000000.jpg"], ["cat1_seq1", 2, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq1/frame000002.jpg"], ["cat1_seq1", 4, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq1/frame000004.jpg"], ["cat1_seq1", 6, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq1/frame000006.jpg"], ["cat1_seq1", 8, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq1/frame000008.jpg"], 
["cat1_seq2", 0, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq2/frame000000.jpg"], ["cat1_seq2", 2, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq2/frame000002.jpg"], ["cat1_seq2", 4, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq2/frame000004.jpg"], ["cat1_seq2", 6, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq2/frame000006.jpg"], ["cat1_seq2", 8, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq2/frame000008.jpg"], ["cat1_seq3", 0, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq3/frame000000.jpg"], ["cat1_seq3", 2, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq3/frame000002.jpg"], ["cat1_seq3", 4, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq3/frame000004.jpg"], ["cat1_seq3", 6, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq3/frame000006.jpg"], ["cat1_seq3", 8, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq3/frame000008.jpg"], ["cat1_seq4", 0, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq4/frame000000.jpg"], ["cat1_seq4", 2, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq4/frame000002.jpg"], ["cat1_seq4", 4, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq4/frame000004.jpg"], ["cat1_seq4", 6, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq4/frame000006.jpg"], ["cat1_seq4", 8, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq4/frame000008.jpg"]], "test": [["cat0_seq0", 1, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq0/frame000001.jpg"], ["cat0_seq0", 3, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq0/frame000003.jpg"], ["cat0_seq0", 5, 
"kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq0/frame000005.jpg"], ["cat0_seq0", 7, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq0/frame000007.jpg"], ["cat0_seq0", 9, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq0/frame000009.jpg"], ["cat0_seq1", 1, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq1/frame000001.jpg"], ["cat0_seq1", 3, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq1/frame000003.jpg"], ["cat0_seq1", 5, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq1/frame000005.jpg"], ["cat0_seq1", 7, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq1/frame000007.jpg"], ["cat0_seq1", 9, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq1/frame000009.jpg"], ["cat0_seq2", 1, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq2/frame000001.jpg"], ["cat0_seq2", 3, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq2/frame000003.jpg"], ["cat0_seq2", 5, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq2/frame000005.jpg"], ["cat0_seq2", 7, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq2/frame000007.jpg"], ["cat0_seq2", 9, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq2/frame000009.jpg"], ["cat0_seq3", 1, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq3/frame000001.jpg"], ["cat0_seq3", 3, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq3/frame000003.jpg"], ["cat0_seq3", 5, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq3/frame000005.jpg"], ["cat0_seq3", 7, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq3/frame000007.jpg"], ["cat0_seq3", 9, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq3/frame000009.jpg"], 
["cat0_seq4", 1, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq4/frame000001.jpg"], ["cat0_seq4", 3, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq4/frame000003.jpg"], ["cat0_seq4", 5, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq4/frame000005.jpg"], ["cat0_seq4", 7, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq4/frame000007.jpg"], ["cat0_seq4", 9, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat0_seq4/frame000009.jpg"], ["cat1_seq0", 1, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq0/frame000001.jpg"], ["cat1_seq0", 3, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq0/frame000003.jpg"], ["cat1_seq0", 5, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq0/frame000005.jpg"], ["cat1_seq0", 7, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq0/frame000007.jpg"], ["cat1_seq0", 9, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq0/frame000009.jpg"], ["cat1_seq1", 1, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq1/frame000001.jpg"], ["cat1_seq1", 3, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq1/frame000003.jpg"], ["cat1_seq1", 5, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq1/frame000005.jpg"], ["cat1_seq1", 7, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq1/frame000007.jpg"], ["cat1_seq1", 9, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq1/frame000009.jpg"], ["cat1_seq2", 1, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq2/frame000001.jpg"], ["cat1_seq2", 3, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq2/frame000003.jpg"], ["cat1_seq2", 5, 
"kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq2/frame000005.jpg"], ["cat1_seq2", 7, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq2/frame000007.jpg"], ["cat1_seq2", 9, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq2/frame000009.jpg"], ["cat1_seq3", 1, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq3/frame000001.jpg"], ["cat1_seq3", 3, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq3/frame000003.jpg"], ["cat1_seq3", 5, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq3/frame000005.jpg"], ["cat1_seq3", 7, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq3/frame000007.jpg"], ["cat1_seq3", 9, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq3/frame000009.jpg"], ["cat1_seq4", 1, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq4/frame000001.jpg"], ["cat1_seq4", 3, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq4/frame000003.jpg"], ["cat1_seq4", 5, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq4/frame000005.jpg"], ["cat1_seq4", 7, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq4/frame000007.jpg"], ["cat1_seq4", 9, "kfcdtsiagiruwsuplqemogkmqyqhfvpwbvdrikpjlnegagzxhwxrguehparmirtk/cat1_seq4/frame000009.jpg"]]}
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/data/sql_dataset/sql_dataset_100.sqlite b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/data/sql_dataset/sql_dataset_100.sqlite
new file mode 100644
index 0000000000000000000000000000000000000000..2d8ea3b2bb10d14175b1f8d87a4cebffa0210410
Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/data/sql_dataset/sql_dataset_100.sqlite differ
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/models/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e41cd717f6a439a9c08d76a9d0e4a54e190fc5a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/models/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/models/test_overfit_model.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/models/test_overfit_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..8012e214c3cd8fadfa443883cc15f5f80a7ecc7e
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/models/test_overfit_model.py
@@ -0,0 +1,217 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+from typing import Any, Dict
+from unittest.mock import patch
+
+import torch
+from pytorch3d.implicitron.models.generic_model import GenericModel
+from pytorch3d.implicitron.models.overfit_model import OverfitModel
+from pytorch3d.implicitron.models.renderer.base import EvaluationMode
+from pytorch3d.implicitron.tools.config import expand_args_fields
+from pytorch3d.renderer.cameras import look_at_view_transform, PerspectiveCameras
+
+DEVICE = torch.device("cuda:0")
+
+
def _generate_fake_inputs(N: int, H: int, W: int) -> Dict[str, Any]:
    """Build a synthetic batch of N frames of size H x W for model forward passes.

    Cameras are placed at random azimuths on the default look-at orbit; the
    foreground and crop masks are random binary maps, and the depth map is
    offset by 0.1 so it is strictly positive.
    """

    def _binary_map() -> torch.Tensor:
        # Random {0, 1} mask, cast to float as the models expect.
        return torch.randint(high=2, size=(N, 1, H, W), device=DEVICE).float()

    rotation, translation = look_at_view_transform(azim=torch.rand(N) * 360)
    return {
        "camera": PerspectiveCameras(R=rotation, T=translation, device=DEVICE),
        "fg_probability": _binary_map(),
        "depth_map": torch.rand((N, 1, H, W), device=DEVICE) + 0.1,
        "mask_crop": _binary_map(),
        "sequence_name": ["sequence"] * N,
        "image_rgb": torch.rand((N, 1, H, W), device=DEVICE),
    }
+
+
def mock_safe_multinomial(input: torch.Tensor, num_samples: int) -> torch.Tensor:
    """Deterministic stand-in for ``_safe_multinomial`` used in these tests.

    Instead of sampling from the categorical distributions, every row receives
    the fixed index sequence 0..num_samples-1, making ray sampling reproducible
    so two models can be compared exactly.

    Args:
        input: tensor of shape [B, n] containing non-negative values;
            rows are interpreted as unnormalized event probabilities
            in categorical distributions.
        num_samples: number of samples to take.

    Returns:
        Tensor of shape [B, num_samples]
    """
    n_rows = input.shape[0]
    indices = torch.arange(num_samples).unsqueeze(0)
    return indices.repeat(n_rows, 1).to(DEVICE)
+
+
class TestOverfitModel(unittest.TestCase):
    """Tests that OverfitModel matches GenericModel and that its implicit
    functions can be shared, separate, or absent across rendering passes."""

    def setUp(self):
        # Fixed seed so weight initialization is reproducible across the two
        # models being compared.
        torch.manual_seed(42)

    def test_overfit_model_vs_generic_model_with_batch_size_one(self):
        """In this test we compare OverfitModel to GenericModel behavior.

        We use a Nerf setup (2 rendering passes).

        OverfitModel is a specific case of GenericModel. Hence, with the same inputs,
        they should provide the exact same results.
        """
        expand_args_fields(OverfitModel)
        expand_args_fields(GenericModel)
        batch_size, image_height, image_width = 1, 80, 80
        # OverfitModel targets a single view; the comparison only holds for
        # batch size 1.
        assert batch_size == 1
        overfit_model = OverfitModel(
            render_image_height=image_height,
            render_image_width=image_width,
            coarse_implicit_function_class_type="NeuralRadianceFieldImplicitFunction",
            # To avoid randomization to compare the outputs of our model
            # we deactivate the stratified_point_sampling_training
            raysampler_AdaptiveRaySampler_args={
                "stratified_point_sampling_training": False
            },
            global_encoder_class_type="SequenceAutodecoder",
            global_encoder_SequenceAutodecoder_args={
                "autodecoder_args": {
                    "n_instances": 1000,
                    "init_scale": 1.0,
                    "encoding_dim": 64,
                }
            },
        )
        # Same configuration expressed through GenericModel: two passes
        # (coarse + fine) and one training target view.
        generic_model = GenericModel(
            render_image_height=image_height,
            render_image_width=image_width,
            n_train_target_views=batch_size,
            num_passes=2,
            # To avoid randomization to compare the outputs of our model
            # we deactivate the stratified_point_sampling_training
            raysampler_AdaptiveRaySampler_args={
                "stratified_point_sampling_training": False
            },
            global_encoder_class_type="SequenceAutodecoder",
            global_encoder_SequenceAutodecoder_args={
                "autodecoder_args": {
                    "n_instances": 1000,
                    "init_scale": 1.0,
                    "encoding_dim": 64,
                }
            },
        )

        # Check if they do share the number of parameters
        num_params_mvm = sum(p.numel() for p in overfit_model.parameters())
        num_params_gm = sum(p.numel() for p in generic_model.parameters())
        self.assertEqual(num_params_mvm, num_params_gm)

        # Adapt the mapping from generic model to overfit model:
        # GenericModel stores its passes in a list (_implicit_functions.{0,1}),
        # OverfitModel uses named attributes (coarse_implicit_function /
        # implicit_function).
        mapping_om_from_gm = {
            key.replace(
                "_implicit_functions.0._fn", "coarse_implicit_function"
            ).replace("_implicit_functions.1._fn", "implicit_function"): val
            for key, val in generic_model.state_dict().items()
        }
        # Copy parameters from generic_model to overfit_model
        overfit_model.load_state_dict(mapping_om_from_gm)

        overfit_model.to(DEVICE)
        generic_model.to(DEVICE)
        inputs_ = _generate_fake_inputs(batch_size, image_height, image_width)

        # training forward pass
        overfit_model.train()
        generic_model.train()

        # Patch away the only remaining source of randomness in training so
        # both models sample identical rays.
        with patch(
            "pytorch3d.renderer.implicit.raysampling._safe_multinomial",
            side_effect=mock_safe_multinomial,
        ):
            train_preds_om = overfit_model(
                **inputs_,
                evaluation_mode=EvaluationMode.TRAINING,
            )
            train_preds_gm = generic_model(
                **inputs_,
                evaluation_mode=EvaluationMode.TRAINING,
            )

        self.assertTrue(len(train_preds_om) == len(train_preds_gm))

        self.assertTrue(train_preds_om["objective"].isfinite().item())
        # We avoid all the randomization and the weights are the same
        # The objective should be the same
        self.assertTrue(
            torch.allclose(train_preds_om["objective"], train_preds_gm["objective"])
        )

        # Test if the evaluation works
        overfit_model.eval()
        generic_model.eval()
        with torch.no_grad():
            eval_preds_om = overfit_model(
                **inputs_,
                evaluation_mode=EvaluationMode.EVALUATION,
            )
            eval_preds_gm = generic_model(
                **inputs_,
                evaluation_mode=EvaluationMode.EVALUATION,
            )

        self.assertEqual(
            eval_preds_om["images_render"].shape,
            (batch_size, 3, image_height, image_width),
        )
        self.assertTrue(
            torch.allclose(eval_preds_om["objective"], eval_preds_gm["objective"])
        )
        self.assertTrue(
            torch.allclose(
                eval_preds_om["images_render"], eval_preds_gm["images_render"]
            )
        )

    def test_overfit_model_check_share_weights(self):
        """With weight sharing on, both passes must hold the very same
        parameter objects (identity, not just equality)."""
        model = OverfitModel(share_implicit_function_across_passes=True)
        for p1, p2 in zip(
            model.implicit_function.parameters(),
            model.coarse_implicit_function.parameters(),
        ):
            self.assertEqual(id(p1), id(p2))

        # Smoke-test a training forward pass with the shared functions.
        model.to(DEVICE)
        inputs_ = _generate_fake_inputs(2, 80, 80)
        model(**inputs_, evaluation_mode=EvaluationMode.TRAINING)

    def test_overfit_model_check_no_share_weights(self):
        """Without sharing, the coarse and fine passes own distinct
        parameter objects."""
        model = OverfitModel(
            share_implicit_function_across_passes=False,
            coarse_implicit_function_class_type="NeuralRadianceFieldImplicitFunction",
            coarse_implicit_function_NeuralRadianceFieldImplicitFunction_args={
                "transformer_dim_down_factor": 1.0,
                "n_hidden_neurons_xyz": 256,
                "n_layers_xyz": 8,
                "append_xyz": (5,),
            },
        )
        for p1, p2 in zip(
            model.implicit_function.parameters(),
            model.coarse_implicit_function.parameters(),
        ):
            self.assertNotEqual(id(p1), id(p2))

        model.to(DEVICE)
        inputs_ = _generate_fake_inputs(2, 80, 80)
        model(**inputs_, evaluation_mode=EvaluationMode.TRAINING)

    def test_overfit_model_coarse_implicit_function_is_none(self):
        """A single-pass configuration: the coarse function is absent and the
        model still runs a training forward pass."""
        model = OverfitModel(
            share_implicit_function_across_passes=False,
            coarse_implicit_function_NeuralRadianceFieldImplicitFunction_args=None,
        )
        self.assertIsNone(model.coarse_implicit_function)
        model.to(DEVICE)
        inputs_ = _generate_fake_inputs(2, 80, 80)
        model(**inputs_, evaluation_mode=EvaluationMode.TRAINING)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/models/test_utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/models/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..17c21c0ebecabfafa0d7801a0e06856d2144c091
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/models/test_utils.py
@@ -0,0 +1,65 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import unittest
+
+import torch
+
+from pytorch3d.implicitron.models.utils import preprocess_input, weighted_sum_losses
+
+
class TestUtils(unittest.TestCase):
    """Tests for ``preprocess_input`` and ``weighted_sum_losses``."""

    def test_prepare_inputs_wrong_num_dim(self):
        # A 3-D (unbatched) image must be rejected with a helpful message.
        img = torch.randn(3, 3, 3)
        text = (
            "Model received unbatched inputs. "
            + "Perhaps they came from a FrameData which had not been collated."
        )
        with self.assertRaisesRegex(ValueError, text):
            img, fg_prob, depth_map = preprocess_input(
                img, None, None, True, True, 0.5, (0.0, 0.0, 0.0)
            )

    def test_prepare_inputs_mask_image_true(self):
        """mask_images=True: image is zeroed outside the thresholded mask."""
        batch, channels, height, width = 2, 3, 10, 10
        img = torch.ones(batch, channels, height, width)
        # Create a mask on the lower triangular matrix
        fg_prob = torch.tril(torch.ones(batch, 1, height, width)) * 0.3

        out_img, out_fg_prob, out_depth_map = preprocess_input(
            img, fg_prob, None, True, False, 0.3, (0.0, 0.0, 0.0)
        )

        # Masked image keeps only the lower triangle; probability map is
        # binarized at the 0.3 threshold; no depth map was provided.
        self.assertTrue(torch.equal(out_img, torch.tril(img)))
        self.assertTrue(torch.equal(out_fg_prob, fg_prob >= 0.3))
        self.assertIsNone(out_depth_map)

    def test_prepare_inputs_mask_depth_true(self):
        """mask_depths=True: the depth map (not the image) is masked."""
        batch, channels, height, width = 2, 3, 10, 10
        img = torch.ones(batch, channels, height, width)
        depth_map = torch.randn(batch, channels, height, width)
        # Create a mask on the lower triangular matrix
        fg_prob = torch.tril(torch.ones(batch, 1, height, width)) * 0.3

        out_img, out_fg_prob, out_depth_map = preprocess_input(
            img, fg_prob, depth_map, False, True, 0.3, (0.0, 0.0, 0.0)
        )

        self.assertTrue(torch.equal(out_img, img))
        self.assertTrue(torch.equal(out_fg_prob, fg_prob >= 0.3))
        self.assertTrue(torch.equal(out_depth_map, torch.tril(depth_map)))

    def test_weighted_sum_losses(self):
        # 2 * 2.0 + 2 * 0.0 == 4.0
        preds = {"a": torch.tensor(2), "b": torch.tensor(2)}
        weights = {"a": 2.0, "b": 0.0}
        loss = weighted_sum_losses(preds, weights)
        self.assertEqual(loss, 4.0)

    def test_weighted_sum_losses_raise_warning(self):
        # No weight key matches any prediction, so no loss can be formed.
        # NOTE(review): despite the name, no warning is asserted here — only
        # the None return; consider self.assertWarns if weighted_sum_losses
        # is documented to warn (confirm against its implementation).
        preds = {"a": torch.tensor(2), "b": torch.tensor(2)}
        weights = {"c": 2.0, "d": 2.0}
        self.assertIsNone(weighted_sum_losses(preds, weights))
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_batch_sampler.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_batch_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2ac4a9655f9b0ffcefd72331cf5c8bcca5243f6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_batch_sampler.py
@@ -0,0 +1,268 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import unittest
+from collections import defaultdict
+from dataclasses import dataclass
+from itertools import product
+
+import numpy as np
+
+import torch
+from pytorch3d.implicitron.dataset.data_loader_map_provider import (
+ DoublePoolBatchSampler,
+)
+
+from pytorch3d.implicitron.dataset.dataset_base import DatasetBase
+from pytorch3d.implicitron.dataset.frame_data import FrameData
+from pytorch3d.implicitron.dataset.scene_batch_sampler import SceneBatchSampler
+
+
@dataclass
class MockFrameAnnotation:
    """Minimal stand-in for a dataset frame annotation used by MockDataset."""

    # Frame index within its sequence; MockDataset introduces gaps in these.
    frame_number: int
    # All mock frames default to one sequence; MockDataset overwrites this.
    sequence_name: str = "sequence"
    # Timestamps are not exercised by these tests, so a constant suffices.
    frame_timestamp: float = 0.0
+
+
class MockDataset(DatasetBase):
    """In-memory dataset of ``num_seq`` sequences of 10 frames each, with a
    configurable gap in the frame numbers of every sequence."""

    def __init__(self, num_seq, max_frame_gap=1):
        """
        Makes a gap of max_frame_gap frame numbers in the middle of each sequence
        """
        # Bookkeeping the samplers expect from DatasetBase: per-sequence
        # annotations (unused here) and sequence name -> dataset indices
        # (10 consecutive indices per sequence).
        self.seq_annots = {f"seq_{i}": None for i in range(num_seq)}
        self._seq_to_idx = {
            f"seq_{i}": list(range(i * 10, i * 10 + 10)) for i in range(num_seq)
        }

        # frame numbers within sequence: [0, ..., 4, n, ..., n+4]
        # where n - 4 == max_frame_gap
        frame_nos = list(range(5)) + list(range(4 + max_frame_gap, 9 + max_frame_gap))
        self.frame_annots = [
            {"frame_annotation": MockFrameAnnotation(no)} for no in frame_nos * num_seq
        ]
        # Stamp each annotation with the sequence it belongs to.
        for seq_name, idx in self._seq_to_idx.items():
            for i in idx:
                self.frame_annots[i]["frame_annotation"].sequence_name = seq_name

    def get_frame_numbers_and_timestamps(self, idxs, subset_filter=None):
        """Return ``(frame_number, frame_timestamp)`` pairs for dataset indices."""
        # Subset filtering is not modelled by this mock.
        assert subset_filter is None
        out = []
        for idx in idxs:
            frame_annotation = self.frame_annots[idx]["frame_annotation"]
            out.append(
                (frame_annotation.frame_number, frame_annotation.frame_timestamp)
            )
        return out

    def __getitem__(self, index: int):
        """Build a minimal FrameData for the annotation at ``index``."""
        fa = self.frame_annots[index]["frame_annotation"]
        fd = FrameData(
            sequence_name=fa.sequence_name,
            sequence_category="default_category",
            frame_number=torch.LongTensor([fa.frame_number]),
            frame_timestamp=torch.LongTensor([fa.frame_timestamp]),
        )
        return fd
+
+
class TestSceneBatchSampler(unittest.TestCase):
    """Tests SceneBatchSampler over MockDataset: single-sequence overfitting,
    multi-sequence batching, and consecutive-frame constraints."""

    def setUp(self):
        # Seed numpy since SceneBatchSampler draws with np.random.
        np.random.seed(42)
        self.dataset_overfit = MockDataset(1)

    def test_overfit(self):
        """One sequence: every batch is filled from that sequence alone."""
        num_batches = 3
        batch_size = 10
        sampler = SceneBatchSampler(
            self.dataset_overfit,
            batch_size=batch_size,
            num_batches=num_batches,
            images_per_seq_options=[10],  # will try to sample batch_size anyway
        )

        self.assertEqual(len(sampler), num_batches)

        it = iter(sampler)
        for _ in range(num_batches):
            batch = next(it)
            self.assertIsNotNone(batch)
            self.assertEqual(len(batch), batch_size)  # true for our examples
            # All indices of sequence 0 lie in [0, 10).
            self.assertTrue(all(idx // 10 == 0 for idx in batch))

        # The sampler must be exhausted after num_batches batches.
        with self.assertRaises(StopIteration):
            batch = next(it)

    def test_multiseq(self):
        # Sweep the option grid; the actual checks live in the helper.
        for ips_options in [[10], [2], [3], [2, 3, 4]]:
            for sample_consecutive_frames in [True, False]:
                for consecutive_frames_max_gap in [0, 1, 3]:
                    self._test_multiseq_flavour(
                        ips_options,
                        sample_consecutive_frames,
                        consecutive_frames_max_gap,
                    )

    def test_multiseq_gaps(self):
        """Sequences have a frame-number gap of 3 but the sampler only allows
        gap 1, so it can only sample within a 5-frame half of a sequence."""
        num_batches = 16
        batch_size = 10
        dataset_multiseq = MockDataset(5, max_frame_gap=3)
        for ips_options in [[10], [2], [3], [2, 3, 4]]:
            debug_info = f" Images per sequence: {ips_options}."

            sampler = SceneBatchSampler(
                dataset_multiseq,
                batch_size=batch_size,
                num_batches=num_batches,
                images_per_seq_options=ips_options,
                sample_consecutive_frames=True,
                consecutive_frames_max_gap=1,
            )

            self.assertEqual(len(sampler), num_batches, msg=debug_info)

            it = iter(sampler)
            for _ in range(num_batches):
                batch = next(it)
                self.assertIsNotNone(batch, "batch is None in" + debug_info)
                if max(ips_options) > 5:
                    # true for our examples: only 5 consecutive frames exist
                    # on either side of the gap
                    self.assertEqual(len(batch), 5, msg=debug_info)
                else:
                    # true for our examples
                    self.assertEqual(len(batch), batch_size, msg=debug_info)

                self._check_frames_are_consecutive(
                    batch, dataset_multiseq.frame_annots, debug_info
                )

    def _test_multiseq_flavour(
        self,
        ips_options,
        sample_consecutive_frames,
        consecutive_frames_max_gap,
        num_batches=16,
        batch_size=10,
    ):
        """Run one configuration over 5 sequences and check batch composition:
        sequences per batch, frequency distribution, and (optionally)
        consecutiveness of sampled frames."""
        debug_info = (
            f" Images per sequence: {ips_options}, "
            f"sample_consecutive_frames: {sample_consecutive_frames}, "
            f"consecutive_frames_max_gap: {consecutive_frames_max_gap}, "
        )
        # in this test, either consecutive_frames_max_gap == max_frame_gap,
        # or consecutive_frames_max_gap == 0, so segments consist of full sequences
        frame_gap = consecutive_frames_max_gap if consecutive_frames_max_gap > 0 else 3
        dataset_multiseq = MockDataset(5, max_frame_gap=frame_gap)
        sampler = SceneBatchSampler(
            dataset_multiseq,
            batch_size=batch_size,
            num_batches=num_batches,
            images_per_seq_options=ips_options,
            sample_consecutive_frames=sample_consecutive_frames,
            consecutive_frames_max_gap=consecutive_frames_max_gap,
        )

        self.assertEqual(len(sampler), num_batches, msg=debug_info)

        it = iter(sampler)
        typical_counts = set()
        for _ in range(num_batches):
            batch = next(it)
            self.assertIsNotNone(batch, "batch is None in" + debug_info)
            # true for our examples
            self.assertEqual(len(batch), batch_size, msg=debug_info)
            # find distribution over sequences: indices i*10..i*10+9 belong to
            # sequence i, so quotient-by-10 groups by sequence.
            counts = _count_by_quotient(batch, 10)
            freqs = _count_by_quotient(counts.values(), 1)
            self.assertLessEqual(
                len(freqs),
                2,
                msg="We should have maximum of 2 different "
                "frequences of sequences in the batch." + debug_info,
            )
            if len(freqs) == 2:
                # batch_size may not divide evenly: one sequence contributes
                # the remainder and must be unique.
                most_seq_count = max(*freqs.keys())
                last_seq = min(*freqs.keys())
                self.assertEqual(
                    freqs[last_seq],
                    1,
                    msg="Only one odd sequence allowed." + debug_info,
                )
            else:
                self.assertEqual(len(freqs), 1)
                most_seq_count = next(iter(freqs))

            self.assertIn(most_seq_count, ips_options)
            typical_counts.add(most_seq_count)

            if sample_consecutive_frames:
                self._check_frames_are_consecutive(
                    batch,
                    dataset_multiseq.frame_annots,
                    debug_info,
                    max_gap=consecutive_frames_max_gap,
                )

        # Over 16 batches all configured images-per-sequence options should
        # typically show up at least once.
        self.assertTrue(
            all(i in typical_counts for i in ips_options),
            "Some of the frequency options did not occur among "
            f"the {num_batches} batches (could be just bad luck)." + debug_info,
        )

        with self.assertRaises(StopIteration):
            batch = next(it)

    def _check_frames_are_consecutive(self, batch, annots, debug_info, max_gap=1):
        # make sure that sampled frames are consecutive
        for i in range(len(batch) - 1):
            curr_idx, next_idx = batch[i : i + 2]
            if curr_idx // 10 == next_idx // 10:  # same sequence
                if max_gap > 0:
                    # Compare annotated frame numbers, which carry the gap.
                    curr_idx, next_idx = [
                        annots[idx]["frame_annotation"].frame_number
                        for idx in (curr_idx, next_idx)
                    ]
                    gap = max_gap
                else:
                    gap = 1  # we'll check that raw dataset indices are consecutive

                self.assertLessEqual(next_idx - curr_idx, gap, msg=debug_info)
+
+
+def _count_by_quotient(indices, divisor):
+ counter = defaultdict(int)
+ for i in indices:
+ counter[i // divisor] += 1
+
+ return counter
+
+
class TestRandomSampling(unittest.TestCase):
    def test_double_pool_batch_sampler(self):
        """Each batch must start with a "first pool" index (unknown frames)
        followed by "rest pool" indices (known frames), for all combinations
        of replacement and num_batches."""
        unknown_idxs = [2, 3, 4, 5, 8]
        # Index 2 deliberately appears in both pools.
        known_idxs = [2, 9, 10, 11, 12, 13, 14, 15, 16, 17]
        for replacement, num_batches in product([True, False], [None, 4, 5, 6, 30]):
            with self.subTest(f"{replacement}, {num_batches}"):
                sampler = DoublePoolBatchSampler(
                    first_indices=unknown_idxs,
                    rest_indices=known_idxs,
                    batch_size=4,
                    replacement=replacement,
                    num_batches=num_batches,
                )
                # Iterate several epochs to exercise re-shuffling.
                for _ in range(6):
                    epoch = list(sampler)
                    # num_batches=None defaults to one batch per first-pool index.
                    self.assertEqual(len(epoch), num_batches or len(unknown_idxs))
                    for batch in epoch:
                        self.assertEqual(len(batch), 4)
                        self.assertIn(batch[0], unknown_idxs)
                        for i in batch[1:]:
                            self.assertIn(i, known_idxs)
                    # Without replacement every first-pool index must appear,
                    # except when num_batches=4 < len(unknown_idxs) leaves one out.
                    if not replacement and 4 != num_batches:
                        self.assertEqual(
                            {batch[0] for batch in epoch}, set(unknown_idxs)
                        )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_bbox.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_bbox.py
new file mode 100644
index 0000000000000000000000000000000000000000..08dc119fe06237df7ab4fed65c57bd5b90957825
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_bbox.py
@@ -0,0 +1,142 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+
+import numpy as np
+
+import torch
+
+from pytorch3d.implicitron.dataset.utils import (
+ bbox_xywh_to_xyxy,
+ bbox_xyxy_to_xywh,
+ clamp_box_to_image_bounds_and_round,
+ crop_around_box,
+ get_1d_bounds,
+ get_bbox_from_mask,
+ get_clamp_bbox,
+ rescale_bbox,
+ resize_image,
+)
+
+from tests.common_testing import TestCaseMixin
+
+
+class TestBBox(TestCaseMixin, unittest.TestCase):
+ def setUp(self):
+ torch.manual_seed(42)
+
+ def test_bbox_conversion(self):
+ bbox_xywh_list = torch.LongTensor(
+ [
+ [0, 0, 10, 20],
+ [10, 20, 5, 1],
+ [10, 20, 1, 1],
+ [5, 4, 0, 1],
+ ]
+ )
+ for bbox_xywh in bbox_xywh_list:
+ bbox_xyxy = bbox_xywh_to_xyxy(bbox_xywh)
+ bbox_xywh_ = bbox_xyxy_to_xywh(bbox_xyxy)
+ bbox_xyxy_ = bbox_xywh_to_xyxy(bbox_xywh_)
+ self.assertClose(bbox_xywh_, bbox_xywh)
+ self.assertClose(bbox_xyxy, bbox_xyxy_)
+
+ def test_compare_to_expected(self):
+ bbox_xywh_to_xyxy_expected = torch.LongTensor(
+ [
+ [[0, 0, 10, 20], [0, 0, 10, 20]],
+ [[10, 20, 5, 1], [10, 20, 15, 21]],
+ [[10, 20, 1, 1], [10, 20, 11, 21]],
+ [[5, 4, 0, 1], [5, 4, 5, 5]],
+ ]
+ )
+ for bbox_xywh, bbox_xyxy_expected in bbox_xywh_to_xyxy_expected:
+ self.assertClose(bbox_xywh_to_xyxy(bbox_xywh), bbox_xyxy_expected)
+ self.assertClose(bbox_xyxy_to_xywh(bbox_xyxy_expected), bbox_xywh)
+
+ clamp_amnt = 3
+ bbox_xywh_to_xyxy_clamped_expected = torch.LongTensor(
+ [
+ [[0, 0, 10, 20], [0, 0, 10, 20]],
+ [[10, 20, 5, 1], [10, 20, 15, 20 + clamp_amnt]],
+ [[10, 20, 1, 1], [10, 20, 10 + clamp_amnt, 20 + clamp_amnt]],
+ [[5, 4, 0, 1], [5, 4, 5 + clamp_amnt, 4 + clamp_amnt]],
+ ]
+ )
+ for bbox_xywh, bbox_xyxy_expected in bbox_xywh_to_xyxy_clamped_expected:
+ self.assertClose(
+ bbox_xywh_to_xyxy(bbox_xywh, clamp_size=clamp_amnt),
+ bbox_xyxy_expected,
+ )
+
+ def test_mask_to_bbox(self):
+ mask = np.array(
+ [
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ ]
+ ).astype(np.float32)
+ expected_bbox_xywh = [2, 1, 2, 1]
+ bbox_xywh = get_bbox_from_mask(mask, 0.5)
+ self.assertClose(bbox_xywh, expected_bbox_xywh)
+
+ def test_crop_around_box(self):
+ bbox = torch.LongTensor([0, 1, 2, 3]) # (x_min, y_min, x_max, y_max)
+ image = torch.LongTensor(
+ [
+ [0, 0, 10, 20],
+ [10, 20, 5, 1],
+ [10, 20, 1, 1],
+ [5, 4, 0, 1],
+ ]
+ )
+ cropped = crop_around_box(image, bbox)
+ self.assertClose(cropped, image[1:3, 0:2])
+
+ def test_clamp_box_to_image_bounds_and_round(self):
+ bbox = torch.LongTensor([0, 1, 10, 12])
+ image_size = (5, 6)
+ expected_clamped_bbox = torch.LongTensor([0, 1, image_size[1], image_size[0]])
+ clamped_bbox = clamp_box_to_image_bounds_and_round(bbox, image_size)
+ self.assertClose(clamped_bbox, expected_clamped_bbox)
+
+ def test_get_clamp_bbox(self):
+ bbox_xywh = torch.LongTensor([1, 1, 4, 5])
+ clamped_bbox_xyxy = get_clamp_bbox(bbox_xywh, box_crop_context=2)
+        # each side is extended by box_crop_context * size / 2, so width/height triple
+ self.assertClose(clamped_bbox_xyxy, torch.Tensor([-3, -4, 9, 11]))
+
+ def test_rescale_bbox(self):
+ bbox = torch.Tensor([0.0, 1.0, 3.0, 4.0])
+ original_resolution = (4, 4)
+ new_resolution = (8, 8) # twice bigger
+ rescaled_bbox = rescale_bbox(bbox, original_resolution, new_resolution)
+ self.assertClose(bbox * 2, rescaled_bbox)
+
+ def test_get_1d_bounds(self):
+ array = [0, 1, 2]
+ bounds = get_1d_bounds(array)
+        # bounds of the non-zero region, as a half-open [start, end) range
+ self.assertClose(bounds, [1, 3])
+
+ def test_resize_image(self):
+ image = np.random.rand(3, 300, 500) # rgb image 300x500
+ expected_shape = (150, 250)
+
+ resized_image, scale, mask_crop = resize_image(
+ image, image_height=expected_shape[0], image_width=expected_shape[1]
+ )
+
+ original_shape = image.shape[-2:]
+ expected_scale = min(
+ expected_shape[0] / original_shape[0], expected_shape[1] / original_shape[1]
+ )
+
+ self.assertEqual(scale, expected_scale)
+ self.assertEqual(resized_image.shape[-2:], expected_shape)
+ self.assertEqual(mask_crop.shape[-2:], expected_shape)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_build.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_build.py
new file mode 100644
index 0000000000000000000000000000000000000000..646497f5ddc765e7015b31ab396dc635ebb9c1b8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_build.py
@@ -0,0 +1,51 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import importlib
+import os
+import sys
+import unittest
+import unittest.mock
+
+from tests.common_testing import get_pytorch3d_dir
+
+
+# This file groups together tests which look at the code without running it.
+class TestBuild(unittest.TestCase):
+ def test_no_import_cycles(self):
+ # Check each module of pytorch3d imports cleanly,
+ # which may fail if there are import cycles.
+
+ with unittest.mock.patch.dict(sys.modules):
+ for module in list(sys.modules):
+ # If any of pytorch3d is already imported,
+ # the test would be pointless.
+ if module.startswith("pytorch3d"):
+ sys.modules.pop(module, None)
+
+ # torchvision seems to cause problems if re-imported,
+ # so make sure it has been imported here.
+ import torchvision.utils # noqa
+
+ root_dir = get_pytorch3d_dir() / "pytorch3d"
+ # Exclude opengl-related files, as Implicitron is decoupled from opengl
+ # components which will not work without adding a dep on pytorch3d_opengl.
+ ignored_modules = (
+ "__init__",
+ "plotly_vis",
+ "opengl_utils",
+ "rasterizer_opengl",
+ )
+ if os.environ.get("FB_TEST", False):
+ ignored_modules += ("orm_types", "sql_dataset", "sql_dataset_provider")
+ for module_file in root_dir.glob("**/*.py"):
+ if module_file.stem in ignored_modules:
+ continue
+ relative_module = str(module_file.relative_to(root_dir))[:-3]
+ module = "pytorch3d." + relative_module.replace("/", ".")
+ with self.subTest(name=module):
+ with unittest.mock.patch.dict(sys.modules):
+ importlib.import_module(module)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_circle_fitting.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_circle_fitting.py
new file mode 100644
index 0000000000000000000000000000000000000000..479e692cadd748fa193f974bb3b942522183055d
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_circle_fitting.py
@@ -0,0 +1,198 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+from math import pi
+
+import torch
+from pytorch3d.implicitron.tools.circle_fitting import (
+ _signed_area,
+ fit_circle_in_2d,
+ fit_circle_in_3d,
+ get_rotation_to_best_fit_xy,
+)
+from pytorch3d.transforms import random_rotation, random_rotations
+from tests.common_testing import TestCaseMixin
+
+
+class TestCircleFitting(TestCaseMixin, unittest.TestCase):
+ def setUp(self):
+ torch.manual_seed(42)
+
+ def _assertParallel(self, a, b, **kwargs):
+ """
+ Given a and b of shape (..., 3) each containing 3D vectors,
+        assert that corresponding vectors are parallel. Changed sign is ok.
+ """
+ self.assertClose(torch.cross(a, b, dim=-1), torch.zeros_like(a), **kwargs)
+
+ def test_plane_levelling(self):
+ device = torch.device("cuda:0")
+ B = 16
+ N = 1024
+ random = torch.randn((B, N, 3), device=device)
+
+        # first, check that we always return a valid rotation
+ rot = get_rotation_to_best_fit_xy(random)
+ self.assertClose(rot.det(), torch.ones_like(rot[:, 0, 0]))
+ self.assertClose(rot.norm(dim=-1), torch.ones_like(rot[:, 0]))
+
+ # then, check the result is what we expect
+ z_squeeze = 0.1
+ random[..., -1] *= z_squeeze
+ rot_gt = random_rotations(B, device=device)
+ rotated = random @ rot_gt.transpose(-1, -2)
+ rot_hat = get_rotation_to_best_fit_xy(rotated)
+        self.assertClose(rot.det(), torch.ones_like(rot[:, 0, 0]))  # NOTE(review): duplicates L.. above — likely meant rot_hat
+        self.assertClose(rot.norm(dim=-1), torch.ones_like(rot[:, 0]))  # NOTE(review): likely meant rot_hat
+ # covariance matrix of the levelled points is by design diag(1, 1, z_squeeze²)
+ self.assertClose(
+ (rotated @ rot_hat)[..., -1].std(dim=-1),
+ torch.ones_like(rot_hat[:, 0, 0]) * z_squeeze,
+ rtol=0.1,
+ )
+
+ def test_simple_3d(self):
+ device = torch.device("cuda:0")
+ for _ in range(7):
+ radius = 10 * torch.rand(1, device=device)[0]
+ center = 10 * torch.rand(3, device=device)
+ rot = random_rotation(device=device)
+ offset = torch.rand(3, device=device)
+ up = torch.rand(3, device=device)
+ self._simple_3d_test(radius, center, rot, offset, up)
+
+ def _simple_3d_test(self, radius, center, rot, offset, up):
+ # angles are increasing so the points move in a well defined direction.
+ angles = torch.cumsum(torch.rand(17, device=rot.device), dim=0)
+ many = torch.stack(
+ [torch.cos(angles), torch.sin(angles), torch.zeros_like(angles)], dim=1
+ )
+ source_points = (many * radius) @ rot + center[None]
+
+ # case with no generation
+ result = fit_circle_in_3d(source_points)
+ self.assertClose(result.radius, radius)
+ self.assertClose(result.center, center)
+ self._assertParallel(result.normal, rot[2], atol=1e-5)
+ self.assertEqual(result.generated_points.shape, (0, 3))
+
+ # Generate 5 points around the circle
+ n_new_points = 5
+ result2 = fit_circle_in_3d(source_points, n_points=n_new_points)
+ self.assertClose(result2.radius, radius)
+ self.assertClose(result2.center, center)
+ self.assertClose(result2.normal, result.normal)
+ self.assertEqual(result2.generated_points.shape, (5, 3))
+
+ observed_points = result2.generated_points
+ self.assertClose(observed_points[0], observed_points[4], atol=1e-4)
+ self.assertClose(observed_points[0], source_points[0], atol=1e-5)
+ observed_normal = torch.cross(
+ observed_points[0] - observed_points[2],
+ observed_points[1] - observed_points[3],
+ dim=-1,
+ )
+ self._assertParallel(observed_normal, result.normal, atol=1e-4)
+ diameters = observed_points[:2] - observed_points[2:4]
+ self.assertClose(
+ torch.norm(diameters, dim=1), diameters.new_full((2,), 2 * radius)
+ )
+
+ # Regenerate the input points
+ result3 = fit_circle_in_3d(source_points, angles=angles - angles[0])
+ self.assertClose(result3.radius, radius)
+ self.assertClose(result3.center, center)
+ self.assertClose(result3.normal, result.normal)
+ self.assertClose(result3.generated_points, source_points, atol=1e-5)
+
+ # Test with offset
+ result4 = fit_circle_in_3d(
+ source_points, angles=angles - angles[0], offset=offset, up=up
+ )
+ self.assertClose(result4.radius, radius)
+ self.assertClose(result4.center, center)
+ self.assertClose(result4.normal, result.normal)
+ observed_offsets = result4.generated_points - source_points
+
+ # observed_offset is constant
+ self.assertClose(
+ observed_offsets.min(0).values, observed_offsets.max(0).values, atol=1e-5
+ )
+ # observed_offset has the right length
+ self.assertClose(observed_offsets[0].norm(), offset.norm())
+
+ self.assertClose(result.normal.norm(), torch.ones(()))
+ # component of observed_offset along normal
+ component = torch.dot(observed_offsets[0], result.normal)
+ self.assertClose(component.abs(), offset[2].abs(), atol=1e-5)
+ agree_normal = torch.dot(result.normal, up) > 0
+ agree_signs = component * offset[2] > 0
+ self.assertEqual(agree_normal, agree_signs)
+
+ def test_simple_2d(self):
+ radius = 7.0
+ center = torch.tensor([9, 2.5])
+ angles = torch.cumsum(torch.rand(17), dim=0)
+ many = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1)
+ source_points = (many * radius) + center[None]
+
+ result = fit_circle_in_2d(source_points)
+ self.assertClose(result.radius, torch.tensor(radius))
+ self.assertClose(result.center, center)
+ self.assertEqual(result.generated_points.shape, (0, 2))
+
+ # Generate 5 points around the circle
+ n_new_points = 5
+ result2 = fit_circle_in_2d(source_points, n_points=n_new_points)
+ self.assertClose(result2.radius, torch.tensor(radius))
+ self.assertClose(result2.center, center)
+ self.assertEqual(result2.generated_points.shape, (5, 2))
+
+ observed_points = result2.generated_points
+ self.assertClose(observed_points[0], observed_points[4])
+ self.assertClose(observed_points[0], source_points[0], atol=1e-5)
+ diameters = observed_points[:2] - observed_points[2:4]
+ self.assertClose(torch.norm(diameters, dim=1), torch.full((2,), 2 * radius))
+
+ # Regenerate the input points
+ result3 = fit_circle_in_2d(source_points, angles=angles - angles[0])
+ self.assertClose(result3.radius, torch.tensor(radius))
+ self.assertClose(result3.center, center)
+ self.assertClose(result3.generated_points, source_points, atol=1e-5)
+
+ def test_minimum_inputs(self):
+ fit_circle_in_3d(torch.rand(3, 3), n_points=10)
+
+ with self.assertRaisesRegex(
+ ValueError, "2 points are not enough to determine a circle"
+ ):
+ fit_circle_in_3d(torch.rand(2, 3))
+
+ def test_signed_area(self):
+ n_points = 1001
+ angles = torch.linspace(0, 2 * pi, n_points)
+ radius = 0.85
+ center = torch.rand(2)
+ circle = center + radius * torch.stack(
+ [torch.cos(angles), torch.sin(angles)], dim=1
+ )
+ circle_area = torch.tensor(pi * radius * radius)
+ self.assertClose(_signed_area(circle), circle_area)
+ # clockwise is negative
+ self.assertClose(_signed_area(circle.flip(0)), -circle_area)
+
+ # Semicircles
+ self.assertClose(_signed_area(circle[: (n_points + 1) // 2]), circle_area / 2)
+ self.assertClose(_signed_area(circle[n_points // 2 :]), circle_area / 2)
+
+ # A straight line bounds no area
+ self.assertClose(_signed_area(torch.rand(2, 2)), torch.tensor(0.0))
+
+ # Letter 'L' written anticlockwise.
+ L_shape = [[0, 1], [0, 0], [1, 0]]
+ # Triangle area is 0.5 * b * h.
+ self.assertClose(_signed_area(torch.tensor(L_shape)), torch.tensor(0.5))
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_co3d_sql.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_co3d_sql.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f873cfc3466c483b3025ade5c752fa8bb64d131
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_co3d_sql.py
@@ -0,0 +1,246 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+import os
+import unittest
+
+import torch
+
+from pytorch3d.implicitron.dataset.data_loader_map_provider import ( # noqa
+ SequenceDataLoaderMapProvider,
+ SimpleDataLoaderMapProvider,
+)
+from pytorch3d.implicitron.dataset.data_source import ImplicitronDataSource
+from pytorch3d.implicitron.dataset.sql_dataset import SqlIndexDataset # noqa
+from pytorch3d.implicitron.dataset.sql_dataset_provider import ( # noqa
+ SqlIndexDatasetMapProvider,
+)
+from pytorch3d.implicitron.dataset.train_eval_data_loader_provider import (
+ TrainEvalDataLoaderMapProvider,
+)
+from pytorch3d.implicitron.tools.config import get_default_args
+
+logger = logging.getLogger("pytorch3d.implicitron.dataset.sql_dataset")
+sh = logging.StreamHandler()
+logger.addHandler(sh)
+logger.setLevel(logging.DEBUG)
+
+_CO3D_SQL_DATASET_ROOT: str = os.getenv("CO3D_SQL_DATASET_ROOT", "")
+
+
+@unittest.skipUnless(_CO3D_SQL_DATASET_ROOT, "Run only if CO3D is available")
+class TestCo3dSqlDataSource(unittest.TestCase):
+ def test_no_subsets(self):
+ args = get_default_args(ImplicitronDataSource)
+ args.dataset_map_provider_class_type = "SqlIndexDatasetMapProvider"
+ args.data_loader_map_provider_class_type = "TrainEvalDataLoaderMapProvider"
+ provider_args = args.dataset_map_provider_SqlIndexDatasetMapProvider_args
+ provider_args.ignore_subsets = True
+
+ dataset_args = provider_args.dataset_SqlIndexDataset_args
+ dataset_args.pick_categories = ["skateboard"]
+ dataset_args.limit_sequences_to = 1
+
+ data_source = ImplicitronDataSource(**args)
+ self.assertIsInstance(
+ data_source.data_loader_map_provider, TrainEvalDataLoaderMapProvider
+ )
+ _, data_loaders = data_source.get_datasets_and_dataloaders()
+ self.assertEqual(len(data_loaders.train), 202)
+ for frame in data_loaders.train:
+ self.assertIsNone(frame.frame_type)
+ self.assertEqual(frame.image_rgb.shape[-1], 800) # check loading blobs
+ break
+
+ def test_subsets(self):
+ args = get_default_args(ImplicitronDataSource)
+ args.dataset_map_provider_class_type = "SqlIndexDatasetMapProvider"
+ provider_args = args.dataset_map_provider_SqlIndexDatasetMapProvider_args
+ provider_args.subset_lists_path = (
+ "skateboard/set_lists/set_lists_manyview_dev_0.json"
+ )
+ # this will naturally limit to one sequence (no need to limit by cat/sequence)
+
+ dataset_args = provider_args.dataset_SqlIndexDataset_args
+ dataset_args.remove_empty_masks = True
+
+ for sampler_type in [
+ "SimpleDataLoaderMapProvider",
+ "SequenceDataLoaderMapProvider",
+ "TrainEvalDataLoaderMapProvider",
+ ]:
+ args.data_loader_map_provider_class_type = sampler_type
+ data_source = ImplicitronDataSource(**args)
+ _, data_loaders = data_source.get_datasets_and_dataloaders()
+ self.assertEqual(len(data_loaders.train), 102)
+ self.assertEqual(len(data_loaders.val), 100)
+ self.assertEqual(len(data_loaders.test), 100)
+ for split in ["train", "val", "test"]:
+ for frame in data_loaders[split]:
+ self.assertEqual(frame.frame_type, [split])
+ # check loading blobs
+ self.assertEqual(frame.image_rgb.shape[-1], 800)
+ break
+
+ def test_sql_subsets(self):
+ args = get_default_args(ImplicitronDataSource)
+ args.dataset_map_provider_class_type = "SqlIndexDatasetMapProvider"
+ provider_args = args.dataset_map_provider_SqlIndexDatasetMapProvider_args
+ provider_args.subset_lists_path = "set_lists/set_lists_manyview_dev_0.sqlite"
+
+ dataset_args = provider_args.dataset_SqlIndexDataset_args
+ dataset_args.remove_empty_masks = True
+ dataset_args.pick_categories = ["skateboard"]
+
+ for sampler_type in [
+ "SimpleDataLoaderMapProvider",
+ "SequenceDataLoaderMapProvider",
+ "TrainEvalDataLoaderMapProvider",
+ ]:
+ args.data_loader_map_provider_class_type = sampler_type
+ data_source = ImplicitronDataSource(**args)
+ _, data_loaders = data_source.get_datasets_and_dataloaders()
+ self.assertEqual(len(data_loaders.train), 102)
+ self.assertEqual(len(data_loaders.val), 100)
+ self.assertEqual(len(data_loaders.test), 100)
+ for split in ["train", "val", "test"]:
+ for frame in data_loaders[split]:
+ self.assertEqual(frame.frame_type, [split])
+ self.assertEqual(
+ frame.image_rgb.shape[-1], 800
+ ) # check loading blobs
+ break
+
+ @unittest.skip("It takes 75 seconds; skipping by default")
+ def test_huge_subsets(self):
+ args = get_default_args(ImplicitronDataSource)
+ args.dataset_map_provider_class_type = "SqlIndexDatasetMapProvider"
+ args.data_loader_map_provider_class_type = "TrainEvalDataLoaderMapProvider"
+ provider_args = args.dataset_map_provider_SqlIndexDatasetMapProvider_args
+ provider_args.subset_lists_path = "set_lists/set_lists_fewview_dev.sqlite"
+
+ dataset_args = provider_args.dataset_SqlIndexDataset_args
+ dataset_args.remove_empty_masks = True
+
+ data_source = ImplicitronDataSource(**args)
+ _, data_loaders = data_source.get_datasets_and_dataloaders()
+ self.assertEqual(len(data_loaders.train), 3158974)
+ self.assertEqual(len(data_loaders.val), 518417)
+ self.assertEqual(len(data_loaders.test), 518417)
+ for split in ["train", "val", "test"]:
+ for frame in data_loaders[split]:
+ self.assertEqual(frame.frame_type, [split])
+ self.assertEqual(frame.image_rgb.shape[-1], 800) # check loading blobs
+ break
+
+ def test_broken_subsets(self):
+ args = get_default_args(ImplicitronDataSource)
+ args.dataset_map_provider_class_type = "SqlIndexDatasetMapProvider"
+ args.data_loader_map_provider_class_type = "TrainEvalDataLoaderMapProvider"
+ provider_args = args.dataset_map_provider_SqlIndexDatasetMapProvider_args
+ provider_args.subset_lists_path = "et_non_est"
+ provider_args.dataset_SqlIndexDataset_args.pick_categories = ["skateboard"]
+ with self.assertRaises(FileNotFoundError) as err:
+ ImplicitronDataSource(**args)
+
+ # check the hint text
+ self.assertIn("Subset lists path given but not found", str(err.exception))
+
+ def test_eval_batches(self):
+ args = get_default_args(ImplicitronDataSource)
+ args.dataset_map_provider_class_type = "SqlIndexDatasetMapProvider"
+ args.data_loader_map_provider_class_type = "TrainEvalDataLoaderMapProvider"
+ provider_args = args.dataset_map_provider_SqlIndexDatasetMapProvider_args
+ provider_args.subset_lists_path = "set_lists/set_lists_manyview_dev_0.sqlite"
+ provider_args.eval_batches_path = (
+ "skateboard/eval_batches/eval_batches_manyview_dev_0.json"
+ )
+
+ dataset_args = provider_args.dataset_SqlIndexDataset_args
+ dataset_args.remove_empty_masks = True
+ dataset_args.pick_categories = ["skateboard"]
+
+ data_source = ImplicitronDataSource(**args)
+ _, data_loaders = data_source.get_datasets_and_dataloaders()
+ self.assertEqual(len(data_loaders.train), 102)
+ self.assertEqual(len(data_loaders.val), 100)
+ self.assertEqual(len(data_loaders.test), 50)
+ for split in ["train", "val", "test"]:
+ for frame in data_loaders[split]:
+ self.assertEqual(frame.frame_type, [split])
+ self.assertEqual(frame.image_rgb.shape[-1], 800) # check loading blobs
+ break
+
+ def test_eval_batches_from_subset_list_name(self):
+ args = get_default_args(ImplicitronDataSource)
+ args.dataset_map_provider_class_type = "SqlIndexDatasetMapProvider"
+ args.data_loader_map_provider_class_type = "TrainEvalDataLoaderMapProvider"
+ provider_args = args.dataset_map_provider_SqlIndexDatasetMapProvider_args
+ provider_args.subset_list_name = "manyview_dev_0"
+ provider_args.category = "skateboard"
+
+ dataset_args = provider_args.dataset_SqlIndexDataset_args
+ dataset_args.remove_empty_masks = True
+
+ data_source = ImplicitronDataSource(**args)
+ dataset, data_loaders = data_source.get_datasets_and_dataloaders()
+ self.assertListEqual(list(dataset.train.pick_categories), ["skateboard"])
+ self.assertEqual(len(data_loaders.train), 102)
+ self.assertEqual(len(data_loaders.val), 100)
+ self.assertEqual(len(data_loaders.test), 50)
+ for split in ["train", "val", "test"]:
+ for frame in data_loaders[split]:
+ self.assertEqual(frame.frame_type, [split])
+ self.assertEqual(frame.image_rgb.shape[-1], 800) # check loading blobs
+ break
+
+ def test_frame_access(self):
+ args = get_default_args(ImplicitronDataSource)
+ args.dataset_map_provider_class_type = "SqlIndexDatasetMapProvider"
+ args.data_loader_map_provider_class_type = "TrainEvalDataLoaderMapProvider"
+ provider_args = args.dataset_map_provider_SqlIndexDatasetMapProvider_args
+ provider_args.subset_lists_path = "set_lists/set_lists_manyview_dev_0.sqlite"
+
+ dataset_args = provider_args.dataset_SqlIndexDataset_args
+ dataset_args.remove_empty_masks = True
+ dataset_args.pick_categories = ["skateboard"]
+ frame_builder_args = dataset_args.frame_data_builder_FrameDataBuilder_args
+ frame_builder_args.load_point_clouds = True
+ frame_builder_args.box_crop = False # required for .meta
+
+ data_source = ImplicitronDataSource(**args)
+ dataset_map, _ = data_source.get_datasets_and_dataloaders()
+ dataset = dataset_map["train"]
+
+ for idx in [10, ("245_26182_52130", 22)]:
+ example_meta = dataset.meta[idx]
+ example = dataset[idx]
+
+ self.assertIsNone(example_meta.image_rgb)
+ self.assertIsNone(example_meta.fg_probability)
+ self.assertIsNone(example_meta.depth_map)
+ self.assertIsNone(example_meta.sequence_point_cloud)
+ self.assertIsNotNone(example_meta.camera)
+
+ self.assertIsNotNone(example.image_rgb)
+ self.assertIsNotNone(example.fg_probability)
+ self.assertIsNotNone(example.depth_map)
+ self.assertIsNotNone(example.sequence_point_cloud)
+ self.assertIsNotNone(example.camera)
+
+ self.assertEqual(example_meta.sequence_name, example.sequence_name)
+ self.assertEqual(example_meta.frame_number, example.frame_number)
+ self.assertEqual(example_meta.frame_timestamp, example.frame_timestamp)
+ self.assertEqual(example_meta.sequence_category, example.sequence_category)
+ torch.testing.assert_close(example_meta.camera.R, example.camera.R)
+ torch.testing.assert_close(example_meta.camera.T, example.camera.T)
+ torch.testing.assert_close(
+ example_meta.camera.focal_length, example.camera.focal_length
+ )
+ torch.testing.assert_close(
+ example_meta.camera.principal_point, example.camera.principal_point
+ )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_config.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..471451b450e4704f4dbd6ad40b2dd9269c1561b9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_config.py
@@ -0,0 +1,921 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import pickle
+import textwrap
+import unittest
+from dataclasses import dataclass, field, is_dataclass
+from enum import Enum
+from typing import Any, Dict, List, Optional, Tuple
+from unittest.mock import Mock
+
+from omegaconf import DictConfig, ListConfig, OmegaConf, ValidationError
+from pytorch3d.implicitron.tools.config import (
+ _get_type_to_process,
+ _is_actually_dataclass,
+ _ProcessType,
+ _Registry,
+ Configurable,
+ enable_get_default_args,
+ expand_args_fields,
+ get_default_args,
+ get_default_args_field,
+ registry,
+ remove_unused_components,
+ ReplaceableBase,
+ run_auto_creation,
+)
+
+
+@dataclass
+class Animal(ReplaceableBase):
+ pass
+
+
+class Fruit(ReplaceableBase):
+ pass
+
+
+@registry.register
+class Banana(Fruit):
+ pips: int
+ spots: int
+ bananame: str
+
+
+@registry.register
+class Pear(Fruit):
+ n_pips: int = 13
+
+
+class Pineapple(Fruit):
+ pass
+
+
+@registry.register
+class Orange(Fruit):
+ pass
+
+
+@registry.register
+class Kiwi(Fruit):
+ pass
+
+
+@registry.register
+class LargePear(Pear):
+ pass
+
+
+class BoringConfigurable(Configurable):
+ pass
+
+
+class MainTest(Configurable):
+ the_fruit: Fruit
+ n_ids: int
+ n_reps: int = 8
+ the_second_fruit: Fruit
+
+ def create_the_second_fruit(self):
+ expand_args_fields(Pineapple)
+ self.the_second_fruit = Pineapple()
+
+ def __post_init__(self):
+ run_auto_creation(self)
+
+
+class TestConfig(unittest.TestCase):
+ def test_is_actually_dataclass(self):
+ @dataclass
+ class A:
+ pass
+
+ self.assertTrue(_is_actually_dataclass(A))
+ self.assertTrue(is_dataclass(A))
+
+ class B(A):
+ a: int
+
+ self.assertFalse(_is_actually_dataclass(B))
+ self.assertTrue(is_dataclass(B))
+
+ def test_get_type_to_process(self):
+ gt = _get_type_to_process
+ self.assertIsNone(gt(int))
+ self.assertEqual(gt(Fruit), (Fruit, _ProcessType.REPLACEABLE))
+ self.assertEqual(
+ gt(Optional[Fruit]), (Fruit, _ProcessType.OPTIONAL_REPLACEABLE)
+ )
+ self.assertEqual(gt(MainTest), (MainTest, _ProcessType.CONFIGURABLE))
+ self.assertEqual(
+ gt(Optional[MainTest]), (MainTest, _ProcessType.OPTIONAL_CONFIGURABLE)
+ )
+ self.assertIsNone(gt(Optional[int]))
+ self.assertIsNone(gt(Tuple[Fruit]))
+ self.assertIsNone(gt(Tuple[Fruit, Animal]))
+ self.assertIsNone(gt(Optional[List[int]]))
+
+ def test_simple_replacement(self):
+ struct = get_default_args(MainTest)
+ struct.n_ids = 9780
+ struct.the_fruit_Pear_args.n_pips = 3
+ struct.the_fruit_class_type = "Pear"
+ struct.the_second_fruit_class_type = "Pear"
+
+ main = MainTest(**struct)
+ self.assertIsInstance(main.the_fruit, Pear)
+ self.assertEqual(main.n_reps, 8)
+ self.assertEqual(main.n_ids, 9780)
+ self.assertEqual(main.the_fruit.n_pips, 3)
+ self.assertIsInstance(main.the_second_fruit, Pineapple)
+
+ struct2 = get_default_args(MainTest)
+ self.assertEqual(struct2.the_fruit_Pear_args.n_pips, 13)
+
+ self.assertEqual(
+ MainTest._creation_functions,
+ ("create_the_fruit", "create_the_second_fruit"),
+ )
+
+ def test_detect_bases(self):
+ # testing the _base_class_from_class function
+ self.assertIsNone(_Registry._base_class_from_class(ReplaceableBase))
+ self.assertIsNone(_Registry._base_class_from_class(MainTest))
+ self.assertIs(_Registry._base_class_from_class(Fruit), Fruit)
+ self.assertIs(_Registry._base_class_from_class(Pear), Fruit)
+
+ class PricklyPear(Pear):
+ pass
+
+ self.assertIs(_Registry._base_class_from_class(PricklyPear), Fruit)
+
+ def test_registry_entries(self):
+ self.assertIs(registry.get(Fruit, "Banana"), Banana)
+ with self.assertRaisesRegex(ValueError, "Banana has not been registered."):
+ registry.get(Animal, "Banana")
+ with self.assertRaisesRegex(ValueError, "PricklyPear has not been registered."):
+ registry.get(Fruit, "PricklyPear")
+
+ self.assertIs(registry.get(Pear, "Pear"), Pear)
+ self.assertIs(registry.get(Pear, "LargePear"), LargePear)
+ with self.assertRaisesRegex(ValueError, "Banana resolves to"):
+ registry.get(Pear, "Banana")
+
+ all_fruit = set(registry.get_all(Fruit))
+ self.assertIn(Banana, all_fruit)
+ self.assertIn(Pear, all_fruit)
+ self.assertIn(LargePear, all_fruit)
+ self.assertEqual(registry.get_all(Pear), [LargePear])
+
+ @registry.register
+ class Apple(Fruit):
+ pass
+
+ @registry.register
+ class CrabApple(Apple):
+ pass
+
+ self.assertEqual(registry.get_all(Apple), [CrabApple])
+
+ self.assertIs(registry.get(Fruit, "CrabApple"), CrabApple)
+
+ with self.assertRaisesRegex(ValueError, "Cannot tell what it is."):
+
+ @registry.register
+ class NotAFruit:
+ pass
+
+ def test_recursion(self):
+ class Shape(ReplaceableBase):
+ pass
+
+ @registry.register
+ class Triangle(Shape):
+ a: float = 5.0
+
+ @registry.register
+ class Square(Shape):
+ a: float = 3.0
+
+ @registry.register
+ class LargeShape(Shape):
+ inner: Shape
+
+ def __post_init__(self):
+ run_auto_creation(self)
+
+ class ShapeContainer(Configurable):
+ shape: Shape
+
+ container = ShapeContainer(**get_default_args(ShapeContainer))
+ # This is because ShapeContainer is missing __post_init__
+ with self.assertRaises(AttributeError):
+ container.shape
+
+ class ShapeContainer2(Configurable):
+ x: Shape
+ x_class_type: str = "LargeShape"
+
+ def __post_init__(self):
+ self.x_LargeShape_args.inner_class_type = "Triangle"
+ run_auto_creation(self)
+
+ container2_args = get_default_args(ShapeContainer2)
+ container2_args.x_LargeShape_args.inner_Triangle_args.a += 10
+ self.assertIn("inner_Square_args", container2_args.x_LargeShape_args)
+ # We do not perform expansion that would result in an infinite recursion,
+ # so this member is not present.
+ self.assertNotIn("inner_LargeShape_args", container2_args.x_LargeShape_args)
+ container2_args.x_LargeShape_args.inner_Square_args.a += 100
+ container2 = ShapeContainer2(**container2_args)
+ self.assertIsInstance(container2.x, LargeShape)
+ self.assertIsInstance(container2.x.inner, Triangle)
+ self.assertEqual(container2.x.inner.a, 15.0)
+
    def test_simpleclass_member(self):
        """Containers may hold plain (non-dataclass) classes opted in via
        enable_get_default_args, alongside dataclass and replaceable members."""
        # Members which are not dataclasses are
        # tolerated. But it would be nice to be able to
        # configure them.
        class Foo:
            def __init__(self, a: Any = 1, b: Any = 2):
                self.a, self.b = a, b

        enable_get_default_args(Foo)

        @dataclass()
        class Bar:
            aa: int = 9
            bb: int = 9

        class Container(Configurable):
            bar: Bar = Bar()
            # TODO make this work?
            # foo: Foo = Foo()
            fruit: Fruit
            fruit_class_type: str = "Orange"

            def __post_init__(self):
                run_auto_creation(self)

        self.assertEqual(get_default_args(Foo), {"a": 1, "b": 2})
        container_args = get_default_args(Container)
        container = Container(**container_args)
        self.assertIsInstance(container.fruit, Orange)
        self.assertEqual(Container._processed_members, {"fruit": Fruit})
        self.assertEqual(container._processed_members, {"fruit": Fruit})

        # Mutating args on one instance must not leak into the class's
        # defaults: a freshly generated args dict still has n_pips == 13.
        container_defaulted = Container()
        container_defaulted.fruit_Pear_args.n_pips += 4

        container_args2 = get_default_args(Container)
        container = Container(**container_args2)
        self.assertEqual(container.fruit_Pear_args.n_pips, 13)
+
    def test_inheritance(self):
        """Replaceable fields declared in a base class are inherited by
        subclasses; an Optional replaceable whose class type is None
        resolves to None."""
        # Also exercises optional replaceables
        class FruitBowl(ReplaceableBase):
            main_fruit: Fruit
            main_fruit_class_type: str = "Orange"

            def __post_init__(self):
                # Overridden by the subclass's __post_init__, so never runs.
                raise ValueError("This doesn't get called")

        class LargeFruitBowl(FruitBowl):
            extra_fruit: Optional[Fruit]
            extra_fruit_class_type: str = "Kiwi"
            no_fruit: Optional[Fruit]
            no_fruit_class_type: Optional[str] = None

            def __post_init__(self):
                run_auto_creation(self)

        large_args = get_default_args(LargeFruitBowl)
        # Only *_class_type / *_args entries appear in the args,
        # not the member instances themselves.
        self.assertNotIn("extra_fruit", large_args)
        self.assertNotIn("main_fruit", large_args)
        large = LargeFruitBowl(**large_args)
        self.assertIsInstance(large.main_fruit, Orange)
        self.assertIsInstance(large.extra_fruit, Kiwi)
        self.assertIsNone(large.no_fruit)
        self.assertIn("no_fruit_Kiwi_args", large_args)

        # Strips args of implementations not selected, e.g. no_fruit_Kiwi_args
        # (no_fruit's class type is None), without changing behavior.
        remove_unused_components(large_args)
        large2 = LargeFruitBowl(**large_args)
        self.assertIsInstance(large2.main_fruit, Orange)
        self.assertIsInstance(large2.extra_fruit, Kiwi)
        self.assertIsNone(large2.no_fruit)
        needed_args = [
            "extra_fruit_Kiwi_args",
            "extra_fruit_class_type",
            "main_fruit_Orange_args",
            "main_fruit_class_type",
            "no_fruit_class_type",
        ]
        self.assertEqual(sorted(large_args.keys()), needed_args)

        with self.assertRaisesRegex(ValueError, "NotAFruit has not been registered."):
            LargeFruitBowl(extra_fruit_class_type="NotAFruit")
+
    def test_inheritance2(self):
        """A registered implementation which also derives from the containing
        class is skipped when the container's fields are expanded, but can
        still be used by setting its args explicitly before auto-creation."""
        # This is a case where a class could contain an instance
        # of a subclass, which is ignored.
        class Parent(ReplaceableBase):
            pass

        class Main(Configurable):
            parent: Parent
            # Note - no __post__init__

        @registry.register
        class Derived(Parent, Main):
            pass

        args = get_default_args(Main)
        # Derived has been ignored in processing Main.
        self.assertCountEqual(args.keys(), ["parent_class_type"])

        main = Main(**args)

        # parent_class_type defaulted to the "UNDEFAULTED" placeholder.
        with self.assertRaisesRegex(ValueError, "UNDEFAULTED has not been registered."):
            run_auto_creation(main)

        main.parent_class_type = "Derived"
        # Illustrates that a dict works fine instead of a DictConfig.
        main.parent_Derived_args = {}
        with self.assertRaises(AttributeError):
            main.parent
        run_auto_creation(main)
        self.assertIsInstance(main.parent, Derived)
+
    def test_redefine(self):
        """Re-registering an implementation under an existing name warns and
        the newest definition wins; implementations registered after a
        container has been expanded are not picked up by it."""
        class FruitBowl(ReplaceableBase):
            main_fruit: Fruit
            main_fruit_class_type: str = "Grape"

            def __post_init__(self):
                run_auto_creation(self)

        @registry.register
        @dataclass
        class Grape(Fruit):
            large: bool = False

            def get_color(self):
                return "red"

            def __post_init__(self):
                raise ValueError("This doesn't get called")

        bowl_args = get_default_args(FruitBowl)

        # 1. Redefine Grape (still as a dataclass) with new defaults/behavior.
        @registry.register
        @dataclass
        class Grape(Fruit):  # noqa: F811
            large: bool = True

            def get_color(self):
                return "green"

        with self.assertWarnsRegex(
            UserWarning, "New implementation of Grape is being chosen."
        ):
            defaulted_bowl = FruitBowl()
        self.assertIsInstance(defaulted_bowl.main_fruit, Grape)
        self.assertEqual(defaulted_bowl.main_fruit.large, True)
        self.assertEqual(defaulted_bowl.main_fruit.get_color(), "green")

        with self.assertWarnsRegex(
            UserWarning, "New implementation of Grape is being chosen."
        ):
            args_bowl = FruitBowl(**bowl_args)
        self.assertIsInstance(args_bowl.main_fruit, Grape)
        # Redefining the same class won't help with defaults because encoded in args
        self.assertEqual(args_bowl.main_fruit.large, False)
        # But the override worked.
        self.assertEqual(args_bowl.main_fruit.get_color(), "green")

        # 2. Try redefining without the dataclass modifier
        # This relies on the fact that default creation processes the class.
        # (otherwise incomprehensible messages)
        @registry.register
        class Grape(Fruit):  # noqa: F811
            large: bool = True

        with self.assertWarnsRegex(
            UserWarning, "New implementation of Grape is being chosen."
        ):
            FruitBowl(**bowl_args)

        # 3. Adding a new class doesn't get picked up, because the first
        # get_default_args call has frozen FruitBowl. This is intrinsic to
        # the way dataclass and expand_args_fields work in-place but
        # expand_args_fields is not pure - it depends on the registry.
        @registry.register
        class Fig(Fruit):
            pass

        bowl_args2 = get_default_args(FruitBowl)
        self.assertIn("main_fruit_Grape_args", bowl_args2)
        self.assertNotIn("main_fruit_Fig_args", bowl_args2)

        # TODO Is it possible to make this work?
        # bowl_args2["main_fruit_Fig_args"] = get_default_args(Fig)
        # bowl_args2.main_fruit_class_type = "Fig"
        # bowl2 = FruitBowl(**bowl_args2) <= unexpected argument

        # Note that it is possible to use Fig if you can set
        # bowl2.main_fruit_Fig_args explicitly (not in bowl_args2)
        # before run_auto_creation happens. See test_inheritance2
        # for an example.
+
    def test_no_replacement(self):
        """Nested plain Configurables (no ReplaceableBase involved): each
        sub-configurable is built from its *_args; Optional ones are
        controlled by a *_enabled flag."""
        # Test of Configurables without ReplaceableBase
        class A(Configurable):
            n: int = 9

        class B(Configurable):
            a: A

            def __post_init__(self):
                run_auto_creation(self)

        class C(Configurable):
            b1: B
            b2: Optional[B]
            b3: Optional[B]
            b2_enabled: bool = True
            b3_enabled: bool = False

            def __post_init__(self):
                run_auto_creation(self)

        c_args = get_default_args(C)
        c = C(**c_args)
        self.assertIsInstance(c.b1.a, A)
        self.assertEqual(c.b1.a.n, 9)
        # Non-Optional members get no *_enabled flag.
        self.assertFalse(hasattr(c, "b1_enabled"))
        self.assertIsInstance(c.b2.a, A)
        self.assertEqual(c.b2.a.n, 9)
        self.assertTrue(c.b2_enabled)
        # b3 is disabled, so it is None.
        self.assertIsNone(c.b3)
        self.assertFalse(c.b3_enabled)
+
    def test_doc(self):
        """Replicates the worked example from the config module's docstring."""
        # The case in the docstring.
        class A(ReplaceableBase):
            k: int = 1

        @registry.register
        class A1(A):
            m: int = 3

        @registry.register
        class A2(A):
            n: str = "2"

        class B(Configurable):
            a: A
            a_class_type: str = "A2"

            def __post_init__(self):
                run_auto_creation(self)

        b_args = get_default_args(B)
        # The member instance itself is not part of the args.
        self.assertNotIn("a", b_args)
        b = B(**b_args)
        self.assertEqual(b.a.n, "2")
+
    def test_raw_types(self):
        """Raw dataclasses, plain classes and functions can be embedded as
        DictConfig fields via get_default_args_field; OmegaConf represents
        tuple values as ListConfigs."""
        @dataclass
        class MyDataclass:
            int_field: int = 0
            none_field: Optional[int] = None
            float_field: float = 9.3
            bool_field: bool = True
            tuple_field: Tuple[int, ...] = (3,)

        class SimpleClass:
            def __init__(
                self,
                tuple_member_: Tuple[int, int] = (3, 4),
            ):
                self.tuple_member = tuple_member_

            def get_tuple(self):
                return self.tuple_member

        enable_get_default_args(SimpleClass)

        # Keyword-only function; the asserts run when it is called below.
        def f(*, a: int = 3, b: str = "kj"):
            self.assertEqual(a, 3)
            self.assertEqual(b, "kj")

        enable_get_default_args(f)

        class C(Configurable):
            simple: DictConfig = get_default_args_field(SimpleClass)
            # simple2: SimpleClass2 = SimpleClass2()
            mydata: DictConfig = get_default_args_field(MyDataclass)
            a_tuple: Tuple[float] = (4.0, 3.0)
            f_args: DictConfig = get_default_args_field(f)

        args = get_default_args(C)
        c = C(**args)
        self.assertCountEqual(args.keys(), ["simple", "mydata", "a_tuple", "f_args"])

        mydata = MyDataclass(**c.mydata)
        simple = SimpleClass(**c.simple)

        # OmegaConf converts tuples to ListConfigs (which act like lists).
        self.assertEqual(simple.get_tuple(), [3, 4])
        self.assertTrue(isinstance(simple.get_tuple(), ListConfig))
        # get_default_args converts tuples to ListConfigs (which act like lists).
        self.assertEqual(c.a_tuple, [4.0, 3.0])
        self.assertTrue(isinstance(c.a_tuple, ListConfig))
        self.assertEqual(mydata.tuple_field, (3,))
        self.assertTrue(isinstance(mydata.tuple_field, ListConfig))
        f(**c.f_args)
+
    def test_irrelevant_bases(self):
        """Inheriting from a non-dataclass base (like torch.nn.Module) must
        not break expansion, whichever side of Configurable it sits on."""
        class NotADataclass:
            # Like torch.nn.Module, this class contains annotations
            # but is not designed to be dataclass'd.
            # This test ensures that such classes, when inherited from,
            # are not accidentally affected by expand_args_fields.
            a: int = 9
            b: int

        class LeftConfigured(Configurable, NotADataclass):
            left: int = 1

        class RightConfigured(NotADataclass, Configurable):
            right: int = 2

        class Outer(Configurable):
            left: LeftConfigured
            right: RightConfigured

            def __post_init__(self):
                run_auto_creation(self)

        outer = Outer(**get_default_args(Outer))
        self.assertEqual(outer.left.left, 1)
        self.assertEqual(outer.right.right, 2)
        # NotADataclass itself still cannot be dataclass'd:
        # non-default field "b" follows defaulted "a".
        with self.assertRaisesRegex(TypeError, "non-default argument"):
            dataclass(NotADataclass)
+
    def test_unprocessed(self):
        """Configurable/ReplaceableBase subclasses are only turned into real
        dataclasses lazily, on first instantiation."""
        # behavior of Configurable classes which need processing in __new__,
        class UnprocessedConfigurable(Configurable):
            a: int = 9

        class UnprocessedReplaceable(ReplaceableBase):
            a: int = 9

        for Unprocessed in [UnprocessedConfigurable, UnprocessedReplaceable]:
            # Not a dataclass before instantiation; a dataclass afterwards.
            self.assertFalse(_is_actually_dataclass(Unprocessed))
            unprocessed = Unprocessed()
            self.assertTrue(_is_actually_dataclass(Unprocessed))
            self.assertTrue(isinstance(unprocessed, Unprocessed))
            self.assertEqual(unprocessed.a, 9)
+
    def test_enum(self):
        """Enum defaults survive the round trip to yaml, and OmegaConf
        validates merged values against the enum's member names."""
        # Test that enum values are kept, i.e. that OmegaConf's runtime checks
        # are in use.

        class A(Enum):
            B1 = "b1"
            B2 = "b2"

        # Test for a Configurable class, a function, and a regular class.
        class C(Configurable):
            a: A = A.B1

        # Also test for a callable with enum arguments.
        def C_fn(a: A = A.B1):
            pass

        enable_get_default_args(C_fn)

        class C_cl:
            def __init__(self, a: A = A.B1) -> None:
                pass

        enable_get_default_args(C_cl)

        for C_ in [C, C_fn, C_cl]:
            base = get_default_args(C_)
            # Yaml stores the member NAME (B1), not the value ("b1").
            self.assertEqual(OmegaConf.to_yaml(base), "a: B1\n")
            self.assertEqual(base.a, A.B1)
            replaced = OmegaConf.merge(base, {"a": "B2"})
            self.assertEqual(replaced.a, A.B2)
            with self.assertRaises(ValidationError):
                # You can't use a value which is not one of the
                # choices, even if it is the str representation
                # of one of the choices.
                OmegaConf.merge(base, {"a": "b2"})

            remerged = OmegaConf.merge(base, OmegaConf.create(OmegaConf.to_yaml(base)))
            self.assertEqual(remerged.a, A.B1)
+
    def test_pickle(self):
        """Args DictConfigs generated for enabled callables are picklable,
        including regenerated ones and after a round trip."""
        def func(a: int = 1, b: str = "3"):
            pass

        enable_get_default_args(func)

        args = get_default_args(func)
        args2 = pickle.loads(pickle.dumps(args))
        self.assertEqual(args2.a, 1)
        self.assertEqual(args2.b, "3")

        # Regenerated args pickle too, and the original stays picklable.
        args_regenerated = get_default_args(func)
        pickle.dumps(args_regenerated)
        pickle.dumps(args)
+
    def test_remove_unused_components(self):
        """remove_unused_components strips *_args of implementations not
        selected by the *_class_type fields, both from default args and from
        args structured from a live instance."""
        struct = get_default_args(MainTest)
        struct.n_ids = 32
        struct.the_fruit_class_type = "Pear"
        struct.the_second_fruit_class_type = "Banana"
        remove_unused_components(struct)
        expected_keys = [
            "n_ids",
            "n_reps",
            "the_fruit_Pear_args",
            "the_fruit_class_type",
            "the_second_fruit_Banana_args",
            "the_second_fruit_class_type",
        ]
        # "???" marks mandatory (missing) values in OmegaConf yaml.
        expected_yaml = textwrap.dedent(
            """\
            n_ids: 32
            n_reps: 8
            the_fruit_class_type: Pear
            the_fruit_Pear_args:
              n_pips: 13
            the_second_fruit_class_type: Banana
            the_second_fruit_Banana_args:
              pips: ???
              spots: ???
              bananame: ???
            """
        )
        self.assertEqual(sorted(struct.keys()), expected_keys)

        # Check that struct is what we expect
        expected = OmegaConf.create(expected_yaml)
        self.assertEqual(struct, expected)

        # Check that we get what we expect when writing to yaml.
        self.assertEqual(OmegaConf.to_yaml(struct, sort_keys=False), expected_yaml)

        main = MainTest(**struct)
        instance_data = OmegaConf.structured(main)
        remove_unused_components(instance_data)
        self.assertEqual(sorted(instance_data.keys()), expected_keys)
        self.assertEqual(instance_data, expected)
+
    def test_remove_unused_components_optional(self):
        """A disabled Optional member loses its *_args entry entirely."""

        class MainTestWrapper(Configurable):
            mt: Optional[MainTest]
            mt_enabled: bool = False

        args = get_default_args(MainTestWrapper)
        self.assertEqual(list(args.keys()), ["mt_enabled", "mt_args"])
        remove_unused_components(args)
        # Only the (false) enabled flag remains.
        self.assertEqual(OmegaConf.to_yaml(args), "mt_enabled: false\n")
+
    def test_get_instance_args(self):
        """Args recovered from a live instance reflect exactly what was
        passed in (here: empty Pear args), not the full defaults; merge with
        the class defaults to complete them."""
        mt1, mt2 = [
            MainTest(
                n_ids=0,
                n_reps=909,
                the_fruit_class_type="Pear",
                the_second_fruit_class_type="Pear",
                the_fruit_Pear_args=DictConfig({}),
                the_second_fruit_Pear_args={},
            )
            for _ in range(2)
        ]
        # Two equivalent ways to get the DictConfig back out of an instance.
        cfg1 = OmegaConf.structured(mt1)
        cfg2 = get_default_args(mt2)
        self.assertEqual(cfg1, cfg2)
        self.assertEqual(len(cfg1.the_second_fruit_Pear_args), 0)
        self.assertEqual(len(mt2.the_second_fruit_Pear_args), 0)

        from_cfg = MainTest(**cfg2)
        self.assertEqual(len(from_cfg.the_second_fruit_Pear_args), 0)

        # If you want the complete args, merge with the defaults.
        merged_args = OmegaConf.merge(get_default_args(MainTest), cfg2)
        from_merged = MainTest(**merged_args)
        self.assertEqual(len(from_merged.the_second_fruit_Pear_args), 1)
        self.assertEqual(from_merged.n_reps, 909)
+
    def test_tweak_hook(self):
        """A classmethod named <field>_tweak_args can edit a member's default
        args at expansion time; hooks are strictly per-field."""

        class A(Configurable):
            n: int = 9

        class Wrapper(Configurable):
            fruit: Fruit
            fruit_class_type: str = "Pear"
            fruit2: Fruit
            fruit2_class_type: str = "Pear"
            a: A
            a2: A
            a3: A

            @classmethod
            def a_tweak_args(cls, type, args):
                assert type == A
                args.n = 993

            @classmethod
            def a3_tweak_args(cls, type, args):
                # Hooks may also delete entries entirely.
                del args["n"]

            @classmethod
            def fruit_tweak_args(cls, type, args):
                # Called for every registered implementation of Fruit.
                assert issubclass(type, Fruit)
                if type == Pear:
                    assert args.n_pips == 13
                    args.n_pips = 19

        args = get_default_args(Wrapper)
        self.assertEqual(args.a_args.n, 993)
        # a2 has no hook and keeps the default.
        self.assertEqual(args.a2_args.n, 9)
        self.assertEqual(args.a3_args, {})
        self.assertEqual(args.fruit_Pear_args.n_pips, 19)
        # fruit2 has no hook and keeps the Pear default.
        self.assertEqual(args.fruit2_Pear_args.n_pips, 13)
+
    def test_impls(self):
        """run_auto_creation dispatches member construction through
        create_<name>_impl; replacing those hooks diverts creation."""
        # Check that create_x actually uses create_x_impl to do its work
        # by using all the member types, both with a faked impl function
        # and without.
        # members with _0 are optional and absent, those with _o are
        # optional and present.
        control_args = []

        def fake_impl(self, control, args):
            control_args.append(control)

        for fake in [False, True]:

            class MyClass(Configurable):
                fruit: Fruit
                fruit_class_type: str = "Orange"
                fruit_o: Optional[Fruit]
                fruit_o_class_type: str = "Orange"
                fruit_0: Optional[Fruit]
                fruit_0_class_type: Optional[str] = None
                boring: BoringConfigurable
                boring_o: Optional[BoringConfigurable]
                boring_o_enabled: bool = True
                boring_0: Optional[BoringConfigurable]
                boring_0_enabled: bool = False

                def __post_init__(self):
                    run_auto_creation(self)

            if fake:
                MyClass.create_fruit_impl = fake_impl
                MyClass.create_fruit_o_impl = fake_impl
                MyClass.create_boring_impl = fake_impl
                MyClass.create_boring_o_impl = fake_impl

            expand_args_fields(MyClass)
            instance = MyClass()
            # The fake impls never assign the members, so with fakes in
            # place the attributes are absent.
            for name in ["fruit", "fruit_o", "boring", "boring_o"]:
                self.assertEqual(
                    hasattr(instance, name), not fake, msg=f"{name} {fake}"
                )

            self.assertIsNone(instance.fruit_0)
            self.assertIsNone(instance.boring_0)
            if not fake:
                self.assertIsInstance(instance.fruit, Orange)
                self.assertIsInstance(instance.fruit_o, Orange)
                self.assertIsInstance(instance.boring, BoringConfigurable)
                self.assertIsInstance(instance.boring_o, BoringConfigurable)

        # Replaceables pass their class type to the impl; optional plain
        # configurables pass their enabled flag.
        self.assertEqual(control_args, ["Orange", "Orange", True, True])
+
    def test_pre_expand(self):
        """expand_args_fields invokes the class's pre_expand classmethod."""
        # Check that the precreate method of a class is called once before
        # when expand_args_fields is called on the class.

        class A(Configurable):
            n: int = 9

            @classmethod
            def pre_expand(cls):
                pass

        A.pre_expand = Mock()
        expand_args_fields(A)
        A.pre_expand.assert_called()
+
    def test_pre_expand_replaceable(self):
        """pre_expand defined on a replaceable base is invoked when a
        subclass is expanded."""
        # Check that the precreate method of a class is called once before
        # when expand_args_fields is called on the class.

        class A(ReplaceableBase):
            pass

            @classmethod
            def pre_expand(cls):
                pass

        class A1(A):
            # NOTE(review): "n: 9" annotates n with the literal 9 — no type,
            # no default. Probably intended "n: int = 9"; harmless here since
            # the field is never read, but confirm intent.
            n: 9

        A.pre_expand = Mock()
        expand_args_fields(A1)
        A.pre_expand.assert_called()
+
+
@dataclass(eq=False)
class MockDataclass:
    """Raw dataclass used to exercise get_default_args on plain dataclasses.

    Covers a field with no default, primitive and Optional defaults, and a
    mutable list default (supplied via a factory, as dataclasses require).
    eq=False keeps identity-based equality, matching a plain class.
    """

    field_no_default: int
    field_primitive_type: int = 42
    field_optional_none: Optional[int] = None
    field_optional_dict_none: Optional[Dict] = None
    field_optional_with_value: Optional[int] = 42
    # `list` directly is the idiomatic default_factory (no lambda needed).
    field_list_type: List[int] = field(default_factory=list)
+
+
class RefObject:
    """Opaque reference type whose instances cannot be represented in a
    DictConfig; used below as an unserializable default argument."""


# Module-level singleton used as the default for field_reference_type.
REF_OBJECT = RefObject()
+
+
class MockClassWithInit:  # noqa: B903
    """Plain (non-dataclass) counterpart of MockDataclass, adding:

    - field_no_nothing: neither annotation nor default;
    - field_reference_type: defaults to an object reference, which
      get_default_args cannot represent in a DictConfig.

    The mutable [] default is deliberate here (hence the noqa: B006);
    the readonly test below relies on defaults being copied.
    """

    def __init__(
        self,
        field_no_nothing,
        field_no_default: int,
        field_primitive_type: int = 42,
        field_optional_none: Optional[int] = None,
        field_optional_dict_none: Optional[Dict] = None,
        field_optional_with_value: Optional[int] = 42,
        field_list_type: List[int] = [],  # noqa: B006
        field_reference_type: RefObject = REF_OBJECT,
    ):
        self.field_no_nothing = field_no_nothing
        self.field_no_default = field_no_default
        self.field_primitive_type = field_primitive_type
        self.field_optional_none = field_optional_none
        self.field_optional_dict_none = field_optional_dict_none
        self.field_optional_with_value = field_optional_with_value
        self.field_list_type = field_list_type
        self.field_reference_type = field_reference_type


# Opt the plain class in to get_default_args support.
enable_get_default_args(MockClassWithInit)
+
+
class TestRawClasses(unittest.TestCase):
    """get_default_args behavior on a raw dataclass (MockDataclass) and on a
    plain class opted in via enable_get_default_args (MockClassWithInit)."""

    def setUp(self) -> None:
        # One live instance per class to compare generated defaults against.
        self._instances = {
            MockDataclass: MockDataclass(field_no_default=0),
            MockClassWithInit: MockClassWithInit(
                field_no_nothing="tratata", field_no_default=0
            ),
        }

    def test_get_default_args(self):
        """Fields without defaults, or with unrepresentable (object
        reference) defaults, are omitted from the generated args."""
        for cls in [MockDataclass, MockClassWithInit]:
            dataclass_defaults = get_default_args(cls)
            # DictConfig fields with missing values are `not in`
            self.assertNotIn("field_no_default", dataclass_defaults)
            self.assertNotIn("field_no_nothing", dataclass_defaults)
            self.assertNotIn("field_reference_type", dataclass_defaults)
            expected_defaults = [
                "field_primitive_type",
                "field_optional_none",
                "field_optional_dict_none",
                "field_optional_with_value",
                "field_list_type",
            ]

            if cls == MockDataclass:  # we don't remove undefaulted from dataclasses
                dataclass_defaults.field_no_default = 0
                expected_defaults.insert(0, "field_no_default")
            self.assertEqual(list(dataclass_defaults), expected_defaults)
            for name, val in dataclass_defaults.items():
                self.assertTrue(hasattr(self._instances[cls], name))
                self.assertEqual(val, getattr(self._instances[cls], name))

    def test_get_default_args_readonly(self):
        """Mutating a generated args container must not leak back into
        instances: list defaults are copied, not shared."""
        for cls in [MockDataclass, MockClassWithInit]:
            dataclass_defaults = get_default_args(cls)
            dataclass_defaults["field_list_type"].append(13)
            self.assertEqual(self._instances[cls].field_list_type, [])
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_config_use.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_config_use.py
new file mode 100644
index 0000000000000000000000000000000000000000..9cb455da2f5f8d3fbcb3f829540adf65f1e67a5e
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_config_use.py
@@ -0,0 +1,94 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+
+from omegaconf import OmegaConf
+from pytorch3d.implicitron.models.feature_extractor.resnet_feature_extractor import (
+ ResNetFeatureExtractor,
+)
+from pytorch3d.implicitron.models.generic_model import GenericModel
+from pytorch3d.implicitron.models.global_encoder.global_encoder import (
+ SequenceAutodecoder,
+)
+from pytorch3d.implicitron.models.implicit_function.idr_feature_field import (
+ IdrFeatureField,
+)
+from pytorch3d.implicitron.models.implicit_function.neural_radiance_field import (
+ NeuralRadianceFieldImplicitFunction,
+)
+from pytorch3d.implicitron.models.renderer.lstm_renderer import LSTMRenderer
+from pytorch3d.implicitron.models.renderer.multipass_ea import (
+ MultiPassEmissionAbsorptionRenderer,
+)
+from pytorch3d.implicitron.models.view_pooler.feature_aggregator import (
+ AngleWeightedIdentityFeatureAggregator,
+)
+from pytorch3d.implicitron.tools.config import (
+ get_default_args,
+ remove_unused_components,
+)
+from tests.common_testing import get_tests_dir
+
+from .common_resources import provide_resnet34
+
# Directory holding golden yaml outputs for these tests.
DATA_DIR = get_tests_dir() / "implicitron/data"
# Set to True to (re)generate the golden yaml files in DATA_DIR.
DEBUG: bool = False
+
+# Tests the use of the config system in implicitron
+
+
class TestGenericModel(unittest.TestCase):
    """Exercises the config system on GenericModel: default construction and
    construction with several component class types overridden."""

    def setUp(self):
        # Yaml comparisons produce long diffs; show them in full.
        self.maxDiff = None

    def test_create_gm(self):
        """Defaults: multipass EA renderer, NeRF implicit function, and all
        optional components absent."""
        args = get_default_args(GenericModel)
        gm = GenericModel(**args)
        self.assertIsInstance(gm.renderer, MultiPassEmissionAbsorptionRenderer)
        self.assertIsInstance(
            gm._implicit_functions[0]._fn, NeuralRadianceFieldImplicitFunction
        )
        self.assertIsNone(gm.global_encoder)
        # The raw field is replaced by the wrapped _implicit_functions list.
        self.assertFalse(hasattr(gm, "implicit_function"))
        self.assertIsNone(gm.view_pooler)
        self.assertIsNone(gm.image_feature_extractor)

    def test_create_gm_overrides(self):
        """Override component class types via args, then compare the cleaned
        instance yaml against the golden file (set DEBUG to regenerate)."""
        provide_resnet34()
        args = get_default_args(GenericModel)
        args.view_pooler_enabled = True
        args.view_pooler_args.feature_aggregator_class_type = (
            "AngleWeightedIdentityFeatureAggregator"
        )
        args.image_feature_extractor_class_type = "ResNetFeatureExtractor"
        args.implicit_function_class_type = "IdrFeatureField"
        args.global_encoder_class_type = "SequenceAutodecoder"
        idr_args = args.implicit_function_IdrFeatureField_args
        idr_args.n_harmonic_functions_xyz = 1729

        args.renderer_class_type = "LSTMRenderer"
        gm = GenericModel(**args)
        self.assertIsInstance(gm.renderer, LSTMRenderer)
        self.assertIsInstance(
            gm.view_pooler.feature_aggregator,
            AngleWeightedIdentityFeatureAggregator,
        )
        self.assertIsInstance(gm._implicit_functions[0]._fn, IdrFeatureField)
        self.assertEqual(gm._implicit_functions[0]._fn.n_harmonic_functions_xyz, 1729)
        self.assertIsInstance(gm.global_encoder, SequenceAutodecoder)
        self.assertIsInstance(gm.image_feature_extractor, ResNetFeatureExtractor)
        self.assertFalse(hasattr(gm, "implicit_function"))

        instance_args = OmegaConf.structured(gm)
        if DEBUG:
            full_yaml = OmegaConf.to_yaml(instance_args, sort_keys=False)
            (DATA_DIR / "overrides_full.yaml").write_text(full_yaml)
        remove_unused_components(instance_args)
        yaml = OmegaConf.to_yaml(instance_args, sort_keys=False)
        if DEBUG:
            (DATA_DIR / "overrides_.yaml").write_text(yaml)
        self.assertEqual(yaml, (DATA_DIR / "overrides.yaml").read_text())
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_data_cow.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_data_cow.py
new file mode 100644
index 0000000000000000000000000000000000000000..801863e9bcadf7f469ce75a0c6ccf15dc8e6a784
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_data_cow.py
@@ -0,0 +1,57 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+import unittest
+
+import torch
+from pytorch3d.implicitron.dataset.frame_data import FrameData
+from pytorch3d.implicitron.dataset.rendered_mesh_dataset_map_provider import (
+ RenderedMeshDatasetMapProvider,
+)
+from pytorch3d.implicitron.tools.config import expand_args_fields
+from pytorch3d.renderer import FoVPerspectiveCameras
+from tests.common_testing import TestCaseMixin
+
+
# Set in remote-execution workers, where the rendering below is skipped
# (see test_simple).
inside_re_worker = os.environ.get("INSIDE_RE_WORKER", False)
+
+
class TestDataCow(TestCaseMixin, unittest.TestCase):
    """RenderedMeshDatasetMapProvider: render the cow mesh and validate the
    resulting dataset map and cameras."""

    def test_simple(self):
        # Rendering is skipped inside the remote-execution environment.
        if inside_re_worker:
            return
        expand_args_fields(RenderedMeshDatasetMapProvider)
        self._runtest(use_point_light=True, num_views=4)
        self._runtest(use_point_light=False, num_views=4)

    def _runtest(self, **kwargs):
        """Build a provider with the given options and check its train split
        and camera projection against hard-coded reference values."""
        provider = RenderedMeshDatasetMapProvider(**kwargs)
        dataset_map = provider.get_dataset_map()
        # Expected FoV projection matrix of the rendered cameras.
        known_matrix = torch.zeros(1, 4, 4)
        known_matrix[0, 0, 0] = 1.7321
        known_matrix[0, 1, 1] = 1.7321
        known_matrix[0, 2, 2] = 1.0101
        known_matrix[0, 3, 2] = -1.0101
        known_matrix[0, 2, 3] = 1

        # Only a train split is produced.
        self.assertIsNone(dataset_map.val)
        self.assertIsNone(dataset_map.test)
        self.assertEqual(len(dataset_map.train), provider.num_views)

        value = dataset_map.train[0]
        self.assertIsInstance(value, FrameData)

        self.assertEqual(value.image_rgb.shape, (3, 128, 128))
        self.assertEqual(value.fg_probability.shape, (1, 128, 128))
        # corner of image is background
        self.assertEqual(value.fg_probability[0, 0, 0], 0)
        self.assertEqual(value.fg_probability.max(), 1.0)
        self.assertIsInstance(value.camera, FoVPerspectiveCameras)
        self.assertEqual(len(value.camera), 1)
        self.assertIsNone(value.camera.K)
        matrix = value.camera.get_projection_transform().get_matrix()
        self.assertClose(matrix, known_matrix, atol=1e-4)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_data_json_index.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_data_json_index.py
new file mode 100644
index 0000000000000000000000000000000000000000..e11e7b449fe31bc04655540eef559b6ba6d6bb58
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_data_json_index.py
@@ -0,0 +1,80 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+import unittest
+
+from pytorch3d.implicitron.dataset.data_source import ImplicitronDataSource
+from pytorch3d.implicitron.tools.config import get_default_args
+from pytorch3d.renderer import PerspectiveCameras
+from tests.common_testing import TestCaseMixin
+
# These tests are only run internally, where the data is available.
internal = os.environ.get("FB_TEST", False)
inside_re_worker = os.environ.get("INSIDE_RE_WORKER", False)
# The data is unavailable both externally and inside the re worker.
skip_tests = not internal or inside_re_worker
+
+
@unittest.skipIf(skip_tests, "no data")
class TestDataJsonIndex(TestCaseMixin, unittest.TestCase):
    """JsonIndexDatasetMapProvider on internal CO3D data (skateboard
    category, restricted to one sequence)."""

    def test_loaders(self):
        """Build the data source and check split sizes and train cameras."""
        args = get_default_args(ImplicitronDataSource)
        args.dataset_map_provider_class_type = "JsonIndexDatasetMapProvider"
        dataset_args = args.dataset_map_provider_JsonIndexDatasetMapProvider_args
        dataset_args.category = "skateboard"
        dataset_args.dataset_root = "manifold://co3d/tree/extracted"
        dataset_args.test_restrict_sequence_id = 0
        dataset_args.dataset_JsonIndexDataset_args.limit_sequences_to = 1

        data_source = ImplicitronDataSource(**args)

        cameras = data_source.all_train_cameras
        self.assertIsInstance(cameras, PerspectiveCameras)
        self.assertEqual(len(cameras), 81)

        data_sets, data_loaders = data_source.get_datasets_and_dataloaders()

        self.assertEqual(len(data_sets.train), 81)
        self.assertEqual(len(data_sets.val), 102)
        self.assertEqual(len(data_sets.test), 102)

    def test_visitor_subsets(self):
        """Sequence visitor methods: frames come back in timestamp order,
        both for the whole sequence and restricted to a subset."""
        args = get_default_args(ImplicitronDataSource)
        args.dataset_map_provider_class_type = "JsonIndexDatasetMapProvider"
        dataset_args = args.dataset_map_provider_JsonIndexDatasetMapProvider_args
        dataset_args.category = "skateboard"
        dataset_args.dataset_root = "manifold://co3d/tree/extracted"
        dataset_args.test_restrict_sequence_id = 0
        dataset_args.dataset_JsonIndexDataset_args.limit_sequences_to = 1

        data_source = ImplicitronDataSource(**args)
        datasets, _ = data_source.get_datasets_and_dataloaders()
        dataset = datasets.test

        sequences = list(dataset.sequence_names())
        self.assertEqual(len(sequences), 1)
        i = 0
        for seq in sequences:
            last_ts = float("-Inf")
            seq_frames = list(dataset.sequence_frames_in_order(seq))
            self.assertEqual(len(seq_frames), 102)
            # Indices are consecutive; timestamps are non-decreasing.
            for ts, _, idx in seq_frames:
                self.assertEqual(i, idx)
                i += 1
                self.assertGreaterEqual(ts, last_ts)
                last_ts = ts

            last_ts = float("-Inf")
            known_frames = list(dataset.sequence_frames_in_order(seq, "test_known"))
            self.assertEqual(len(known_frames), 81)
            for ts, _, _ in known_frames:
                self.assertGreaterEqual(ts, last_ts)
                last_ts = ts

            known_indices = list(dataset.sequence_indices_in_order(seq, "test_known"))
            self.assertEqual(len(known_indices), 81)

            break  # testing only the first sequence
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_data_llff.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_data_llff.py
new file mode 100644
index 0000000000000000000000000000000000000000..040a720f1238402d4c1c046f9d8e6171917c136c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_data_llff.py
@@ -0,0 +1,158 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+import unittest
+
+import torch
+from pytorch3d.implicitron.dataset.blender_dataset_map_provider import (
+ BlenderDatasetMapProvider,
+)
+from pytorch3d.implicitron.dataset.data_source import ImplicitronDataSource
+from pytorch3d.implicitron.dataset.dataset_base import FrameData
+from pytorch3d.implicitron.dataset.llff_dataset_map_provider import (
+ LlffDatasetMapProvider,
+)
+from pytorch3d.implicitron.tools.config import expand_args_fields, get_default_args
+from pytorch3d.renderer import PerspectiveCameras
+from tests.common_testing import TestCaseMixin
+
+
+# These tests are only run internally, where the data is available.
+internal = os.environ.get("FB_TEST", False)
+inside_re_worker = os.environ.get("INSIDE_RE_WORKER", False)
+
+
+@unittest.skipUnless(internal, "no data")
+class TestDataLlff(TestCaseMixin, unittest.TestCase):
+ def test_synthetic(self):
+ if inside_re_worker:
+ return
+ expand_args_fields(BlenderDatasetMapProvider)
+
+ provider = BlenderDatasetMapProvider(
+ base_dir="manifold://co3d/tree/nerf_data/nerf_synthetic/lego",
+ object_name="lego",
+ )
+ dataset_map = provider.get_dataset_map()
+ known_matrix = torch.zeros(1, 4, 4)
+ known_matrix[0, 0, 0] = 2.7778
+ known_matrix[0, 1, 1] = 2.7778
+ known_matrix[0, 2, 3] = 1
+ known_matrix[0, 3, 2] = 1
+
+ for name, length in [("train", 100), ("val", 100), ("test", 200)]:
+ dataset = getattr(dataset_map, name)
+ self.assertEqual(len(dataset), length)
+ # try getting a value
+ value = dataset[0]
+ self.assertEqual(value.image_rgb.shape, (3, 800, 800))
+ self.assertEqual(value.fg_probability.shape, (1, 800, 800))
+ # corner of image is background
+ self.assertEqual(value.fg_probability[0, 0, 0], 0)
+ self.assertEqual(value.fg_probability.max(), 1.0)
+ self.assertIsInstance(value.camera, PerspectiveCameras)
+ self.assertEqual(len(value.camera), 1)
+ self.assertIsNone(value.camera.K)
+ matrix = value.camera.get_projection_transform().get_matrix()
+ self.assertClose(matrix, known_matrix, atol=1e-4)
+ self.assertIsInstance(value, FrameData)
+
+ def test_llff(self):
+ if inside_re_worker:
+ return
+ expand_args_fields(LlffDatasetMapProvider)
+
+ provider = LlffDatasetMapProvider(
+ base_dir="manifold://co3d/tree/nerf_data/nerf_llff_data/fern",
+ object_name="fern",
+ downscale_factor=8,
+ )
+ dataset_map = provider.get_dataset_map()
+ known_matrix = torch.zeros(1, 4, 4)
+ known_matrix[0, 0, 0] = 2.1564
+ known_matrix[0, 1, 1] = 2.1564
+ known_matrix[0, 2, 3] = 1
+ known_matrix[0, 3, 2] = 1
+
+ for name, length, frame_type in [
+ ("train", 17, "known"),
+ ("test", 3, "unseen"),
+ ("val", 3, "unseen"),
+ ]:
+ dataset = getattr(dataset_map, name)
+ self.assertEqual(len(dataset), length)
+ # try getting a value
+ value = dataset[0]
+ self.assertIsInstance(value, FrameData)
+ self.assertEqual(value.frame_type, frame_type)
+ self.assertEqual(value.image_rgb.shape, (3, 378, 504))
+ self.assertIsInstance(value.camera, PerspectiveCameras)
+ self.assertEqual(len(value.camera), 1)
+ self.assertIsNone(value.camera.K)
+ matrix = value.camera.get_projection_transform().get_matrix()
+ self.assertClose(matrix, known_matrix, atol=1e-4)
+
+ self.assertEqual(len(dataset_map.test.get_eval_batches()), 3)
+ for batch in dataset_map.test.get_eval_batches():
+ self.assertEqual(len(batch), 1)
+ self.assertEqual(dataset_map.test[batch[0]].frame_type, "unseen")
+
+ def test_include_known_frames(self):
+ if inside_re_worker:
+ return
+ expand_args_fields(LlffDatasetMapProvider)
+
+ provider = LlffDatasetMapProvider(
+ base_dir="manifold://co3d/tree/nerf_data/nerf_llff_data/fern",
+ object_name="fern",
+ n_known_frames_for_test=2,
+ )
+ dataset_map = provider.get_dataset_map()
+
+ for name, types in [
+ ("train", ["known"] * 17),
+ ("val", ["unseen"] * 3 + ["known"] * 17),
+ ("test", ["unseen"] * 3 + ["known"] * 17),
+ ]:
+ dataset = getattr(dataset_map, name)
+ self.assertEqual(len(dataset), len(types))
+ for i, frame_type in enumerate(types):
+ value = dataset[i]
+ self.assertEqual(value.frame_type, frame_type)
+ self.assertIsNone(value.fg_probability)
+
+ self.assertEqual(len(dataset_map.test.get_eval_batches()), 3)
+ for batch in dataset_map.test.get_eval_batches():
+ self.assertEqual(len(batch), 3)
+ self.assertEqual(dataset_map.test[batch[0]].frame_type, "unseen")
+ for i in batch[1:]:
+ self.assertEqual(dataset_map.test[i].frame_type, "known")
+
+ def test_loaders(self):
+ if inside_re_worker:
+ return
+ args = get_default_args(ImplicitronDataSource)
+ args.dataset_map_provider_class_type = "BlenderDatasetMapProvider"
+ dataset_args = args.dataset_map_provider_BlenderDatasetMapProvider_args
+ dataset_args.object_name = "lego"
+ dataset_args.base_dir = "manifold://co3d/tree/nerf_data/nerf_synthetic/lego"
+
+ data_source = ImplicitronDataSource(**args)
+ _, data_loaders = data_source.get_datasets_and_dataloaders()
+ for i in data_loaders.train:
+ self.assertEqual(i.frame_type, ["known"])
+ self.assertEqual(i.image_rgb.shape, (1, 3, 800, 800))
+ for i in data_loaders.val:
+ self.assertEqual(i.frame_type, ["unseen"])
+ self.assertEqual(i.image_rgb.shape, (1, 3, 800, 800))
+ for i in data_loaders.test:
+ self.assertEqual(i.frame_type, ["unseen"])
+ self.assertEqual(i.image_rgb.shape, (1, 3, 800, 800))
+
+ cameras = data_source.all_train_cameras
+ self.assertIsInstance(cameras, PerspectiveCameras)
+ self.assertEqual(len(cameras), 100)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_data_source.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_data_source.py
new file mode 100644
index 0000000000000000000000000000000000000000..67c79fc5788f1069094bb499726b9c2ae85fd02f
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_data_source.py
@@ -0,0 +1,123 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import contextlib
+import os
+import unittest
+import unittest.mock
+
+import torch
+from omegaconf import OmegaConf
+from pytorch3d.implicitron.dataset.data_loader_map_provider import (
+ SequenceDataLoaderMapProvider,
+ SimpleDataLoaderMapProvider,
+)
+from pytorch3d.implicitron.dataset.data_source import ImplicitronDataSource
+from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset
+from pytorch3d.implicitron.tools.config import get_default_args
+from tests.common_testing import get_tests_dir
+from tests.implicitron.common_resources import get_skateboard_data
+
+DATA_DIR = get_tests_dir() / "implicitron/data"
+DEBUG: bool = False
+
+
+class TestDataSource(unittest.TestCase):
+ def setUp(self):
+ self.maxDiff = None
+ torch.manual_seed(42)
+
+ stack = contextlib.ExitStack()
+ self.dataset_root, self.path_manager = stack.enter_context(
+ get_skateboard_data()
+ )
+ self.addCleanup(stack.close)
+
+ def _test_omegaconf_generic_failure(self):
+        # Possible OmegaConf bug - this is why we need _GenericWorkaround
+ from dataclasses import dataclass
+
+ import torch
+
+ @dataclass
+ class D(torch.utils.data.Dataset[int]):
+ a: int = 3
+
+ OmegaConf.structured(D)
+
+ def _test_omegaconf_ListList(self):
+ # Demo that OmegaConf doesn't support nested lists
+ from dataclasses import dataclass
+ from typing import Sequence
+
+ @dataclass
+ class A:
+ a: Sequence[Sequence[int]] = ((32,),)
+
+ OmegaConf.structured(A)
+
+ def test_JsonIndexDataset_args(self):
+ # test that JsonIndexDataset works with get_default_args
+ get_default_args(JsonIndexDataset)
+
+ def test_one(self):
+ cfg = get_default_args(ImplicitronDataSource)
+ # making the test invariant to env variables
+ cfg.dataset_map_provider_JsonIndexDatasetMapProvider_args.dataset_root = ""
+ cfg.dataset_map_provider_JsonIndexDatasetMapProviderV2_args.dataset_root = ""
+ # making the test invariant to the presence of SQL dataset
+ if "dataset_map_provider_SqlIndexDatasetMapProvider_args" in cfg:
+ del cfg.dataset_map_provider_SqlIndexDatasetMapProvider_args
+ yaml = OmegaConf.to_yaml(cfg, sort_keys=False)
+ if DEBUG:
+ (DATA_DIR / "data_source.yaml").write_text(yaml)
+ self.assertEqual(yaml, (DATA_DIR / "data_source.yaml").read_text())
+
+ def test_default(self):
+ if os.environ.get("INSIDE_RE_WORKER") is not None:
+ return
+ args = get_default_args(ImplicitronDataSource)
+ args.dataset_map_provider_class_type = "JsonIndexDatasetMapProvider"
+ dataset_args = args.dataset_map_provider_JsonIndexDatasetMapProvider_args
+ dataset_args.category = "skateboard"
+ dataset_args.test_restrict_sequence_id = 0
+ dataset_args.n_frames_per_sequence = -1
+
+ dataset_args.dataset_root = self.dataset_root
+
+ data_source = ImplicitronDataSource(**args)
+ self.assertIsInstance(
+ data_source.data_loader_map_provider, SequenceDataLoaderMapProvider
+ )
+ _, data_loaders = data_source.get_datasets_and_dataloaders()
+ self.assertEqual(len(data_loaders.train), 81)
+ for i in data_loaders.train:
+ self.assertEqual(i.frame_type, ["test_known"])
+ break
+
+ def test_simple(self):
+ if os.environ.get("INSIDE_RE_WORKER") is not None:
+ return
+ args = get_default_args(ImplicitronDataSource)
+ args.dataset_map_provider_class_type = "JsonIndexDatasetMapProvider"
+ args.data_loader_map_provider_class_type = "SimpleDataLoaderMapProvider"
+ dataset_args = args.dataset_map_provider_JsonIndexDatasetMapProvider_args
+ dataset_args.category = "skateboard"
+ dataset_args.test_restrict_sequence_id = 0
+ dataset_args.n_frames_per_sequence = -1
+
+ dataset_args.dataset_root = self.dataset_root
+
+ data_source = ImplicitronDataSource(**args)
+ self.assertIsInstance(
+ data_source.data_loader_map_provider, SimpleDataLoaderMapProvider
+ )
+ _, data_loaders = data_source.get_datasets_and_dataloaders()
+
+ self.assertEqual(len(data_loaders.train), 81)
+ for i in data_loaders.train:
+ self.assertEqual(i.frame_type, ["test_known"])
+ break
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_dataset_visualize.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_dataset_visualize.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7af5b035cf3b4ea6e7b61ac638cd3049c611dda
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_dataset_visualize.py
@@ -0,0 +1,199 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import contextlib
+import copy
+import os
+import unittest
+
+import torch
+import torchvision
+from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset
+from pytorch3d.implicitron.dataset.visualize import get_implicitron_sequence_pointcloud
+from pytorch3d.implicitron.tools.config import expand_args_fields
+from pytorch3d.implicitron.tools.point_cloud_utils import render_point_cloud_pytorch3d
+from pytorch3d.vis.plotly_vis import plot_scene
+
+
+if os.environ.get("INSIDE_RE_WORKER") is None:
+ from visdom import Visdom
+
+from tests.common_testing import interactive_testing_requested
+
+from .common_resources import get_skateboard_data
+
+VISDOM_PORT = int(os.environ.get("VISDOM_PORT", 8097))
+
+
+class TestDatasetVisualize(unittest.TestCase):
+ def setUp(self):
+ if not interactive_testing_requested():
+ return
+ category = "skateboard"
+ stack = contextlib.ExitStack()
+ dataset_root, path_manager = stack.enter_context(get_skateboard_data())
+ self.addCleanup(stack.close)
+ frame_file = os.path.join(dataset_root, category, "frame_annotations.jgz")
+ sequence_file = os.path.join(dataset_root, category, "sequence_annotations.jgz")
+ self.image_size = 256
+ expand_args_fields(JsonIndexDataset)
+ self.datasets = {
+ "simple": JsonIndexDataset(
+ frame_annotations_file=frame_file,
+ sequence_annotations_file=sequence_file,
+ dataset_root=dataset_root,
+ image_height=self.image_size,
+ image_width=self.image_size,
+ box_crop=True,
+ load_point_clouds=True,
+ path_manager=path_manager,
+ ),
+ "nonsquare": JsonIndexDataset(
+ frame_annotations_file=frame_file,
+ sequence_annotations_file=sequence_file,
+ dataset_root=dataset_root,
+ image_height=self.image_size,
+ image_width=self.image_size // 2,
+ box_crop=True,
+ load_point_clouds=True,
+ path_manager=path_manager,
+ ),
+ "nocrop": JsonIndexDataset(
+ frame_annotations_file=frame_file,
+ sequence_annotations_file=sequence_file,
+ dataset_root=dataset_root,
+ image_height=self.image_size,
+ image_width=self.image_size // 2,
+ box_crop=False,
+ load_point_clouds=True,
+ path_manager=path_manager,
+ ),
+ }
+ self.datasets.update(
+ {
+ k + "_newndc": _change_annotations_to_new_ndc(dataset)
+ for k, dataset in self.datasets.items()
+ }
+ )
+ self.visdom = Visdom(port=VISDOM_PORT)
+ if not self.visdom.check_connection():
+ print("Visdom server not running! Disabling visdom visualizations.")
+ self.visdom = None
+
+ def _render_one_pointcloud(self, point_cloud, cameras, render_size):
+ (_image_render, _, _) = render_point_cloud_pytorch3d(
+ cameras,
+ point_cloud,
+ render_size=render_size,
+ point_radius=1e-2,
+ topk=10,
+ bg_color=0.0,
+ )
+ return _image_render.clamp(0.0, 1.0)
+
+ def test_one(self):
+ """Test dataset visualization."""
+ if not interactive_testing_requested():
+ return
+ for max_frames in (16, -1):
+ for load_dataset_point_cloud in (True, False):
+ for dataset_key in self.datasets:
+ self._gen_and_render_pointcloud(
+ max_frames, load_dataset_point_cloud, dataset_key
+ )
+
+ def _gen_and_render_pointcloud(
+ self, max_frames, load_dataset_point_cloud, dataset_key
+ ):
+ dataset = self.datasets[dataset_key]
+ # load the point cloud of the first sequence
+ sequence_show = list(dataset.seq_annots.keys())[0]
+ device = torch.device("cuda:0")
+
+ point_cloud, sequence_frame_data = get_implicitron_sequence_pointcloud(
+ dataset,
+ sequence_name=sequence_show,
+ mask_points=True,
+ max_frames=max_frames,
+ num_workers=10,
+ load_dataset_point_cloud=load_dataset_point_cloud,
+ )
+
+ # render on gpu
+ point_cloud = point_cloud.to(device)
+ cameras = sequence_frame_data.camera.to(device)
+
+ # render the point_cloud from the viewpoint of loaded cameras
+ images_render = torch.cat(
+ [
+ self._render_one_pointcloud(
+ point_cloud,
+ cameras[frame_i],
+ (
+ dataset.image_height,
+ dataset.image_width,
+ ),
+ )
+ for frame_i in range(len(cameras))
+ ]
+ ).cpu()
+ images_gt_and_render = torch.cat(
+ [sequence_frame_data.image_rgb, images_render], dim=3
+ )
+
+ imfile = os.path.join(
+ os.path.split(os.path.abspath(__file__))[0],
+ "test_dataset_visualize"
+ + f"_max_frames={max_frames}"
+ + f"_load_pcl={load_dataset_point_cloud}.png",
+ )
+ print(f"Exporting image {imfile}.")
+ torchvision.utils.save_image(images_gt_and_render, imfile, nrow=2)
+
+ if self.visdom is not None:
+ test_name = f"{max_frames}_{load_dataset_point_cloud}_{dataset_key}"
+ self.visdom.images(
+ images_gt_and_render,
+ env="test_dataset_visualize",
+ win=f"pcl_renders_{test_name}",
+ opts={"title": f"pcl_renders_{test_name}"},
+ )
+ plotlyplot = plot_scene(
+ {
+ "scene_batch": {
+ "cameras": cameras,
+ "point_cloud": point_cloud,
+ }
+ },
+ camera_scale=1.0,
+ pointcloud_max_points=10000,
+ pointcloud_marker_size=1.0,
+ )
+ self.visdom.plotlyplot(
+ plotlyplot,
+ env="test_dataset_visualize",
+ win=f"pcl_{test_name}",
+ )
+
+
+def _change_annotations_to_new_ndc(dataset):
+ dataset = copy.deepcopy(dataset)
+ for frame in dataset.frame_annots:
+ vp = frame["frame_annotation"].viewpoint
+ vp.intrinsics_format = "ndc_isotropic"
+        # this assumes the focal length to be equal on x and y (ok for a test)
+ max_flength = max(vp.focal_length)
+ vp.principal_point = (
+ vp.principal_point[0] * max_flength / vp.focal_length[0],
+ vp.principal_point[1] * max_flength / vp.focal_length[1],
+ )
+ vp.focal_length = (
+ max_flength,
+ max_flength,
+ )
+
+ return dataset
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_eval_cameras.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_eval_cameras.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ec795251ee05bb3bff9425f9d9a965e4d9abffa
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_eval_cameras.py
@@ -0,0 +1,42 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+
+import torch
+from pytorch3d.implicitron.tools.eval_video_trajectory import (
+ generate_eval_video_cameras,
+)
+from pytorch3d.renderer.cameras import look_at_view_transform, PerspectiveCameras
+from pytorch3d.transforms import axis_angle_to_matrix
+from tests.common_testing import TestCaseMixin
+
+
+class TestEvalCameras(TestCaseMixin, unittest.TestCase):
+ def setUp(self):
+ torch.manual_seed(42)
+
+ def test_circular(self):
+ n_train_cameras = 10
+ n_test_cameras = 100
+ R, T = look_at_view_transform(azim=torch.rand(n_train_cameras) * 360)
+ amplitude = 0.01
+ R_jiggled = torch.bmm(
+ R, axis_angle_to_matrix(torch.rand(n_train_cameras, 3) * amplitude)
+ )
+ cameras_train = PerspectiveCameras(R=R_jiggled, T=T)
+ cameras_test = generate_eval_video_cameras(
+ cameras_train, trajectory_type="circular_lsq_fit", trajectory_scale=1.0
+ )
+
+ positions_test = cameras_test.get_camera_center()
+ center = positions_test.mean(0)
+ self.assertClose(center, torch.zeros(3), atol=0.1)
+ self.assertClose(
+ (positions_test - center).norm(dim=[1]),
+ torch.ones(n_test_cameras),
+ atol=0.1,
+ )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_eval_demo.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_eval_demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..37df395db1cb871ae2e0f54f224d404056079fb1
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_eval_demo.py
@@ -0,0 +1,29 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+import unittest
+
+from pytorch3d.implicitron import eval_demo
+
+from tests.common_testing import interactive_testing_requested
+
+from .common_resources import CO3D_MANIFOLD_PATH
+
+"""
+This test runs a single sequence eval_demo, useful for debugging datasets.
+It only runs interactively.
+"""
+
+
+class TestEvalDemo(unittest.TestCase):
+ def test_a(self):
+ if not interactive_testing_requested():
+ return
+
+ os.environ["CO3D_DATASET_ROOT"] = CO3D_MANIFOLD_PATH
+
+ eval_demo.evaluate_dbir_for_category("donut", single_sequence_id=0)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_evaluation.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..400d7835ff09f489a93f4fb4375f3ee403db4084
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_evaluation.py
@@ -0,0 +1,323 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import contextlib
+import dataclasses
+import itertools
+import math
+import os
+import unittest
+
+import lpips
+import numpy as np
+import torch
+
+from pytorch3d.implicitron.dataset.frame_data import FrameData
+from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset
+from pytorch3d.implicitron.evaluation.evaluate_new_view_synthesis import eval_batch
+from pytorch3d.implicitron.models.base_model import ImplicitronModelBase
+from pytorch3d.implicitron.models.generic_model import GenericModel # noqa
+from pytorch3d.implicitron.models.model_dbir import ModelDBIR # noqa
+from pytorch3d.implicitron.tools.config import expand_args_fields, registry
+from pytorch3d.implicitron.tools.metric_utils import calc_psnr, eval_depth
+from pytorch3d.implicitron.tools.utils import dataclass_to_cuda_
+
+from .common_resources import get_skateboard_data, provide_lpips_vgg
+
+
+class TestEvaluation(unittest.TestCase):
+ def setUp(self):
+ # initialize evaluation dataset/dataloader
+ torch.manual_seed(42)
+
+ stack = contextlib.ExitStack()
+ dataset_root, path_manager = stack.enter_context(get_skateboard_data())
+ self.addCleanup(stack.close)
+
+ category = "skateboard"
+ frame_file = os.path.join(dataset_root, category, "frame_annotations.jgz")
+ sequence_file = os.path.join(dataset_root, category, "sequence_annotations.jgz")
+ self.image_size = 64
+ expand_args_fields(JsonIndexDataset)
+ self.dataset = JsonIndexDataset(
+ frame_annotations_file=frame_file,
+ sequence_annotations_file=sequence_file,
+ dataset_root=dataset_root,
+ image_height=self.image_size,
+ image_width=self.image_size,
+ box_crop=True,
+ remove_empty_masks=False,
+ path_manager=path_manager,
+ )
+ self.bg_color = (0.0, 0.0, 0.0)
+
+ # init the lpips model for eval
+ provide_lpips_vgg()
+ self.lpips_model = lpips.LPIPS(net="vgg").cuda()
+
+ def test_eval_depth(self):
+ """
+ Check that eval_depth correctly masks errors and that, for get_best_scale=True,
+ the error with scaled prediction equals the error without scaling the
+ predicted depth. Finally, test that the error values are as expected
+ for prediction and gt differing by a constant offset.
+ """
+ gt = (torch.randn(10, 1, 300, 400, device="cuda") * 5.0).clamp(0.0)
+ mask = (torch.rand_like(gt) > 0.5).type_as(gt)
+
+ for diff in 10 ** torch.linspace(-5, 0, 6):
+ for crop in (0, 5):
+
+ pred = gt + (torch.rand_like(gt) - 0.5) * 2 * diff
+
+ # scaled prediction test
+ mse_depth, abs_depth = eval_depth(
+ pred,
+ gt,
+ crop=crop,
+ mask=mask,
+ get_best_scale=True,
+ )
+ mse_depth_scale, abs_depth_scale = eval_depth(
+ pred * 10.0,
+ gt,
+ crop=crop,
+ mask=mask,
+ get_best_scale=True,
+ )
+ self.assertAlmostEqual(
+ float(mse_depth.sum()), float(mse_depth_scale.sum()), delta=1e-4
+ )
+ self.assertAlmostEqual(
+ float(abs_depth.sum()), float(abs_depth_scale.sum()), delta=1e-4
+ )
+
+ # error masking test
+ pred_masked_err = gt + (torch.rand_like(gt) + diff) * (1 - mask)
+ mse_depth_masked, abs_depth_masked = eval_depth(
+ pred_masked_err,
+ gt,
+ crop=crop,
+ mask=mask,
+ get_best_scale=True,
+ )
+ self.assertAlmostEqual(
+ float(mse_depth_masked.sum()), float(0.0), delta=1e-4
+ )
+ self.assertAlmostEqual(
+ float(abs_depth_masked.sum()), float(0.0), delta=1e-4
+ )
+ mse_depth_unmasked, abs_depth_unmasked = eval_depth(
+ pred_masked_err,
+ gt,
+ crop=crop,
+ mask=1 - mask,
+ get_best_scale=True,
+ )
+ self.assertGreater(
+ float(mse_depth_unmasked.sum()),
+ float(diff**2),
+ )
+ self.assertGreater(
+ float(abs_depth_unmasked.sum()),
+ float(diff),
+ )
+
+ # tests with constant error
+ pred_fix_diff = gt + diff * mask
+ for _mask_gt in (mask, None):
+ mse_depth_fix_diff, abs_depth_fix_diff = eval_depth(
+ pred_fix_diff,
+ gt,
+ crop=crop,
+ mask=_mask_gt,
+ get_best_scale=False,
+ )
+ if _mask_gt is not None:
+ expected_err_abs = diff
+ expected_err_mse = diff**2
+ else:
+ err_mask = (gt > 0.0).float() * mask
+ if crop > 0:
+ err_mask = err_mask[:, :, crop:-crop, crop:-crop]
+ gt_cropped = gt[:, :, crop:-crop, crop:-crop]
+ else:
+ gt_cropped = gt
+ gt_mass = (gt_cropped > 0.0).float().sum(dim=(1, 2, 3))
+ expected_err_abs = (
+ diff * err_mask.sum(dim=(1, 2, 3)) / (gt_mass)
+ )
+ expected_err_mse = diff * expected_err_abs
+ self.assertTrue(
+ torch.allclose(
+ abs_depth_fix_diff,
+ expected_err_abs * torch.ones_like(abs_depth_fix_diff),
+ atol=1e-4,
+ )
+ )
+ self.assertTrue(
+ torch.allclose(
+ mse_depth_fix_diff,
+ expected_err_mse * torch.ones_like(mse_depth_fix_diff),
+ atol=1e-4,
+ )
+ )
+
+ def test_psnr(self):
+ """
+ Compare against opencv and check that the psnr is above
+ the minimum possible value.
+ """
+ import cv2
+
+ im1 = torch.rand(100, 3, 256, 256).cuda()
+ im1_uint8 = (im1 * 255).to(torch.uint8)
+ im1_rounded = im1_uint8.float() / 255
+ for max_diff in 10 ** torch.linspace(-5, 0, 6):
+ im2 = im1 + (torch.rand_like(im1) - 0.5) * 2 * max_diff
+ im2 = im2.clamp(0.0, 1.0)
+ im2_uint8 = (im2 * 255).to(torch.uint8)
+ im2_rounded = im2_uint8.float() / 255
+ # check that our psnr matches the output of opencv
+ psnr = calc_psnr(im1_rounded, im2_rounded)
+ # some versions of cv2 can only take uint8 input
+ psnr_cv2 = cv2.PSNR(
+ im1_uint8.cpu().numpy(),
+ im2_uint8.cpu().numpy(),
+ )
+ self.assertAlmostEqual(float(psnr), float(psnr_cv2), delta=1e-4)
+ # check that all PSNRs are bigger than the minimum possible PSNR
+ max_mse = max_diff**2
+ min_psnr = 10 * math.log10(1.0 / max_mse)
+ for _im1, _im2 in zip(im1, im2):
+ _psnr = calc_psnr(_im1, _im2)
+ self.assertGreaterEqual(float(_psnr) + 1e-6, min_psnr)
+
+ def _one_sequence_test(
+ self,
+ seq_dataset,
+ model,
+ batch_indices,
+ check_metrics=False,
+ ):
+ loader = torch.utils.data.DataLoader(
+ seq_dataset,
+ shuffle=False,
+ batch_sampler=batch_indices,
+ collate_fn=FrameData.collate,
+ )
+
+ for frame_data in loader:
+ self.assertIsNone(frame_data.frame_type)
+ self.assertIsNotNone(frame_data.image_rgb)
+ # override the frame_type
+ frame_data.frame_type = [
+ "train_unseen",
+ *(["train_known"] * (len(frame_data.image_rgb) - 1)),
+ ]
+
+ frame_data = dataclass_to_cuda_(frame_data)
+ preds = model(**dataclasses.asdict(frame_data))
+
+ eval_result = eval_batch(
+ frame_data,
+ preds["implicitron_render"],
+ bg_color=self.bg_color,
+ lpips_model=self.lpips_model,
+ )
+
+ if check_metrics:
+ self._check_metrics(
+ frame_data, preds["implicitron_render"], eval_result
+ )
+
+ def _check_metrics(self, frame_data, implicitron_render, eval_result):
+ # Make a terribly bad NVS prediction and check that this is worse
+ # than the DBIR prediction.
+ implicitron_render_bad = implicitron_render.clone()
+ implicitron_render_bad.depth_render += (
+ torch.randn_like(implicitron_render_bad.depth_render) * 100.0
+ )
+ implicitron_render_bad.image_render += (
+ torch.randn_like(implicitron_render_bad.image_render) * 100.0
+ )
+ implicitron_render_bad.mask_render = (
+ torch.randn_like(implicitron_render_bad.mask_render) > 0.0
+ ).float()
+ eval_result_bad = eval_batch(
+ frame_data,
+ implicitron_render_bad,
+ bg_color=self.bg_color,
+ lpips_model=self.lpips_model,
+ )
+
+ lower_better = {
+ "psnr_masked": False,
+ "psnr_fg": False,
+ "psnr_full_image": False,
+ "depth_abs_fg": True,
+ "iou": False,
+ "rgb_l1_masked": True,
+ "rgb_l1_fg": True,
+ "lpips_masked": True,
+ "lpips_full_image": True,
+ }
+
+ for metric in lower_better:
+ m_better = eval_result[metric]
+ m_worse = eval_result_bad[metric]
+ if np.isnan(m_better) or np.isnan(m_worse):
+ continue # metric is missing, i.e. NaN
+ _assert = (
+ self.assertLessEqual
+ if lower_better[metric]
+ else self.assertGreaterEqual
+ )
+ _assert(m_better, m_worse)
+
+ def _get_random_batch_indices(
+ self, seq_dataset, n_batches=2, min_batch_size=5, max_batch_size=10
+ ):
+ batch_indices = []
+ for _ in range(n_batches):
+ batch_size = torch.randint(
+ low=min_batch_size, high=max_batch_size, size=(1,)
+ )
+ batch_indices.append(torch.randperm(len(seq_dataset))[:batch_size])
+
+ return batch_indices
+
+ def test_full_eval(self, n_sequences=5):
+ """Test evaluation."""
+
+ # caching batch indices first to preserve RNG state
+ seq_datasets = {}
+ batch_indices = {}
+ for seq in itertools.islice(self.dataset.sequence_names(), n_sequences):
+ idx = list(self.dataset.sequence_indices_in_order(seq))
+ seq_dataset = torch.utils.data.Subset(self.dataset, idx)
+ seq_datasets[seq] = seq_dataset
+ batch_indices[seq] = self._get_random_batch_indices(seq_dataset)
+
+ for model_class_type in ["ModelDBIR", "GenericModel"]:
+ ModelClass = registry.get(ImplicitronModelBase, model_class_type)
+ expand_args_fields(ModelClass)
+ model = ModelClass(
+ render_image_width=self.image_size,
+ render_image_height=self.image_size,
+ bg_color=self.bg_color,
+ )
+ model.eval()
+ model.cuda()
+
+ for seq in itertools.islice(self.dataset.sequence_names(), n_sequences):
+ self._one_sequence_test(
+ seq_datasets[seq],
+ model,
+ batch_indices[seq],
+ check_metrics=(model_class_type == "ModelDBIR"),
+ )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_extending_orm_types.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_extending_orm_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6ceb8617ebeaa60a42f0b4e068221b9dc789434
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_extending_orm_types.py
@@ -0,0 +1,230 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import dataclasses
+import logging
+import os
+import tempfile
+import unittest
+from typing import ClassVar, Optional, Type
+
+import pandas as pd
+import pkg_resources
+import sqlalchemy as sa
+
+from pytorch3d.implicitron.dataset import types
+from pytorch3d.implicitron.dataset.frame_data import FrameData, GenericFrameDataBuilder
+from pytorch3d.implicitron.dataset.orm_types import (
+ SqlFrameAnnotation,
+ SqlSequenceAnnotation,
+)
+from pytorch3d.implicitron.dataset.sql_dataset import SqlIndexDataset
+from pytorch3d.implicitron.dataset.utils import GenericWorkaround
+from pytorch3d.implicitron.tools.config import registry
+from sqlalchemy.orm import composite, Mapped, mapped_column, Session
+
+NO_BLOBS_KWARGS = {
+ "dataset_root": "",
+ "load_images": False,
+ "load_depths": False,
+ "load_masks": False,
+ "load_depth_masks": False,
+ "box_crop": False,
+}
+
+DATASET_ROOT = pkg_resources.resource_filename(__name__, "data/sql_dataset")
+METADATA_FILE = os.path.join(DATASET_ROOT, "sql_dataset_100.sqlite")
+
+logger = logging.getLogger("pytorch3d.implicitron.dataset.sql_dataset")
+sh = logging.StreamHandler()
+logger.addHandler(sh)
+logger.setLevel(logging.DEBUG)
+
+
+@dataclasses.dataclass
+class MagneticFieldAnnotation:
+ path: str
+ average_flux_density: Optional[float] = None
+
+
+class ExtendedSqlFrameAnnotation(SqlFrameAnnotation):
+ num_dogs: Mapped[Optional[int]] = mapped_column(default=None)
+
+ magnetic_field: Mapped[MagneticFieldAnnotation] = composite(
+ mapped_column("_magnetic_field_path", nullable=True),
+ mapped_column("_magnetic_field_average_flux_density", nullable=True),
+ default_factory=lambda: None,
+ )
+
+
+class ExtendedSqlIndexDataset(SqlIndexDataset):
+ frame_annotations_type: ClassVar[Type[SqlFrameAnnotation]] = (
+ ExtendedSqlFrameAnnotation
+ )
+
+
+class CanineFrameData(FrameData):
+ num_dogs: Optional[int] = None
+ magnetic_field_average_flux_density: Optional[float] = None
+
+
+@registry.register
+class CanineFrameDataBuilder(
+ GenericWorkaround, GenericFrameDataBuilder[CanineFrameData]
+):
+ """
+ A concrete class to build an extended FrameData object
+ """
+
+ frame_data_type: ClassVar[Type[FrameData]] = CanineFrameData
+
+ def build(
+ self,
+ frame_annotation: ExtendedSqlFrameAnnotation,
+ sequence_annotation: types.SequenceAnnotation,
+ load_blobs: bool = True,
+ ) -> CanineFrameData:
+ frame_data = super().build(frame_annotation, sequence_annotation, load_blobs)
+ frame_data.num_dogs = frame_annotation.num_dogs or 101
+ frame_data.magnetic_field_average_flux_density = (
+ frame_annotation.magnetic_field.average_flux_density
+ )
+ return frame_data
+
+
+class CanineSqlIndexDataset(SqlIndexDataset):
+ frame_annotations_type: ClassVar[Type[SqlFrameAnnotation]] = (
+ ExtendedSqlFrameAnnotation
+ )
+
+ frame_data_builder_class_type: str = "CanineFrameDataBuilder"
+
+
+class TestExtendingOrmTypes(unittest.TestCase):
+ def setUp(self):
+ # create a temporary copy of the DB with an extended schema
+ engine = sa.create_engine(f"sqlite:///{METADATA_FILE}")
+ with Session(engine) as session:
+ extended_annots = [
+ ExtendedSqlFrameAnnotation(
+ **{
+ k: v
+ for k, v in frame_annot.__dict__.items()
+ if not k.startswith("_") # remove mapped fields and SA metadata
+ }
+ )
+ for frame_annot in session.scalars(sa.select(SqlFrameAnnotation))
+ ]
+ seq_annots = session.scalars(
+ sa.select(SqlSequenceAnnotation),
+ execution_options={"prebuffer_rows": True},
+ )
+ session.expunge_all()
+
+ self._temp_db = tempfile.NamedTemporaryFile(delete=False)
+ engine_ext = sa.create_engine(f"sqlite:///{self._temp_db.name}")
+ ExtendedSqlFrameAnnotation.metadata.create_all(engine_ext, checkfirst=True)
+ with Session(engine_ext, expire_on_commit=False) as session_ext:
+ session_ext.add_all(extended_annots)
+ for instance in seq_annots:
+ session_ext.merge(instance)
+ session_ext.commit()
+
+ # check the setup is correct
+ with engine_ext.connect() as connection_ext:
+ df = pd.read_sql_query(
+ sa.select(ExtendedSqlFrameAnnotation), connection_ext
+ )
+ self.assertEqual(len(df), 100)
+ self.assertIn("_magnetic_field_average_flux_density", df.columns)
+
+ df_seq = pd.read_sql_query(sa.select(SqlSequenceAnnotation), connection_ext)
+ self.assertEqual(len(df_seq), 10)
+
+ def tearDown(self):
+ self._temp_db.close()
+ os.remove(self._temp_db.name)
+
+ def test_basic(self, sequence="cat1_seq2", frame_number=4):
+ dataset = ExtendedSqlIndexDataset(
+ sqlite_metadata_file=self._temp_db.name,
+ remove_empty_masks=False,
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 100)
+
+ # check the items are consecutive
+ past_sequences = set()
+ last_frame_number = -1
+ last_sequence = ""
+ for i in range(len(dataset)):
+ item = dataset[i]
+
+ if item.frame_number == 0:
+ self.assertNotIn(item.sequence_name, past_sequences)
+ past_sequences.add(item.sequence_name)
+ last_sequence = item.sequence_name
+ else:
+ self.assertEqual(item.sequence_name, last_sequence)
+ self.assertEqual(item.frame_number, last_frame_number + 1)
+
+ last_frame_number = item.frame_number
+
+ # test indexing
+ with self.assertRaises(IndexError):
+ dataset[len(dataset) + 1]
+
+ # test sequence-frame indexing
+ item = dataset[sequence, frame_number]
+ self.assertEqual(item.sequence_name, sequence)
+ self.assertEqual(item.frame_number, frame_number)
+
+ with self.assertRaises(IndexError):
+ dataset[sequence, 13]
+
+ def test_extending_frame_data(self, sequence="cat1_seq2", frame_number=4):
+ dataset = CanineSqlIndexDataset(
+ sqlite_metadata_file=self._temp_db.name,
+ remove_empty_masks=False,
+ frame_data_builder_CanineFrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 100)
+
+ # check the items are consecutive
+ past_sequences = set()
+ last_frame_number = -1
+ last_sequence = ""
+ for i in range(len(dataset)):
+ item = dataset[i]
+ self.assertIsInstance(item, CanineFrameData)
+ self.assertEqual(item.num_dogs, 101)
+ self.assertIsNone(item.magnetic_field_average_flux_density)
+
+ if item.frame_number == 0:
+ self.assertNotIn(item.sequence_name, past_sequences)
+ past_sequences.add(item.sequence_name)
+ last_sequence = item.sequence_name
+ else:
+ self.assertEqual(item.sequence_name, last_sequence)
+ self.assertEqual(item.frame_number, last_frame_number + 1)
+
+ last_frame_number = item.frame_number
+
+ # test indexing
+ with self.assertRaises(IndexError):
+ dataset[len(dataset) + 1]
+
+ # test sequence-frame indexing
+ item = dataset[sequence, frame_number]
+ self.assertIsInstance(item, CanineFrameData)
+ self.assertEqual(item.sequence_name, sequence)
+ self.assertEqual(item.frame_number, frame_number)
+ self.assertEqual(item.num_dogs, 101)
+
+ with self.assertRaises(IndexError):
+ dataset[sequence, 13]
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_forward_pass.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_forward_pass.py
new file mode 100644
index 0000000000000000000000000000000000000000..64c9a1d574033b844a70427ab9642eb5c0db8576
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_forward_pass.py
@@ -0,0 +1,227 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+import unittest
+
+import torch
+from omegaconf import DictConfig, OmegaConf
+from pytorch3d.implicitron.models.generic_model import GenericModel
+from pytorch3d.implicitron.models.renderer.base import EvaluationMode
+from pytorch3d.implicitron.tools.config import expand_args_fields, get_default_args
+from pytorch3d.renderer.cameras import look_at_view_transform, PerspectiveCameras
+from tests.common_testing import get_pytorch3d_dir
+
+from .common_resources import provide_resnet34
+
+IMPLICITRON_CONFIGS_DIR = (
+ get_pytorch3d_dir() / "projects" / "implicitron_trainer" / "configs"
+)
+
+
+class TestGenericModel(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls) -> None:
+ provide_resnet34()
+
+ def setUp(self):
+ torch.manual_seed(42)
+
+ def test_gm(self):
+ # Simple test of a forward and backward pass of the default GenericModel.
+ device = torch.device("cuda:0")
+ expand_args_fields(GenericModel)
+ model = GenericModel(render_image_height=80, render_image_width=80)
+ model.to(device)
+ self._one_model_test(model, device)
+
+ def test_all_gm_configs(self):
+ # Tests all model settings in the implicitron_trainer config folder.
+ device = torch.device("cuda:0")
+ config_files = []
+
+ for pattern in ("repro_singleseq*.yaml", "repro_multiseq*.yaml"):
+ config_files.extend(
+ [
+ f
+ for f in IMPLICITRON_CONFIGS_DIR.glob(pattern)
+ if not f.name.endswith("_base.yaml")
+ ]
+ )
+
+ for config_file in config_files:
+ with self.subTest(name=config_file.stem):
+ cfg = _load_model_config_from_yaml(str(config_file))
+ cfg.render_image_height = 80
+ cfg.render_image_width = 80
+ model = GenericModel(**cfg)
+ model.to(device)
+ self._one_model_test(
+ model,
+ device,
+ eval_test=True,
+ bw_test=True,
+ )
+
+ def _one_model_test(
+ self,
+ model,
+ device,
+ n_train_cameras: int = 5,
+ eval_test: bool = True,
+ bw_test: bool = True,
+ ):
+
+ R, T = look_at_view_transform(azim=torch.rand(n_train_cameras) * 360)
+ cameras = PerspectiveCameras(R=R, T=T, device=device)
+
+ N, H, W = n_train_cameras, model.render_image_height, model.render_image_width
+
+ random_args = {
+ "camera": cameras,
+ "fg_probability": _random_input_tensor(N, 1, H, W, True, device),
+ "depth_map": _random_input_tensor(N, 1, H, W, False, device) + 0.1,
+ "mask_crop": _random_input_tensor(N, 1, H, W, True, device),
+ "sequence_name": ["sequence"] * N,
+ "image_rgb": _random_input_tensor(N, 3, H, W, False, device),
+ }
+
+        # training forward pass
+ model.train()
+ train_preds = model(
+ **random_args,
+ evaluation_mode=EvaluationMode.TRAINING,
+ )
+ self.assertTrue(
+ train_preds["objective"].isfinite().item()
+ ) # check finiteness of the objective
+
+ if bw_test:
+ train_preds["objective"].backward()
+
+ if eval_test:
+ model.eval()
+ with torch.no_grad():
+ eval_preds = model(
+ **random_args,
+ evaluation_mode=EvaluationMode.EVALUATION,
+ )
+ self.assertEqual(
+ eval_preds["images_render"].shape,
+ (1, 3, model.render_image_height, model.render_image_width),
+ )
+
+ def test_idr(self):
+ # Forward pass of GenericModel with IDR.
+ device = torch.device("cuda:0")
+ args = get_default_args(GenericModel)
+ args.renderer_class_type = "SignedDistanceFunctionRenderer"
+ args.implicit_function_class_type = "IdrFeatureField"
+ args.implicit_function_IdrFeatureField_args.n_harmonic_functions_xyz = 6
+
+ model = GenericModel(**args)
+ model.to(device)
+
+ n_train_cameras = 2
+ R, T = look_at_view_transform(azim=torch.rand(n_train_cameras) * 360)
+ cameras = PerspectiveCameras(R=R, T=T, device=device)
+
+ defaulted_args = {
+ "depth_map": None,
+ "mask_crop": None,
+ "sequence_name": None,
+ }
+
+ target_image_rgb = torch.rand(
+ (n_train_cameras, 3, model.render_image_height, model.render_image_width),
+ device=device,
+ )
+ fg_probability = torch.rand(
+ (n_train_cameras, 1, model.render_image_height, model.render_image_width),
+ device=device,
+ )
+ train_preds = model(
+ camera=cameras,
+ evaluation_mode=EvaluationMode.TRAINING,
+ image_rgb=target_image_rgb,
+ fg_probability=fg_probability,
+ **defaulted_args,
+ )
+ self.assertGreater(train_preds["objective"].item(), 0)
+
+ def test_viewpool(self):
+ device = torch.device("cuda:0")
+ args = get_default_args(GenericModel)
+ args.view_pooler_enabled = True
+ args.image_feature_extractor_class_type = "ResNetFeatureExtractor"
+ args.image_feature_extractor_ResNetFeatureExtractor_args.add_masks = False
+ model = GenericModel(**args)
+ model.to(device)
+
+ n_train_cameras = 2
+ R, T = look_at_view_transform(azim=torch.rand(n_train_cameras) * 360)
+ cameras = PerspectiveCameras(R=R, T=T, device=device)
+
+ defaulted_args = {
+ "fg_probability": None,
+ "depth_map": None,
+ "mask_crop": None,
+ }
+
+ target_image_rgb = torch.rand(
+ (n_train_cameras, 3, model.render_image_height, model.render_image_width),
+ device=device,
+ )
+ train_preds = model(
+ camera=cameras,
+ evaluation_mode=EvaluationMode.TRAINING,
+ image_rgb=target_image_rgb,
+ sequence_name=["a"] * n_train_cameras,
+ **defaulted_args,
+ )
+ self.assertGreater(train_preds["objective"].item(), 0)
+
+
+def _random_input_tensor(
+ N: int,
+ C: int,
+ H: int,
+ W: int,
+ is_binary: bool,
+ device: torch.device,
+) -> torch.Tensor:
+ T = torch.rand(N, C, H, W, device=device)
+ if is_binary:
+ T = (T > 0.5).float()
+ return T
+
+
+def _load_model_config_from_yaml(config_path, strict=True) -> DictConfig:
+ default_cfg = get_default_args(GenericModel)
+ cfg = _load_model_config_from_yaml_rec(default_cfg, config_path)
+ return cfg
+
+
+def _load_model_config_from_yaml_rec(cfg: DictConfig, config_path: str) -> DictConfig:
+ cfg_loaded = OmegaConf.load(config_path)
+ cfg_model_loaded = None
+ if "model_factory_ImplicitronModelFactory_args" in cfg_loaded:
+ factory_args = cfg_loaded.model_factory_ImplicitronModelFactory_args
+ if "model_GenericModel_args" in factory_args:
+ cfg_model_loaded = factory_args.model_GenericModel_args
+ defaults = cfg_loaded.pop("defaults", None)
+ if defaults is not None:
+ for default_name in defaults:
+ if default_name in ("_self_", "default_config"):
+ continue
+ default_name = os.path.splitext(default_name)[0]
+ defpath = os.path.join(os.path.dirname(config_path), default_name + ".yaml")
+ cfg = _load_model_config_from_yaml_rec(cfg, defpath)
+ if cfg_model_loaded is not None:
+ cfg = OmegaConf.merge(cfg, cfg_model_loaded)
+ elif cfg_model_loaded is not None:
+ cfg = OmegaConf.merge(cfg, cfg_model_loaded)
+ return cfg
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_frame_data_builder.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_frame_data_builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..2881045511c10543a5c27ed6cfe7518df7c30e21
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_frame_data_builder.py
@@ -0,0 +1,263 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import contextlib
+import gzip
+import os
+import unittest
+from typing import List
+
+import numpy as np
+import torch
+
+from pytorch3d.implicitron.dataset import types
+from pytorch3d.implicitron.dataset.dataset_base import FrameData
+from pytorch3d.implicitron.dataset.frame_data import FrameDataBuilder
+from pytorch3d.implicitron.dataset.utils import (
+ get_bbox_from_mask,
+ load_16big_png_depth,
+ load_1bit_png_mask,
+ load_depth,
+ load_depth_mask,
+ load_image,
+ load_mask,
+ safe_as_tensor,
+ transpose_normalize_image,
+)
+from pytorch3d.implicitron.tools.config import get_default_args
+from pytorch3d.renderer.cameras import PerspectiveCameras
+
+from tests.common_testing import TestCaseMixin
+from tests.implicitron.common_resources import get_skateboard_data
+
+
+class TestFrameDataBuilder(TestCaseMixin, unittest.TestCase):
+ def setUp(self):
+ torch.manual_seed(42)
+
+ category = "skateboard"
+ stack = contextlib.ExitStack()
+ self.dataset_root, self.path_manager = stack.enter_context(
+ get_skateboard_data()
+ )
+ self.addCleanup(stack.close)
+ self.image_height = 768
+ self.image_width = 512
+
+ self.frame_data_builder = FrameDataBuilder(
+ image_height=self.image_height,
+ image_width=self.image_width,
+ dataset_root=self.dataset_root,
+ path_manager=self.path_manager,
+ )
+
+ # loading single frame annotation of dataset (see JsonIndexDataset._load_frames())
+ frame_file = os.path.join(self.dataset_root, category, "frame_annotations.jgz")
+ local_file = self.path_manager.get_local_path(frame_file)
+ with gzip.open(local_file, "rt", encoding="utf8") as zipfile:
+ frame_annots_list = types.load_dataclass(
+ zipfile, List[types.FrameAnnotation]
+ )
+ self.frame_annotation = frame_annots_list[0]
+
+ sequence_annotations_file = os.path.join(
+ self.dataset_root, category, "sequence_annotations.jgz"
+ )
+ local_file = self.path_manager.get_local_path(sequence_annotations_file)
+ with gzip.open(local_file, "rt", encoding="utf8") as zipfile:
+ seq_annots_list = types.load_dataclass(
+ zipfile, List[types.SequenceAnnotation]
+ )
+ seq_annots = {entry.sequence_name: entry for entry in seq_annots_list}
+ self.seq_annotation = seq_annots[self.frame_annotation.sequence_name]
+
+ point_cloud = self.seq_annotation.point_cloud
+ self.frame_data = FrameData(
+ frame_number=safe_as_tensor(self.frame_annotation.frame_number, torch.long),
+ frame_timestamp=safe_as_tensor(
+ self.frame_annotation.frame_timestamp, torch.float
+ ),
+ sequence_name=self.frame_annotation.sequence_name,
+ sequence_category=self.seq_annotation.category,
+ camera_quality_score=safe_as_tensor(
+ self.seq_annotation.viewpoint_quality_score, torch.float
+ ),
+ point_cloud_quality_score=(
+ safe_as_tensor(point_cloud.quality_score, torch.float)
+ if point_cloud is not None
+ else None
+ ),
+ )
+
+ def test_frame_data_builder_args(self):
+ # test that FrameDataBuilder works with get_default_args
+ get_default_args(FrameDataBuilder)
+
+ def test_fix_point_cloud_path(self):
+ """Some files in Co3Dv2 have an accidental absolute path stored."""
+ original_path = "some_file_path"
+ modified_path = self.frame_data_builder._fix_point_cloud_path(original_path)
+ self.assertIn(original_path, modified_path)
+ self.assertIn(self.frame_data_builder.dataset_root, modified_path)
+
+ def test_load_and_adjust_frame_data(self):
+ self.frame_data.image_size_hw = safe_as_tensor(
+ self.frame_annotation.image.size, torch.long
+ )
+ self.frame_data.effective_image_size_hw = self.frame_data.image_size_hw
+
+ fg_mask_np, mask_path = self.frame_data_builder._load_fg_probability(
+ self.frame_annotation
+ )
+ self.frame_data.mask_path = mask_path
+ self.frame_data.fg_probability = safe_as_tensor(fg_mask_np, torch.float)
+ mask_thr = self.frame_data_builder.box_crop_mask_thr
+ bbox_xywh = get_bbox_from_mask(fg_mask_np, mask_thr)
+ self.frame_data.bbox_xywh = safe_as_tensor(bbox_xywh, torch.long)
+
+ self.assertIsNotNone(self.frame_data.mask_path)
+ self.assertTrue(torch.is_tensor(self.frame_data.fg_probability))
+ self.assertTrue(torch.is_tensor(self.frame_data.bbox_xywh))
+ # assert bboxes shape
+ self.assertEqual(self.frame_data.bbox_xywh.shape, torch.Size([4]))
+
+ image_path = os.path.join(
+ self.frame_data_builder.dataset_root, self.frame_annotation.image.path
+ )
+ image_np = load_image(self.frame_data_builder._local_path(image_path))
+ self.assertIsInstance(image_np, np.ndarray)
+ self.frame_data.image_rgb = self.frame_data_builder._postprocess_image(
+ image_np, self.frame_annotation.image.size, self.frame_data.fg_probability
+ )
+ self.assertIsInstance(self.frame_data.image_rgb, torch.Tensor)
+
+ (
+ self.frame_data.depth_map,
+ depth_path,
+ self.frame_data.depth_mask,
+ ) = self.frame_data_builder._load_mask_depth(
+ self.frame_annotation,
+ self.frame_data.fg_probability,
+ )
+ self.assertTrue(torch.is_tensor(self.frame_data.depth_map))
+ self.assertIsNotNone(depth_path)
+ self.assertTrue(torch.is_tensor(self.frame_data.depth_mask))
+
+ new_size = (self.image_height, self.image_width)
+
+ if self.frame_data_builder.box_crop:
+ self.frame_data.crop_by_metadata_bbox_(
+ self.frame_data_builder.box_crop_context,
+ )
+
+ # assert image and mask shapes after resize
+ self.frame_data.resize_frame_(
+ new_size_hw=torch.tensor(new_size, dtype=torch.long),
+ )
+ self.assertEqual(
+ self.frame_data.mask_crop.shape,
+ torch.Size([1, self.image_height, self.image_width]),
+ )
+ self.assertEqual(
+ self.frame_data.image_rgb.shape,
+ torch.Size([3, self.image_height, self.image_width]),
+ )
+ self.assertEqual(
+ self.frame_data.mask_crop.shape,
+ torch.Size([1, self.image_height, self.image_width]),
+ )
+ self.assertEqual(
+ self.frame_data.fg_probability.shape,
+ torch.Size([1, self.image_height, self.image_width]),
+ )
+ self.assertEqual(
+ self.frame_data.depth_map.shape,
+ torch.Size([1, self.image_height, self.image_width]),
+ )
+ self.assertEqual(
+ self.frame_data.depth_mask.shape,
+ torch.Size([1, self.image_height, self.image_width]),
+ )
+ self.frame_data.camera = self.frame_data_builder._get_pytorch3d_camera(
+ self.frame_annotation,
+ )
+ self.assertEqual(type(self.frame_data.camera), PerspectiveCameras)
+
+ def test_transpose_normalize_image(self):
+ def inverse_transpose_normalize_image(image: np.ndarray) -> np.ndarray:
+ im = image * 255.0
+ return im.transpose((1, 2, 0)).astype(np.uint8)
+
+ # Test 2D input
+ input_image = np.array(
+ [[10, 20, 30], [40, 50, 60], [70, 80, 90]], dtype=np.uint8
+ )
+ expected_input = inverse_transpose_normalize_image(
+ transpose_normalize_image(input_image)
+ )
+ self.assertClose(input_image[..., None], expected_input)
+
+ # Test 3D input
+ input_image = np.array(
+ [
+ [[10, 20, 30], [40, 50, 60], [70, 80, 90]],
+ [[100, 110, 120], [130, 140, 150], [160, 170, 180]],
+ [[190, 200, 210], [220, 230, 240], [250, 255, 255]],
+ ],
+ dtype=np.uint8,
+ )
+ expected_input = inverse_transpose_normalize_image(
+ transpose_normalize_image(input_image)
+ )
+ self.assertClose(input_image, expected_input)
+
+ def test_load_image(self):
+ path = os.path.join(self.dataset_root, self.frame_annotation.image.path)
+ local_path = self.path_manager.get_local_path(path)
+ image = load_image(local_path)
+ self.assertEqual(image.dtype, np.float32)
+ self.assertLessEqual(np.max(image), 1.0)
+ self.assertGreaterEqual(np.min(image), 0.0)
+
+ def test_load_mask(self):
+ path = os.path.join(self.dataset_root, self.frame_annotation.mask.path)
+ path = self.path_manager.get_local_path(path)
+ mask = load_mask(path)
+ self.assertEqual(mask.dtype, np.float32)
+ self.assertLessEqual(np.max(mask), 1.0)
+ self.assertGreaterEqual(np.min(mask), 0.0)
+
+ def test_load_depth(self):
+ path = os.path.join(self.dataset_root, self.frame_annotation.depth.path)
+ path = self.path_manager.get_local_path(path)
+ depth_map = load_depth(path, self.frame_annotation.depth.scale_adjustment)
+ self.assertEqual(depth_map.dtype, np.float32)
+ self.assertEqual(len(depth_map.shape), 3)
+
+ def test_load_16big_png_depth(self):
+ path = os.path.join(self.dataset_root, self.frame_annotation.depth.path)
+ path = self.path_manager.get_local_path(path)
+ depth_map = load_16big_png_depth(path)
+ self.assertEqual(depth_map.dtype, np.float32)
+ self.assertEqual(len(depth_map.shape), 2)
+
+ def test_load_1bit_png_mask(self):
+ mask_path = os.path.join(
+ self.dataset_root, self.frame_annotation.depth.mask_path
+ )
+ mask_path = self.path_manager.get_local_path(mask_path)
+ mask = load_1bit_png_mask(mask_path)
+ self.assertEqual(mask.dtype, np.float32)
+ self.assertEqual(len(mask.shape), 2)
+
+ def test_load_depth_mask(self):
+ mask_path = os.path.join(
+ self.dataset_root, self.frame_annotation.depth.mask_path
+ )
+ mask_path = self.path_manager.get_local_path(mask_path)
+ mask = load_depth_mask(mask_path)
+ self.assertEqual(mask.dtype, np.float32)
+ self.assertEqual(len(mask.shape), 3)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_implicit_function_neural_radiance_field.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_implicit_function_neural_radiance_field.py
new file mode 100644
index 0000000000000000000000000000000000000000..f31dfcd11f0e481f58341d69448a5bda3d82ed55
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_implicit_function_neural_radiance_field.py
@@ -0,0 +1,66 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+
+import torch
+from pytorch3d.implicitron.models.implicit_function.base import ImplicitronRayBundle
+from pytorch3d.implicitron.models.implicit_function.neural_radiance_field import (
+ NeuralRadianceFieldImplicitFunction,
+)
+
+
+class TestNeuralRadianceFieldImplicitFunction(unittest.TestCase):
+ def setUp(self):
+ torch.manual_seed(42)
+
+ def test_forward_with_integrated_positionial_embedding(self):
+ shape = [2, 4, 4]
+ ray_bundle = ImplicitronRayBundle(
+ origins=torch.randn(*shape, 3),
+ directions=torch.randn(*shape, 3),
+ bins=torch.randn(*shape, 6 + 1),
+ lengths=torch.randn(*shape, 6),
+ pixel_radii_2d=torch.randn(*shape, 1),
+ xys=None,
+ )
+ model = NeuralRadianceFieldImplicitFunction(
+ n_hidden_neurons_dir=32, use_integrated_positional_encoding=True
+ )
+ raw_densities, ray_colors, _ = model(ray_bundle=ray_bundle)
+
+ self.assertEqual(raw_densities.shape, (*shape, ray_bundle.lengths.shape[-1], 1))
+ self.assertEqual(ray_colors.shape, (*shape, ray_bundle.lengths.shape[-1], 3))
+
+ def test_forward_with_integrated_positionial_embedding_raise_exception(self):
+ shape = [2, 4, 4]
+ ray_bundle = ImplicitronRayBundle(
+ origins=torch.randn(*shape, 3),
+ directions=torch.randn(*shape, 3),
+ bins=None,
+ lengths=torch.randn(*shape, 6),
+ pixel_radii_2d=torch.randn(*shape, 1),
+ xys=None,
+ )
+ model = NeuralRadianceFieldImplicitFunction(
+ n_hidden_neurons_dir=32, use_integrated_positional_encoding=True
+ )
+ with self.assertRaises(ValueError):
+ _ = model(ray_bundle=ray_bundle)
+
+ def test_forward(self):
+ shape = [2, 4, 4]
+ ray_bundle = ImplicitronRayBundle(
+ origins=torch.randn(*shape, 3),
+ directions=torch.randn(*shape, 3),
+ lengths=torch.randn(*shape, 6),
+ pixel_radii_2d=torch.randn(*shape, 1),
+ xys=None,
+ )
+ model = NeuralRadianceFieldImplicitFunction(n_hidden_neurons_dir=32)
+ raw_densities, ray_colors, _ = model(ray_bundle=ray_bundle)
+ self.assertEqual(raw_densities.shape, (*shape, 6, 1))
+ self.assertEqual(ray_colors.shape, (*shape, 6, 3))
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_json_index_dataset_provider_v2.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_json_index_dataset_provider_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..456b44fad8d34ff756de9b922df09d54161d7c9a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_json_index_dataset_provider_v2.py
@@ -0,0 +1,250 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import json
+import os
+import random
+import tempfile
+import unittest
+from typing import List
+
+import numpy as np
+
+import torch
+import torchvision
+from PIL import Image
+from pytorch3d.implicitron.dataset.frame_data import FrameData
+from pytorch3d.implicitron.dataset.json_index_dataset_map_provider_v2 import (
+ JsonIndexDatasetMapProviderV2,
+)
+from pytorch3d.implicitron.dataset.types import (
+ dump_dataclass_jgzip,
+ FrameAnnotation,
+ ImageAnnotation,
+ MaskAnnotation,
+ SequenceAnnotation,
+)
+from pytorch3d.implicitron.tools.config import expand_args_fields
+from tests.common_testing import interactive_testing_requested
+
+from .common_resources import CO3DV2_MANIFOLD_PATH
+
+
+class TestJsonIndexDatasetProviderV2(unittest.TestCase):
+ def test_random_dataset(self):
+ # store random frame annotations
+ expand_args_fields(JsonIndexDatasetMapProviderV2)
+ categories = ["A", "B"]
+ subset_name = "test"
+ eval_batch_size = 5
+ n_frames = 8 * 3
+ n_sequences = 5
+ n_eval_batches = 10
+ with tempfile.TemporaryDirectory() as tmpd:
+ _make_random_json_dataset_map_provider_v2_data(
+ tmpd,
+ categories,
+ eval_batch_size=eval_batch_size,
+ n_frames=n_frames,
+ n_sequences=n_sequences,
+ n_eval_batches=n_eval_batches,
+ )
+ for n_known_frames_for_test in [0, 2]:
+ dataset_providers = {
+ category: JsonIndexDatasetMapProviderV2(
+ category=category,
+ subset_name="test",
+ dataset_root=tmpd,
+ n_known_frames_for_test=n_known_frames_for_test,
+ )
+ for category in [*categories, ",".join(sorted(categories))]
+ }
+ for category, dataset_provider in dataset_providers.items():
+ dataset_map = dataset_provider.get_dataset_map()
+ for set_ in ["train", "val", "test"]:
+ dataset = getattr(dataset_map, set_)
+
+ cat2seq = dataset.category_to_sequence_names()
+ self.assertEqual(",".join(sorted(cat2seq.keys())), category)
+
+ if not (n_known_frames_for_test != 0 and set_ == "test"):
+ # check the lengths only in case we do not have the
+ # n_known_frames_for_test set
+ expected_dataset_len = n_frames * n_sequences // 3
+ if "," in category:
+ # multicategory json index dataset, sum the lengths of
+ # category-specific ones
+ expected_dataset_len = sum(
+ len(
+ getattr(
+ dataset_providers[c].get_dataset_map(), set_
+ )
+ )
+ for c in categories
+ )
+ self.assertEqual(
+ sum(len(s) for s in cat2seq.values()),
+ n_sequences * len(categories),
+ )
+ self.assertEqual(len(cat2seq), len(categories))
+ else:
+ self.assertEqual(
+ len(cat2seq[category]),
+ n_sequences,
+ )
+ self.assertEqual(len(cat2seq), 1)
+ self.assertEqual(len(dataset), expected_dataset_len)
+
+ if set_ == "test":
+ # check the number of eval batches
+ expected_n_eval_batches = n_eval_batches
+ if "," in category:
+ expected_n_eval_batches *= len(categories)
+ self.assertTrue(
+ len(dataset.get_eval_batches())
+ == expected_n_eval_batches
+ )
+ if set_ in ["train", "val"]:
+ dataloader = torch.utils.data.DataLoader(
+ getattr(dataset_map, set_),
+ batch_size=3,
+ shuffle=True,
+ collate_fn=FrameData.collate,
+ )
+ else:
+ dataloader = torch.utils.data.DataLoader(
+ getattr(dataset_map, set_),
+ batch_sampler=dataset_map[set_].get_eval_batches(),
+ collate_fn=FrameData.collate,
+ )
+ for batch in dataloader:
+ if set_ == "test":
+ self.assertTrue(
+ batch.image_rgb.shape[0]
+ == n_known_frames_for_test + eval_batch_size
+ )
+ category_to_subset_list = (
+ dataset_provider.get_category_to_subset_name_list()
+ )
+ category_to_subset_list_ = {c: [subset_name] for c in categories}
+
+ self.assertTrue(category_to_subset_list == category_to_subset_list_)
+
+
+def _make_random_json_dataset_map_provider_v2_data(
+ root: str,
+ categories: List[str],
+ n_frames: int = 8,
+ n_sequences: int = 5,
+ n_eval_batches: int = 10,
+ H: int = 50,
+ W: int = 30,
+ subset_name: str = "test",
+ eval_batch_size: int = 5,
+):
+ os.makedirs(root, exist_ok=True)
+ category_to_subset_list = {}
+ for category in categories:
+ frame_annotations = []
+ sequence_annotations = []
+ frame_index = []
+ for seq_i in range(n_sequences):
+ seq_name = category + str(seq_i)
+ for i in range(n_frames):
+ # generate and store image
+ imdir = os.path.join(root, category, seq_name, "images")
+ os.makedirs(imdir, exist_ok=True)
+ img_path = os.path.join(imdir, f"frame{i:05d}.jpg")
+ img = torch.rand(3, H, W)
+ torchvision.utils.save_image(img, img_path)
+
+ # generate and store mask
+ maskdir = os.path.join(root, category, seq_name, "masks")
+ os.makedirs(maskdir, exist_ok=True)
+ mask_path = os.path.join(maskdir, f"frame{i:05d}.png")
+ mask = np.zeros((H, W))
+ mask[H // 2 :, W // 2 :] = 1
+ Image.fromarray(
+ (mask * 255.0).astype(np.uint8),
+ mode="L",
+ ).convert(
+ "L"
+ ).save(mask_path)
+
+ fa = FrameAnnotation(
+ sequence_name=seq_name,
+ frame_number=i,
+ frame_timestamp=float(i),
+ image=ImageAnnotation(
+ path=img_path.replace(os.path.normpath(root) + "/", ""),
+ size=list(img.shape[-2:]),
+ ),
+ mask=MaskAnnotation(
+ path=mask_path.replace(os.path.normpath(root) + "/", ""),
+ mass=mask.sum().item(),
+ ),
+ )
+ frame_annotations.append(fa)
+ frame_index.append((seq_name, i, fa.image.path))
+
+ sequence_annotations.append(
+ SequenceAnnotation(
+ sequence_name=seq_name,
+ category=category,
+ )
+ )
+
+ dump_dataclass_jgzip(
+ os.path.join(root, category, "frame_annotations.jgz"),
+ frame_annotations,
+ )
+ dump_dataclass_jgzip(
+ os.path.join(root, category, "sequence_annotations.jgz"),
+ sequence_annotations,
+ )
+
+ test_frame_index = frame_index[2::3]
+
+ set_list = {
+ "train": frame_index[0::3],
+ "val": frame_index[1::3],
+ "test": test_frame_index,
+ }
+ set_lists_dir = os.path.join(root, category, "set_lists")
+ os.makedirs(set_lists_dir, exist_ok=True)
+ set_list_file = os.path.join(set_lists_dir, f"set_lists_{subset_name}.json")
+ with open(set_list_file, "w") as f:
+ json.dump(set_list, f)
+
+ eval_batches = [
+ random.sample(test_frame_index, eval_batch_size)
+ for _ in range(n_eval_batches)
+ ]
+
+ eval_b_dir = os.path.join(root, category, "eval_batches")
+ os.makedirs(eval_b_dir, exist_ok=True)
+ eval_b_file = os.path.join(eval_b_dir, f"eval_batches_{subset_name}.json")
+ with open(eval_b_file, "w") as f:
+ json.dump(eval_batches, f)
+
+ category_to_subset_list[category] = [subset_name]
+
+ with open(os.path.join(root, "category_to_subset_name_list.json"), "w") as f:
+ json.dump(category_to_subset_list, f)
+
+
+class TestCo3dv2(unittest.TestCase):
+ def test_simple(self):
+ if not interactive_testing_requested():
+ return
+ dataset_provider = JsonIndexDatasetMapProviderV2(
+ category="apple",
+ subset_name="manyview_dev_0",
+ dataset_root=CO3DV2_MANIFOLD_PATH,
+ dataset_JsonIndexDataset_args={"load_point_clouds": True},
+ )
+ dataset_provider.get_dataset_map().train[0]
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_model_visualize.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_model_visualize.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c9b1f9a5343c6b126f198cf2f90a3724a168748
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_model_visualize.py
@@ -0,0 +1,155 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import contextlib
+import math
+import os
+import unittest
+from typing import Tuple
+
+import torch
+from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset
+from pytorch3d.implicitron.dataset.visualize import get_implicitron_sequence_pointcloud
+
+from pytorch3d.implicitron.models.visualization.render_flyaround import render_flyaround
+from pytorch3d.implicitron.tools.config import expand_args_fields
+from pytorch3d.implicitron.tools.point_cloud_utils import render_point_cloud_pytorch3d
+from pytorch3d.renderer.cameras import CamerasBase
+from tests.common_testing import interactive_testing_requested
+from visdom import Visdom
+
+from .common_resources import get_skateboard_data
+
+
class TestModelVisualize(unittest.TestCase):
    def test_flyaround_one_sequence(
        self,
        image_size: int = 256,
    ):
        """
        Interactive smoke test: render flyaround videos of the first
        skateboard sequence using a simple point-cloud rendering model,
        once with and once without the dataset-provided point cloud,
        optionally pushing predictions to a running Visdom server.
        """
        if not interactive_testing_requested():
            return
        category = "skateboard"
        stack = contextlib.ExitStack()
        dataset_root, path_manager = stack.enter_context(get_skateboard_data())
        self.addCleanup(stack.close)
        frame_file = os.path.join(dataset_root, category, "frame_annotations.jgz")
        sequence_file = os.path.join(dataset_root, category, "sequence_annotations.jgz")
        subset_lists_file = os.path.join(dataset_root, category, "set_lists.json")
        expand_args_fields(JsonIndexDataset)
        train_dataset = JsonIndexDataset(
            frame_annotations_file=frame_file,
            sequence_annotations_file=sequence_file,
            subset_lists_file=subset_lists_file,
            dataset_root=dataset_root,
            image_height=image_size,
            image_width=image_size,
            box_crop=True,
            load_point_clouds=True,
            path_manager=path_manager,
            subsets=[
                "train_known",
            ],
        )

        # select few sequences to visualize
        sequence_names = list(train_dataset.seq_annots.keys())

        # select the first sequence name
        show_sequence_name = sequence_names[0]

        # write outputs next to this test file
        output_dir = os.path.split(os.path.abspath(__file__))[0]

        # only push visualizations if a Visdom server is reachable
        visdom_show_preds = Visdom().check_connection()

        for load_dataset_pointcloud in [True, False]:

            model = _PointcloudRenderingModel(
                train_dataset,
                show_sequence_name,
                device="cuda:0",
                load_dataset_pointcloud=load_dataset_pointcloud,
            )

            video_path = os.path.join(
                output_dir,
                f"load_pcl_{load_dataset_pointcloud}",
            )

            os.makedirs(output_dir, exist_ok=True)

            # run once without and once with dumping individual video frames
            for output_video_frames_dir in [None, video_path]:
                render_flyaround(
                    train_dataset,
                    show_sequence_name,
                    model,
                    video_path,
                    n_flyaround_poses=10,
                    fps=5,
                    max_angle=2 * math.pi,
                    trajectory_type="circular_lsq_fit",
                    trajectory_scale=1.1,
                    scene_center=(0.0, 0.0, 0.0),
                    up=(0.0, 1.0, 0.0),
                    traj_offset=1.0,
                    n_source_views=1,
                    visdom_show_preds=visdom_show_preds,
                    visdom_environment="test_model_visalize",
                    visdom_server="http://127.0.0.1",
                    visdom_port=8097,
                    num_workers=10,
                    seed=None,
                    video_resize=None,
                    visualize_preds_keys=[
                        "images_render",
                        "depths_render",
                        "masks_render",
                        "_all_source_images",
                    ],
                    output_video_frames_dir=output_video_frames_dir,
                )
+
+
class _PointcloudRenderingModel(torch.nn.Module):
    """
    Minimal model for render_flyaround: holds the point cloud of one
    dataset sequence and renders it from the cameras passed to forward().
    """

    def __init__(
        self,
        train_dataset: JsonIndexDataset,
        sequence_name: str,
        render_size: Tuple[int, int] = (400, 400),
        device=None,
        load_dataset_pointcloud: bool = False,
        max_frames: int = 30,
        num_workers: int = 10,
    ):
        super().__init__()
        self._render_size = render_size
        # Build the sequence point cloud (using at most max_frames frames,
        # with masked points only) or load the dataset-provided one.
        point_cloud, _ = get_implicitron_sequence_pointcloud(
            train_dataset,
            sequence_name=sequence_name,
            mask_points=True,
            max_frames=max_frames,
            num_workers=num_workers,
            load_dataset_point_cloud=load_dataset_pointcloud,
        )
        self._point_cloud = point_cloud.to(device)

    def forward(
        self,
        camera: CamerasBase,
        **kwargs,
    ):
        # Render only from the first camera of the (possibly batched) input.
        image_render, mask_render, depth_render = render_point_cloud_pytorch3d(
            camera[0],
            self._point_cloud,
            render_size=self._render_size,
            point_radius=1e-2,
            topk=10,
            bg_color=0.0,
        )
        # Keys match the `visualize_preds_keys` requested by the test above.
        return {
            "images_render": image_render.clamp(0.0, 1.0),
            "masks_render": mask_render,
            "depths_render": depth_render,
        }
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_models_renderer_base.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_models_renderer_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..cafd06a8d23b6fdf9f36be821d7f3ea508540bd6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_models_renderer_base.py
@@ -0,0 +1,285 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import unittest
+
+import numpy as np
+
+import torch
+
+from pytorch3d.implicitron.models.renderer.base import (
+ approximate_conical_frustum_as_gaussians,
+ compute_3d_diagonal_covariance_gaussian,
+ conical_frustum_to_gaussian,
+ ImplicitronRayBundle,
+)
+from pytorch3d.implicitron.models.renderer.ray_sampler import AbstractMaskRaySampler
+
+from tests.common_testing import TestCaseMixin
+
+
class TestRendererBase(TestCaseMixin, unittest.TestCase):
    """
    Tests for ImplicitronRayBundle invariants and the conical-frustum-to-
    Gaussian helpers (MipNeRF-style cone casting).
    """

    def test_implicitron_from_bins(self) -> None:
        # When only bins are given, lengths must be the bin midpoints.
        bins = torch.randn(2, 3, 4, 5)
        ray_bundle = ImplicitronRayBundle(
            origins=None,
            directions=None,
            lengths=None,
            xys=None,
            bins=bins,
        )
        self.assertClose(ray_bundle.lengths, 0.5 * (bins[..., 1:] + bins[..., :-1]))
        self.assertClose(ray_bundle.bins, bins)

    def test_implicitron_raise_value_error_bins_is_set_and_try_to_set_lengths(
        self,
    ) -> None:
        # lengths is derived from bins, so assigning it directly must fail.
        ray_bundle = ImplicitronRayBundle(
            origins=torch.rand(2, 3, 4, 3),
            directions=torch.rand(2, 3, 4, 3),
            lengths=None,
            xys=torch.rand(2, 3, 4, 2),
            bins=torch.rand(2, 3, 4, 14),
        )
        with self.assertRaisesRegex(
            ValueError,
            "If the bins attribute is not None you cannot set the lengths attribute.",
        ):
            ray_bundle.lengths = torch.empty(2)

    def test_implicitron_raise_value_error_if_bins_dim_equal_1(self) -> None:
        # A single bin edge cannot define an interval.
        with self.assertRaisesRegex(
            ValueError, "The last dim of bins must be at least superior or equal to 2."
        ):
            ImplicitronRayBundle(
                origins=torch.rand(2, 3, 4, 3),
                directions=torch.rand(2, 3, 4, 3),
                lengths=None,
                xys=torch.rand(2, 3, 4, 2),
                bins=torch.rand(2, 3, 4, 1),
            )

    def test_implicitron_raise_value_error_if_neither_bins_or_lengths_provided(
        self,
    ) -> None:
        with self.assertRaisesRegex(
            ValueError,
            "Please set either bins or lengths to initialize an ImplicitronRayBundle.",
        ):
            ImplicitronRayBundle(
                origins=torch.rand(2, 3, 4, 3),
                directions=torch.rand(2, 3, 4, 3),
                lengths=None,
                xys=torch.rand(2, 3, 4, 2),
                bins=None,
            )

    def test_conical_frustum_to_gaussian(self) -> None:
        """Compare against values hand-computed with the official MipNerf code."""
        origins = torch.zeros(3, 3, 3)
        directions = torch.tensor(
            [
                [[0, 0, 0], [1, 0, 0], [3, 0, 0]],
                [[0, 0.25, 0], [1, 0.25, 0], [3, 0.25, 0]],
                [[0, 1, 0], [1, 1, 0], [3, 1, 0]],
            ]
        )
        bins = torch.tensor(
            [
                [[0.5, 1.5], [0.3, 0.7], [0.3, 0.7]],
                [[0.5, 1.5], [0.3, 0.7], [0.3, 0.7]],
                [[0.5, 1.5], [0.3, 0.7], [0.3, 0.7]],
            ]
        )
        # see test_compute_pixel_radii_from_grid
        radii = torch.tensor(
            [
                [1.25, 2.25, 2.25],
                [1.75, 2.75, 2.75],
                [1.75, 2.75, 2.75],
            ]
        )
        radii = radii[..., None] / 12**0.5

        # The expected mean and diagonal covariance have been computed
        # by hand from the official code of MipNerf.
        # https://github.com/google/mipnerf/blob/84c969e0a623edd183b75693aed72a7e7c22902d/internal/mip.py#L125
        # mean, cov_diag = cast_rays(length, origins, directions, radii, 'cone', diag=True)

        expected_mean = torch.tensor(
            [
                [
                    [[0.0, 0.0, 0.0]],
                    [[0.5506329, 0.0, 0.0]],
                    [[1.6518986, 0.0, 0.0]],
                ],
                [
                    [[0.0, 0.28846154, 0.0]],
                    [[0.5506329, 0.13765822, 0.0]],
                    [[1.6518986, 0.13765822, 0.0]],
                ],
                [
                    [[0.0, 1.1538461, 0.0]],
                    [[0.5506329, 0.5506329, 0.0]],
                    [[1.6518986, 0.5506329, 0.0]],
                ],
            ]
        )
        expected_diag_cov = torch.tensor(
            [
                [
                    [[0.04544772, 0.04544772, 0.04544772]],
                    [[0.01130973, 0.03317059, 0.03317059]],
                    [[0.10178753, 0.03317059, 0.03317059]],
                ],
                [
                    [[0.08907752, 0.00404956, 0.08907752]],
                    [[0.0142245, 0.04734321, 0.04955113]],
                    [[0.10212927, 0.04991625, 0.04955113]],
                ],
                [
                    [[0.08907752, 0.0647929, 0.08907752]],
                    [[0.03608529, 0.03608529, 0.04955113]],
                    [[0.10674264, 0.05590574, 0.04955113]],
                ],
            ]
        )

        ray = ImplicitronRayBundle(
            origins=origins,
            directions=directions,
            bins=bins,
            lengths=None,
            pixel_radii_2d=radii,
            xys=None,
        )
        mean, diag_cov = conical_frustum_to_gaussian(ray)

        self.assertClose(mean, expected_mean)
        self.assertClose(diag_cov, expected_diag_cov)

    def test_scale_conical_frustum_to_gaussian(self) -> None:
        """Scaling bins by s must scale the mean by s and the covariance by s**2."""
        origins = torch.zeros(2, 2, 3)
        directions = torch.Tensor(
            [
                [[0, 1, 0], [0, 0, 1]],
                [[0, 1, 0], [0, 0, 1]],
            ]
        )
        bins = torch.Tensor(
            [
                [[0.5, 1.5], [0.3, 0.7]],
                [[0.5, 1.5], [0.3, 0.7]],
            ]
        )
        radii = torch.ones(2, 2, 1)

        ray = ImplicitronRayBundle(
            origins=origins,
            directions=directions,
            bins=bins,
            pixel_radii_2d=radii,
            lengths=None,
            xys=None,
        )

        mean, diag_cov = conical_frustum_to_gaussian(ray)

        scaling_factor = 2.5
        ray = ImplicitronRayBundle(
            origins=origins,
            directions=directions,
            bins=bins * scaling_factor,
            pixel_radii_2d=radii,
            lengths=None,
            xys=None,
        )
        mean_scaled, diag_cov_scaled = conical_frustum_to_gaussian(ray)
        np.testing.assert_allclose(mean * scaling_factor, mean_scaled)
        np.testing.assert_allclose(
            diag_cov * scaling_factor**2, diag_cov_scaled, atol=1e-6
        )

    def test_approximate_conical_frustum_as_gaussian(self) -> None:
        """Ensure that the computation modularity in our function is well done."""
        bins = torch.Tensor([[0.5, 1.5], [0.3, 0.7]])
        radii = torch.Tensor([[1.0], [1.0]])
        t_mean, t_var, r_var = approximate_conical_frustum_as_gaussians(bins, radii)

        self.assertEqual(t_mean.shape, (2, 1))
        self.assertEqual(t_var.shape, (2, 1))
        self.assertEqual(r_var.shape, (2, 1))

        # mu = bin midpoints, delta = bin half-widths
        mu = np.array([[1.0], [0.5]])
        delta = np.array([[0.5], [0.2]])

        np.testing.assert_allclose(
            mu + (2 * mu * delta**2) / (3 * mu**2 + delta**2), t_mean.numpy()
        )
        np.testing.assert_allclose(
            (delta**2) / 3
            - (4 / 15)
            * ((delta**4 * (12 * mu**2 - delta**2)) / (3 * mu**2 + delta**2) ** 2),
            t_var.numpy(),
        )
        np.testing.assert_allclose(
            radii**2
            * (
                (mu**2) / 4
                + (5 / 12) * delta**2
                - 4 / 15 * (delta**4) / (3 * mu**2 + delta**2)
            ),
            r_var.numpy(),
        )

    def test_compute_3d_diagonal_covariance_gaussian(self) -> None:
        ray_directions = torch.Tensor([[0, 0, 1]])
        t_var = torch.Tensor([0.5, 0.5, 1])
        r_var = torch.Tensor([0.6, 0.3, 0.4])
        expected_diag_cov = np.array(
            [
                [
                    # t_cov_diag + xy_cov_diag
                    [0.0 + 0.6, 0.0 + 0.6, 0.5 + 0.0],
                    [0.0 + 0.3, 0.0 + 0.3, 0.5 + 0.0],
                    [0.0 + 0.4, 0.0 + 0.4, 1.0 + 0.0],
                ]
            ]
        )
        diag_cov = compute_3d_diagonal_covariance_gaussian(ray_directions, t_var, r_var)
        np.testing.assert_allclose(diag_cov.numpy(), expected_diag_cov)

    def test_conical_frustum_to_gaussian_raise_valueerror(self) -> None:
        # A bundle without bins/pixel_radii_2d cannot be cast as a cone.
        lengths = torch.linspace(0, 1, steps=6)
        directions = torch.tensor([0, 0, 1])
        origins = torch.tensor([1, 1, 1])
        ray = ImplicitronRayBundle(
            origins=origins, directions=directions, lengths=lengths, xys=None
        )

        expected_error_message = (
            "RayBundle pixel_radii_2d or bins have not been provided."
            " Look at pytorch3d.renderer.implicit.renderer.ray_sampler::"
            "AbstractMaskRaySampler to see how to compute them. Have you forgot to set"
            "`cast_ray_bundle_as_cone` to True?"
        )

        with self.assertRaisesRegex(ValueError, expected_error_message):
            _ = conical_frustum_to_gaussian(ray)

        # Ensure message is coherent with AbstractMaskRaySampler
        class FakeRaySampler(AbstractMaskRaySampler):
            def _get_min_max_depth_bounds(self, *args):
                return None

        message_assertion = (
            "If cast_ray_bundle_as_cone has been removed please update the doc"
            "conical_frustum_to_gaussian"
        )
        self.assertIsNotNone(
            getattr(FakeRaySampler(), "cast_ray_bundle_as_cone", None),
            message_assertion,
        )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_models_renderer_ray_sampler.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_models_renderer_ray_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..17c1d1326a1911e7fbeff5bc636962fb13437b13
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_models_renderer_ray_sampler.py
@@ -0,0 +1,290 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+from itertools import product
+from typing import Tuple
+
+from unittest.mock import patch
+
+import torch
+from pytorch3d.common.compat import meshgrid_ij
+from pytorch3d.implicitron.models.renderer.base import EvaluationMode
+from pytorch3d.implicitron.models.renderer.ray_sampler import (
+ AdaptiveRaySampler,
+ compute_radii,
+ NearFarRaySampler,
+)
+
+from pytorch3d.renderer.cameras import (
+ CamerasBase,
+ FoVOrthographicCameras,
+ FoVPerspectiveCameras,
+ OrthographicCameras,
+ PerspectiveCameras,
+)
+from pytorch3d.renderer.implicit.utils import HeterogeneousRayBundle
+from tests.common_camera_utils import init_random_cameras
+
+from tests.common_testing import TestCaseMixin
+
# All camera parametrizations the ray samplers are exercised against:
# both FoV and non-FoV (screen-space) cameras.
CAMERA_TYPES = (
    FoVPerspectiveCameras,
    FoVOrthographicCameras,
    OrthographicCameras,
    PerspectiveCameras,
)
+
+
def unproject_xy_grid_from_ndc_to_world_coord(
    cameras: "CamerasBase", xy_grid: torch.Tensor
) -> torch.Tensor:
    """
    Unproject a grid of xy points from NDC coordinates to world coordinates.

    Each xy location is lifted to the plane at depth 1 by appending a
    z-coordinate equal to 1, and then unprojected through the cameras.

    Args:
        cameras: CamerasBase used to perform the unprojection.
        xy_grid: A tensor of shape `(batch, ..., 2)` representing the
            x, y coordinates in NDC space.

    Returns:
        A tensor of shape `(batch, ..., 3)` holding the world-space points
        of the plane at depth 1 (same leading dims as `xy_grid`).
    """
    # NOTE: the return annotation previously claimed a tuple of two tensors,
    # but a single tensor is (and always was) returned.
    batch_size = xy_grid.shape[0]
    n_rays_per_image = xy_grid.shape[1:-1].numel()
    # Flatten all spatial dims, append z=1 to every xy location.
    xy = xy_grid.view(batch_size, -1, 2)
    xyz = torch.cat([xy, xy_grid.new_ones(batch_size, n_rays_per_image, 1)], dim=-1)
    plane_at_depth1 = cameras.unproject_points(xyz, from_ndc=True)
    # Restore the original leading shape of xy_grid.
    return plane_at_depth1.view(*xy_grid.shape[:-1], 3)
+
+
class TestRaysampler(TestCaseMixin, unittest.TestCase):
    """
    Tests for the implicitron ray samplers: radii computation, cone casting
    (bins/pixel_radii_2d), and output shapes in training/evaluation modes.
    """

    def test_ndc_raysampler_n_ray_total_is_none(self):
        # The radii/lengths logic below assumes n_rays_total is unsupported;
        # these assertions flag the assumption if that ever changes.
        sampler = NearFarRaySampler()
        message = (
            "If you introduce the support of `n_rays_total` for {0}, please handle the "
            "packing and unpacking logic for the radii and lengths computation."
        )
        self.assertIsNone(
            sampler._training_raysampler._n_rays_total, message.format(type(sampler))
        )
        self.assertIsNone(
            sampler._evaluation_raysampler._n_rays_total, message.format(type(sampler))
        )

        sampler = AdaptiveRaySampler()
        self.assertIsNone(
            sampler._training_raysampler._n_rays_total, message.format(type(sampler))
        )
        self.assertIsNone(
            sampler._evaluation_raysampler._n_rays_total, message.format(type(sampler))
        )

    def test_catch_heterogeneous_exception(self):
        # Cone casting is incompatible with HeterogeneousRayBundle: with
        # cast_ray_bundle_as_cone=True a TypeError must be raised; without
        # it, the same bundle is accepted.
        cameras = init_random_cameras(FoVPerspectiveCameras, 1, random_z=True)

        class FakeSampler:
            def __init__(self):
                self.min_x, self.max_x = 1, 2
                self.min_y, self.max_y = 1, 2

            def __call__(self, **kwargs):
                return HeterogeneousRayBundle(
                    torch.rand(3), torch.rand(3), torch.rand(3), torch.rand(1)
                )

        with patch(
            "pytorch3d.implicitron.models.renderer.ray_sampler.NDCMultinomialRaysampler",
            return_value=FakeSampler(),
        ):
            for sampler in [
                AdaptiveRaySampler(cast_ray_bundle_as_cone=True),
                NearFarRaySampler(cast_ray_bundle_as_cone=True),
            ]:
                with self.assertRaises(TypeError):
                    _ = sampler(cameras, EvaluationMode.TRAINING)
            for sampler in [
                AdaptiveRaySampler(cast_ray_bundle_as_cone=False),
                NearFarRaySampler(cast_ray_bundle_as_cone=False),
            ]:
                _ = sampler(cameras, EvaluationMode.TRAINING)

    def test_compute_radii(self):
        # Build a full H x W grid of NDC pixel centers.
        batch_size = 1
        image_height, image_width = 20, 10
        min_y, max_y, min_x, max_x = -1.0, 1.0, -1.0, 1.0
        y, x = meshgrid_ij(
            torch.linspace(min_y, max_y, image_height, dtype=torch.float32),
            torch.linspace(min_x, max_x, image_width, dtype=torch.float32),
        )
        xy_grid = torch.stack([x, y], dim=-1).view(-1, 2)
        pixel_width = (max_x - min_x) / (image_width - 1)
        pixel_height = (max_y - min_y) / (image_height - 1)

        for cam_type in CAMERA_TYPES:
            # init a batch of random cameras
            cameras = init_random_cameras(cam_type, batch_size, random_z=True)
            # compute_radii allows us to compute the radii without having
            # access to the full grid. Raysamplers during the training
            # will sample random rays from the grid.
            radii = compute_radii(
                cameras, xy_grid, pixel_hw_ndc=(pixel_height, pixel_width)
            )
            plane_at_depth1 = unproject_xy_grid_from_ndc_to_world_coord(
                cameras, xy_grid
            )
            # This method absolutely needs the full grid to work.
            expected_radii = compute_pixel_radii_from_grid(
                plane_at_depth1.reshape(1, image_height, image_width, 3)
            )
            self.assertClose(expected_radii.reshape(-1, 1), radii)

    def test_forward(self):
        """Without cone casting: no bins/radii, and shapes match the mode."""
        n_rays_per_image = 16
        image_height, image_width = 20, 20
        kwargs = {
            "image_width": image_width,
            "image_height": image_height,
            "n_pts_per_ray_training": 32,
            "n_pts_per_ray_evaluation": 32,
            "n_rays_per_image_sampled_from_mask": n_rays_per_image,
            "cast_ray_bundle_as_cone": False,
        }

        batch_size = 2
        samplers = [NearFarRaySampler(**kwargs), AdaptiveRaySampler(**kwargs)]
        evaluation_modes = [EvaluationMode.TRAINING, EvaluationMode.EVALUATION]

        for cam_type, sampler, evaluation_mode in product(
            CAMERA_TYPES, samplers, evaluation_modes
        ):
            cameras = init_random_cameras(cam_type, batch_size, random_z=True)
            ray_bundle = sampler(cameras, evaluation_mode)

            # evaluation samples the full image; training samples from a mask
            shape_out = (
                (batch_size, image_width, image_height)
                if evaluation_mode == EvaluationMode.EVALUATION
                else (batch_size, n_rays_per_image, 1)
            )
            n_pts_per_ray = (
                kwargs["n_pts_per_ray_evaluation"]
                if evaluation_mode == EvaluationMode.EVALUATION
                else kwargs["n_pts_per_ray_training"]
            )
            self.assertIsNone(ray_bundle.bins)
            self.assertIsNone(ray_bundle.pixel_radii_2d)
            self.assertEqual(
                ray_bundle.lengths.shape,
                (*shape_out, n_pts_per_ray),
            )
            self.assertEqual(ray_bundle.directions.shape, (*shape_out, 3))
            self.assertEqual(ray_bundle.origins.shape, (*shape_out, 3))

    def test_forward_with_use_bins(self):
        """With cone casting: bins/radii are set and lengths are bin midpoints."""
        n_rays_per_image = 16
        image_height, image_width = 20, 20
        kwargs = {
            "image_width": image_width,
            "image_height": image_height,
            "n_pts_per_ray_training": 32,
            "n_pts_per_ray_evaluation": 32,
            "n_rays_per_image_sampled_from_mask": n_rays_per_image,
            "cast_ray_bundle_as_cone": True,
        }

        batch_size = 1
        samplers = [NearFarRaySampler(**kwargs), AdaptiveRaySampler(**kwargs)]
        evaluation_modes = [EvaluationMode.TRAINING, EvaluationMode.EVALUATION]
        for cam_type, sampler, evaluation_mode in product(
            CAMERA_TYPES, samplers, evaluation_modes
        ):
            cameras = init_random_cameras(cam_type, batch_size, random_z=True)
            ray_bundle = sampler(cameras, evaluation_mode)

            lengths = 0.5 * (ray_bundle.bins[..., :-1] + ray_bundle.bins[..., 1:])

            self.assertClose(ray_bundle.lengths, lengths)
            shape_out = (
                (batch_size, image_width, image_height)
                if evaluation_mode == EvaluationMode.EVALUATION
                else (batch_size, n_rays_per_image, 1)
            )
            self.assertEqual(ray_bundle.pixel_radii_2d.shape, (*shape_out, 1))
            self.assertEqual(ray_bundle.directions.shape, (*shape_out, 3))
            self.assertEqual(ray_bundle.origins.shape, (*shape_out, 3))
+
+
+# Helper to test compute_radii
def compute_pixel_radii_from_grid(pixel_grid: torch.Tensor) -> torch.Tensor:
    """
    Compute the conical-frustum radius of every pixel in a full grid.

    The radius of a pixel is derived from the distances to its neighbors
    along the x and y axes:

        (dx_norm + dy_norm) * 0.5 * 2 / 12**0.5

    where 2 / 12**0.5 is a scaling factor matching the variance of the
    pixel's footprint, so the expression reduces to a division by 12**0.5.

    Args:
        pixel_grid: A tensor of shape `(..., H, W, dim)` representing the
            full grid of ray pixel positions.

    Returns:
        The radii for each pixel, with shape `(..., H, W, 1)`.
    """
    # Norms of the translations between horizontally / vertically
    # adjacent grid points.
    dx_norm = torch.linalg.norm(
        torch.diff(pixel_grid, dim=-2), dim=-1, keepdim=True
    )  # [..., H, W - 1, 1]
    dy_norm = torch.linalg.norm(
        torch.diff(pixel_grid, dim=-3), dim=-1, keepdim=True
    )  # [..., H - 1, W, 1]

    # Repeat the last column / row to restore the [..., H, W, 1] shape.
    dx_norm = torch.cat([dx_norm, dx_norm[..., -1:, :]], dim=-2)
    dy_norm = torch.cat([dy_norm, dy_norm[..., -1:, :, :]], dim=-3)

    # (dx + dy) * 0.5 gives the base radius; * 2 / 12**0.5 rescales it.
    return (dx_norm + dy_norm) / 12**0.5
+
+
class TestRadiiComputationOnFullGrid(TestCaseMixin, unittest.TestCase):
    def test_compute_pixel_radii_from_grid(self):
        """Check the grid helper against hand-computed neighbor distances."""
        grid = torch.tensor(
            [
                [[0.0, 0, 0], [1.0, 0.0, 0], [3.0, 0.0, 0.0]],
                [[0.0, 0.25, 0], [1.0, 0.25, 0], [3.0, 0.25, 0]],
                [[0.0, 1, 0], [1.0, 1.0, 0], [3.0000, 1.0, 0]],
            ]
        )

        # Distances to the next row; the last row repeats the previous one.
        y_norm = torch.tensor(
            [
                [0.25, 0.25, 0.25],
                [0.75, 0.75, 0.75],
                [0.75, 0.75, 0.75],
            ]
        )
        # Distances to the next column; the last column repeats the 2nd.
        x_norm = torch.tensor(
            [
                [1.0, 2.0, 2.0],
                [1.0, 2.0, 2.0],
                [1.0, 2.0, 2.0],
            ]
        )
        expected = ((x_norm + y_norm) / 12**0.5)[..., None]
        self.assertClose(compute_pixel_radii_from_grid(grid), expected)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_orm_types.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_orm_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6f94c0100ffc5d8ca111a553b21861b881c206a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_orm_types.py
@@ -0,0 +1,62 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+
+import numpy as np
+
+from pytorch3d.implicitron.dataset.orm_types import ArrayTypeFactory, TupleTypeFactory
+
+
class TestOrmTypes(unittest.TestCase):
    def test_tuple_serialization_none(self):
        """None round-trips through the tuple column type unchanged."""
        column_type = TupleTypeFactory()()
        stored = column_type.process_bind_param(None, None)
        self.assertIsNone(stored)
        restored = column_type.process_result_value(stored, None)
        self.assertIsNone(restored)

    def test_tuple_serialization_1d(self):
        """1D int and float tuples round-trip with matching element types."""
        for original in ((1, 2, 3), (4.5, 6.7)):
            column_type = TupleTypeFactory(type(original[0]), (len(original),))()
            stored = column_type.process_bind_param(original, None)
            restored = column_type.process_result_value(stored, None)
            self.assertEqual(type(restored[0]), type(original[0]))
            np.testing.assert_almost_equal(restored, original, decimal=6)

    def test_tuple_serialization_2d(self):
        """A nested 2x3 float tuple round-trips through the column type."""
        original = ((1.0, 2.0, 3.0), (4.5, 5.5, 6.6))
        column_type = TupleTypeFactory(type(original[0][0]), (2, 3))()
        stored = column_type.process_bind_param(original, None)
        restored = column_type.process_result_value(stored, None)
        self.assertEqual(type(restored[0][0]), type(original[0][0]))
        # we use float32 to serialise
        np.testing.assert_almost_equal(restored, original, decimal=6)

    def test_array_serialization_none(self):
        """None round-trips through the array column type unchanged."""
        column_type = ArrayTypeFactory((3, 3))()
        stored = column_type.process_bind_param(None, None)
        self.assertIsNone(stored)
        restored = column_type.process_result_value(stored, None)
        self.assertIsNone(restored)

    def test_array_serialization(self):
        """1D and 2D arrays round-trip (as float32) through both the
        dynamic-size and the fixed-size array column types."""
        for values in ([1, 2, 3], [[4.5, 6.7], [8.9, 10.0]]):
            original = np.array(values)

            # first, dynamic-size array
            column_type = ArrayTypeFactory()()
            stored = column_type.process_bind_param(original, None)
            restored = column_type.process_result_value(stored, None)
            self.assertEqual(restored.dtype, np.float32)
            np.testing.assert_almost_equal(restored, original, decimal=6)

            # second, fixed-size array
            column_type = ArrayTypeFactory(tuple(original.shape))()
            stored = column_type.process_bind_param(original, None)
            restored = column_type.process_result_value(stored, None)
            self.assertEqual(restored.dtype, np.float32)
            np.testing.assert_almost_equal(restored, original, decimal=6)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_pointcloud_utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_pointcloud_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc3176cc6717a9e548e2eb1fade44a43255008dd
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_pointcloud_utils.py
@@ -0,0 +1,72 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+
+import torch
+from pytorch3d.implicitron.tools.point_cloud_utils import get_rgbd_point_cloud
+
+from pytorch3d.renderer.cameras import PerspectiveCameras
+from tests.common_testing import TestCaseMixin
+
+
class TestPointCloudUtils(TestCaseMixin, unittest.TestCase):
    def setUp(self):
        # fixed seed so the random RGBD image is reproducible
        torch.manual_seed(42)

    def test_unproject(self):
        """
        Unproject a constant-depth RGBD image with both the z-depth and the
        euclidean depth conventions and verify the resulting point positions.
        """
        H, W = 50, 100

        # Random RGBD image with depth 3
        # (depth 0 = at the camera)
        # and purple in the upper right corner

        image = torch.rand(4, H, W)
        depth = 3
        image[3] = depth
        image[1, H // 2 :, W // 2 :] *= 0.4

        # two ways to define the same camera:
        # at the origin facing the positive z axis
        ndc_camera = PerspectiveCameras(focal_length=1.0)
        screen_camera = PerspectiveCameras(
            focal_length=H // 2,
            in_ndc=False,
            image_size=((H, W),),
            principal_point=((W / 2, H / 2),),
        )

        for camera in (ndc_camera, screen_camera):
            # 1. z-depth
            cloud = get_rgbd_point_cloud(
                camera,
                image_rgb=image[:3][None],
                depth_map=image[3:][None],
                euclidean=False,
            )
            [points] = cloud.points_list()
            self.assertConstant(points[:, 2], depth)  # constant depth
            # expected extreme x/y coordinates of the unprojected pixel centers
            extremes = depth * torch.tensor([W / H - 1 / H, 1 - 1 / H])
            self.assertClose(points[:, :2].min(0).values, -extremes)
            self.assertClose(points[:, :2].max(0).values, extremes)

            # 2. euclidean: all points lie at distance `depth` from the camera
            cloud = get_rgbd_point_cloud(
                camera,
                image_rgb=image[:3][None],
                depth_map=image[3:][None],
                euclidean=True,
            )
            [points] = cloud.points_list()
            self.assertConstant(torch.norm(points, dim=1), depth, atol=1e-5)

            # 3. four channels: smoke test that RGBA input is accepted
            get_rgbd_point_cloud(
                camera,
                image_rgb=image[None],
                depth_map=image[3:][None],
                euclidean=True,
            )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_ray_point_refiner.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_ray_point_refiner.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a6ab36126cf8baab4e0854ba7f4e9574ceaf43f
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_ray_point_refiner.py
@@ -0,0 +1,157 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+from itertools import product
+
+import torch
+
+from pytorch3d.implicitron.models.renderer.ray_point_refiner import (
+ apply_blurpool_on_weights,
+ RayPointRefiner,
+)
+from pytorch3d.implicitron.models.renderer.ray_sampler import ImplicitronRayBundle
+from tests.common_testing import TestCaseMixin
+
+
class TestRayPointRefiner(TestCaseMixin, unittest.TestCase):
    def test_simple(self):
        """
        With uniform weights and deterministic sampling, refined lengths must
        be evenly spaced; with random sampling they must stay in range and
        sorted.
        """
        length = 15
        n_pts_per_ray = 10

        for add_input_samples, use_blurpool in product([False, True], [False, True]):
            ray_point_refiner = RayPointRefiner(
                n_pts_per_ray=n_pts_per_ray,
                random_sampling=False,
                add_input_samples=add_input_samples,
                blurpool_weights=use_blurpool,
            )
            lengths = torch.arange(length, dtype=torch.float32).expand(3, 25, length)
            bundle = ImplicitronRayBundle(
                lengths=lengths,
                origins=None,
                directions=None,
                xys=None,
                camera_ids=None,
                camera_counts=None,
            )
            weights = torch.ones(3, 25, length)
            refined = ray_point_refiner(bundle, weights)

            # only lengths are refined; the other fields pass through as None
            self.assertIsNone(refined.directions)
            self.assertIsNone(refined.origins)
            self.assertIsNone(refined.xys)
            expected = torch.linspace(0.5, length - 1.5, n_pts_per_ray)
            expected = expected.expand(3, 25, n_pts_per_ray)
            if add_input_samples:
                full_expected = torch.cat((lengths, expected), dim=-1).sort()[0]
            else:
                full_expected = expected
            self.assertClose(refined.lengths, full_expected)

            ray_point_refiner_random = RayPointRefiner(
                n_pts_per_ray=n_pts_per_ray,
                random_sampling=True,
                add_input_samples=add_input_samples,
                blurpool_weights=use_blurpool,
            )
            refined_random = ray_point_refiner_random(bundle, weights)
            lengths_random = refined_random.lengths
            self.assertEqual(lengths_random.shape, full_expected.shape)
            if not add_input_samples:
                self.assertGreater(lengths_random.min().item(), 0.5)
                self.assertLess(lengths_random.max().item(), length - 1.5)

            # Check sorted
            self.assertTrue(
                (lengths_random[..., 1:] - lengths_random[..., :-1] > 0).all()
            )

    def test_simple_use_bins(self):
        """
        Same spirit as test_simple but uses bins in the ImplicitronRayBundle.
        It has been duplicated to avoid cognitive overload while reading the
        test (lots of if/else).
        """
        length = 15
        n_pts_per_ray = 10

        for add_input_samples, use_blurpool in product([False, True], [False, True]):
            ray_point_refiner = RayPointRefiner(
                n_pts_per_ray=n_pts_per_ray,
                random_sampling=False,
                add_input_samples=add_input_samples,
            )

            bundle = ImplicitronRayBundle(
                lengths=None,
                bins=torch.arange(length + 1, dtype=torch.float32).expand(
                    3, 25, length + 1
                ),
                origins=None,
                directions=None,
                xys=None,
                camera_ids=None,
                camera_counts=None,
            )
            weights = torch.ones(3, 25, length)
            # NOTE(review): unlike test_simple, blurpool_weights is passed to
            # the call rather than the constructor — confirm both are supported.
            refined = ray_point_refiner(bundle, weights, blurpool_weights=use_blurpool)

            self.assertIsNone(refined.directions)
            self.assertIsNone(refined.origins)
            self.assertIsNone(refined.xys)
            expected_bins = torch.linspace(0, length, n_pts_per_ray + 1)
            expected_bins = expected_bins.expand(3, 25, n_pts_per_ray + 1)
            if add_input_samples:
                expected_bins = torch.cat((bundle.bins, expected_bins), dim=-1).sort()[
                    0
                ]
            # refined lengths are the midpoints of the refined bins
            full_expected = torch.lerp(
                expected_bins[..., :-1], expected_bins[..., 1:], 0.5
            )

            self.assertClose(refined.lengths, full_expected)

            ray_point_refiner_random = RayPointRefiner(
                n_pts_per_ray=n_pts_per_ray,
                random_sampling=True,
                add_input_samples=add_input_samples,
            )

            refined_random = ray_point_refiner_random(
                bundle, weights, blurpool_weights=use_blurpool
            )
            lengths_random = refined_random.lengths
            self.assertEqual(lengths_random.shape, full_expected.shape)
            if not add_input_samples:
                self.assertGreater(lengths_random.min().item(), 0)
                self.assertLess(lengths_random.max().item(), length)

            # Check sorted
            self.assertTrue(
                (lengths_random[..., 1:] - lengths_random[..., :-1] > 0).all()
            )

    def test_apply_blurpool_on_weights(self):
        # blurpool: 0.5 * (max of each adjacent pair, last element repeated)
        weights = torch.tensor(
            [
                [0.5, 0.6, 0.7],
                [0.5, 0.3, 0.9],
            ]
        )
        expected_weights = 0.5 * torch.tensor(
            [
                [0.5 + 0.6, 0.6 + 0.7, 0.7 + 0.7],
                [0.5 + 0.5, 0.5 + 0.9, 0.9 + 0.9],
            ]
        )
        out_weights = apply_blurpool_on_weights(weights)
        self.assertTrue(torch.allclose(out_weights, expected_weights))

    def test_shapes_apply_blurpool_on_weights(self):
        # the operation must preserve the input shape for arbitrary ranks
        weights = torch.randn((5, 4, 3, 2, 1))
        out_weights = apply_blurpool_on_weights(weights)
        self.assertEqual((5, 4, 3, 2, 1), out_weights.shape)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_sql_dataset.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_sql_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5baf505752545579a7e069ef6d00b17ccc315a8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_sql_dataset.py
@@ -0,0 +1,546 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+import os
+import unittest
+from collections import Counter
+
+import pkg_resources
+
+import torch
+
+from pytorch3d.implicitron.dataset.sql_dataset import SqlIndexDataset
+
+NO_BLOBS_KWARGS = {
+ "dataset_root": "",
+ "load_images": False,
+ "load_depths": False,
+ "load_masks": False,
+ "load_depth_masks": False,
+ "box_crop": False,
+}
+
+logger = logging.getLogger("pytorch3d.implicitron.dataset.sql_dataset")
+sh = logging.StreamHandler()
+logger.addHandler(sh)
+logger.setLevel(logging.DEBUG)
+
+
+DATASET_ROOT = pkg_resources.resource_filename(__name__, "data/sql_dataset")
+METADATA_FILE = os.path.join(DATASET_ROOT, "sql_dataset_100.sqlite")
+SET_LIST_FILE = os.path.join(DATASET_ROOT, "set_lists_100.json")
+
+
+class TestSqlDataset(unittest.TestCase):
+ def test_basic(self, sequence="cat1_seq2", frame_number=4):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 100)
+
+ # check the items are consecutive
+ past_sequences = set()
+ last_frame_number = -1
+ last_sequence = ""
+ for i in range(len(dataset)):
+ item = dataset[i]
+
+ if item.frame_number == 0:
+ self.assertNotIn(item.sequence_name, past_sequences)
+ past_sequences.add(item.sequence_name)
+ last_sequence = item.sequence_name
+ else:
+ self.assertEqual(item.sequence_name, last_sequence)
+ self.assertEqual(item.frame_number, last_frame_number + 1)
+
+ last_frame_number = item.frame_number
+
+ # test indexing
+ with self.assertRaises(IndexError):
+ dataset[len(dataset) + 1]
+
+ # test sequence-frame indexing
+ item = dataset[sequence, frame_number]
+ self.assertEqual(item.sequence_name, sequence)
+ self.assertEqual(item.frame_number, frame_number)
+
+ with self.assertRaises(IndexError):
+ dataset[sequence, 13]
+
+ def test_filter_empty_masks(self):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=True,
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 78)
+
+ def test_pick_frames_sql_clause(self):
+ dataset_no_empty_masks = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=True,
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ pick_frames_sql_clause="_mask_mass IS NULL OR _mask_mass > 0",
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ # check the datasets are equal
+ self.assertEqual(len(dataset), len(dataset_no_empty_masks))
+ for i in range(len(dataset)):
+ item_nem = dataset_no_empty_masks[i]
+ item = dataset[i]
+ self.assertEqual(item_nem.image_path, item.image_path)
+
+ # remove_empty_masks together with the custom criterion
+ dataset_ts = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=True,
+ pick_frames_sql_clause="frame_timestamp < 0.15",
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+ self.assertEqual(len(dataset_ts), 19)
+
+ def test_limit_categories(self, category="cat0"):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ pick_categories=[category],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 50)
+ for i in range(len(dataset)):
+ self.assertEqual(dataset[i].sequence_category, category)
+
+ def test_limit_sequences(self, num_sequences=3):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ limit_sequences_to=num_sequences,
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 10 * num_sequences)
+
+ def delist(sequence_name):
+ return sequence_name if isinstance(sequence_name, str) else sequence_name[0]
+
+ unique_seqs = {delist(dataset[i].sequence_name) for i in range(len(dataset))}
+ self.assertEqual(len(unique_seqs), num_sequences)
+
+ def test_pick_exclude_sequencess(self, sequence="cat1_seq2"):
+ # pick sequence
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ pick_sequences=[sequence],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 10)
+ unique_seqs = {dataset[i].sequence_name for i in range(len(dataset))}
+ self.assertCountEqual(unique_seqs, {sequence})
+
+ item = dataset[sequence, 0]
+ self.assertEqual(item.sequence_name, sequence)
+ self.assertEqual(item.frame_number, 0)
+
+ # exclude sequence
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ exclude_sequences=[sequence],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 90)
+ unique_seqs = {dataset[i].sequence_name for i in range(len(dataset))}
+ self.assertNotIn(sequence, unique_seqs)
+
+ with self.assertRaises(IndexError):
+ dataset[sequence, 0]
+
+ def test_limit_frames(self, num_frames=13):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ limit_to=num_frames,
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), num_frames)
+ unique_seqs = {dataset[i].sequence_name for i in range(len(dataset))}
+ self.assertEqual(len(unique_seqs), 2)
+
+ # test when the limit is not binding
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ limit_to=1000,
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 100)
+
+ def test_limit_frames_per_sequence(self, num_frames=2):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ n_frames_per_sequence=num_frames,
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), num_frames * 10)
+ seq_counts = Counter(dataset[i].sequence_name for i in range(len(dataset)))
+ self.assertEqual(len(seq_counts), 10)
+ self.assertCountEqual(
+ set(seq_counts.values()), {2}
+ ) # all counts are num_frames
+
+ with self.assertRaises(IndexError):
+ dataset[next(iter(seq_counts)), num_frames + 1]
+
+ # test when the limit is not binding
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ n_frames_per_sequence=13,
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+ self.assertEqual(len(dataset), 100)
+
+ def test_limit_sequence_per_category(self, num_sequences=2):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ limit_sequences_per_category_to=num_sequences,
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), num_sequences * 10 * 2)
+ seq_names = list(dataset.sequence_names())
+ self.assertEqual(len(seq_names), num_sequences * 2)
+ # check that we respect the row order
+ for seq_name in seq_names:
+ self.assertLess(int(seq_name[-1]), num_sequences)
+
+ # test when the limit is not binding
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ limit_sequences_per_category_to=13,
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+ self.assertEqual(len(dataset), 100)
+
+ def test_filter_medley(self):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=True,
+ pick_categories=["cat1"],
+ exclude_sequences=["cat1_seq0"], # retaining "cat1_seq1" and on
+ limit_sequences_to=2, # retaining "cat1_seq1" and "cat1_seq2"
+ limit_to=14, # retaining full "cat1_seq1" and 4 from "cat1_seq2"
+ n_frames_per_sequence=6, # cutting "cat1_seq1" to 6 frames
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ # result: preserved 6 frames from cat1_seq1 and 4 from cat1_seq2
+ seq_counts = Counter(dataset[i].sequence_name for i in range(len(dataset)))
+ self.assertCountEqual(seq_counts.keys(), ["cat1_seq1", "cat1_seq2"])
+ self.assertEqual(seq_counts["cat1_seq1"], 6)
+ self.assertEqual(seq_counts["cat1_seq2"], 4)
+
+ def test_subsets_trivial(self):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ subset_lists_file=SET_LIST_FILE,
+ limit_to=100, # force sorting
+ subsets=["train", "test"],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 100)
+
+ # check the items are consecutive
+ past_sequences = set()
+ last_frame_number = -1
+ last_sequence = ""
+ for i in range(len(dataset)):
+ item = dataset[i]
+
+ if item.frame_number == 0:
+ self.assertNotIn(item.sequence_name, past_sequences)
+ past_sequences.add(item.sequence_name)
+ last_sequence = item.sequence_name
+ else:
+ self.assertEqual(item.sequence_name, last_sequence)
+ self.assertEqual(item.frame_number, last_frame_number + 1)
+
+ last_frame_number = item.frame_number
+
+ def test_subsets_filter_empty_masks(self):
+ # we need to test this case as it uses quite different logic with `df.drop()`
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=True,
+ subset_lists_file=SET_LIST_FILE,
+ subsets=["train", "test"],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 78)
+
+ def test_subsets_pick_frames_sql_clause(self):
+ dataset_no_empty_masks = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=True,
+ subset_lists_file=SET_LIST_FILE,
+ subsets=["train", "test"],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ pick_frames_sql_clause="_mask_mass IS NULL OR _mask_mass > 0",
+ subset_lists_file=SET_LIST_FILE,
+ subsets=["train", "test"],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ # check the datasets are equal
+ self.assertEqual(len(dataset), len(dataset_no_empty_masks))
+ for i in range(len(dataset)):
+ item_nem = dataset_no_empty_masks[i]
+ item = dataset[i]
+ self.assertEqual(item_nem.image_path, item.image_path)
+
+ # remove_empty_masks together with the custom criterion
+ dataset_ts = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=True,
+ pick_frames_sql_clause="frame_timestamp < 0.15",
+ subset_lists_file=SET_LIST_FILE,
+ subsets=["train", "test"],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset_ts), 19)
+
+ def test_single_subset(self):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ subset_lists_file=SET_LIST_FILE,
+ subsets=["train"],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 50)
+
+ with self.assertRaises(IndexError):
+ dataset[51]
+
+ # check the items are in order (the train subset holds every other frame)
+ past_sequences = set()
+ last_frame_number = -1
+ last_sequence = ""
+ for i in range(len(dataset)):
+ item = dataset[i]
+
+ if item.frame_number < 2:
+ self.assertNotIn(item.sequence_name, past_sequences)
+ past_sequences.add(item.sequence_name)
+ last_sequence = item.sequence_name
+ else:
+ self.assertEqual(item.sequence_name, last_sequence)
+ self.assertEqual(item.frame_number, last_frame_number + 2)
+
+ last_frame_number = item.frame_number
+
+ item = dataset[last_sequence, 0]
+ self.assertEqual(item.sequence_name, last_sequence)
+
+ with self.assertRaises(IndexError):
+ dataset[last_sequence, 1]
+
+ def test_subset_with_filters(self):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=True,
+ subset_lists_file=SET_LIST_FILE,
+ subsets=["train"],
+ pick_categories=["cat1"],
+ exclude_sequences=["cat1_seq0"], # retaining "cat1_seq1" and on
+ limit_sequences_to=2, # retaining "cat1_seq1" and "cat1_seq2"
+ limit_to=7, # retaining full train set of "cat1_seq1" and 2 from "cat1_seq2"
+ n_frames_per_sequence=3, # cutting "cat1_seq1" to 3 frames
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ # result: preserved 3 frames from cat1_seq1 and 2 from cat1_seq2
+ seq_counts = Counter(dataset[i].sequence_name for i in range(len(dataset)))
+ self.assertCountEqual(seq_counts.keys(), ["cat1_seq1", "cat1_seq2"])
+ self.assertEqual(seq_counts["cat1_seq1"], 3)
+ self.assertEqual(seq_counts["cat1_seq2"], 2)
+
+ def test_visitor(self):
+ dataset_sorted = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ sequences = dataset_sorted.sequence_names()
+ i = 0
+ for seq in sequences:
+ last_ts = float("-Inf")
+ for ts, _, idx in dataset_sorted.sequence_frames_in_order(seq):
+ self.assertEqual(i, idx)
+ i += 1
+ self.assertGreaterEqual(ts, last_ts)
+ last_ts = ts
+
+ # test legacy visitor
+ old_indices = None
+ for seq in sequences:
+ last_ts = float("-Inf")
+ rows = dataset_sorted._index.index.get_loc(seq)
+ indices = list(range(rows.start or 0, rows.stop, rows.step or 1))
+ fn_ts_list = dataset_sorted.get_frame_numbers_and_timestamps(indices)
+ self.assertEqual(len(fn_ts_list), len(indices))
+
+ if old_indices:
+ # check raising if we ask for multiple sequences
+ with self.assertRaises(ValueError):
+ dataset_sorted.get_frame_numbers_and_timestamps(
+ indices + old_indices
+ )
+
+ old_indices = indices
+
+ def test_visitor_subsets(self):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ limit_to=100, # force sorting
+ subset_lists_file=SET_LIST_FILE,
+ subsets=["train", "test"],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ sequences = dataset.sequence_names()
+ i = 0
+ for seq in sequences:
+ last_ts = float("-Inf")
+ seq_frames = list(dataset.sequence_frames_in_order(seq))
+ self.assertEqual(len(seq_frames), 10)
+ for ts, _, idx in seq_frames:
+ self.assertEqual(i, idx)
+ i += 1
+ self.assertGreaterEqual(ts, last_ts)
+ last_ts = ts
+
+ last_ts = float("-Inf")
+ train_frames = list(dataset.sequence_frames_in_order(seq, "train"))
+ self.assertEqual(len(train_frames), 5)
+ for ts, _, _ in train_frames:
+ self.assertGreaterEqual(ts, last_ts)
+ last_ts = ts
+
+ def test_category_to_sequence_names(self):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ subset_lists_file=SET_LIST_FILE,
+ subsets=["train", "test"],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ cat_to_seqs = dataset.category_to_sequence_names()
+ self.assertEqual(len(cat_to_seqs), 2)
+ self.assertIn("cat1", cat_to_seqs)
+ self.assertEqual(len(cat_to_seqs["cat1"]), 5)
+
+ # check that override preserves the behavior
+ cat_to_seqs_base = super(SqlIndexDataset, dataset).category_to_sequence_names()
+ self.assertDictEqual(cat_to_seqs, cat_to_seqs_base)
+
+ def test_category_to_sequence_names_filters(self):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=True,
+ subset_lists_file=SET_LIST_FILE,
+ exclude_sequences=["cat1_seq0"],
+ subsets=["train", "test"],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ cat_to_seqs = dataset.category_to_sequence_names()
+ self.assertEqual(len(cat_to_seqs), 2)
+ self.assertIn("cat1", cat_to_seqs)
+ self.assertEqual(len(cat_to_seqs["cat1"]), 4) # minus one
+
+ # check that override preserves the behavior
+ cat_to_seqs_base = super(SqlIndexDataset, dataset).category_to_sequence_names()
+ self.assertDictEqual(cat_to_seqs, cat_to_seqs_base)
+
+ def test_meta_access(self):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ subset_lists_file=SET_LIST_FILE,
+ subsets=["train"],
+ frame_data_builder_FrameDataBuilder_args=NO_BLOBS_KWARGS,
+ )
+
+ self.assertEqual(len(dataset), 50)
+
+ for idx in [10, ("cat0_seq2", 2)]:
+ example_meta = dataset.meta[idx]
+ example = dataset[idx]
+ self.assertEqual(example_meta.sequence_name, example.sequence_name)
+ self.assertEqual(example_meta.frame_number, example.frame_number)
+ self.assertEqual(example_meta.frame_timestamp, example.frame_timestamp)
+ self.assertEqual(example_meta.sequence_category, example.sequence_category)
+ torch.testing.assert_close(example_meta.camera.R, example.camera.R)
+ torch.testing.assert_close(example_meta.camera.T, example.camera.T)
+ torch.testing.assert_close(
+ example_meta.camera.focal_length, example.camera.focal_length
+ )
+ torch.testing.assert_close(
+ example_meta.camera.principal_point, example.camera.principal_point
+ )
+
+ def test_meta_access_no_blobs(self):
+ dataset = SqlIndexDataset(
+ sqlite_metadata_file=METADATA_FILE,
+ remove_empty_masks=False,
+ subset_lists_file=SET_LIST_FILE,
+ subsets=["train"],
+ frame_data_builder_FrameDataBuilder_args={
+ "dataset_root": ".",
+ "box_crop": False, # required by blob-less accessor
+ },
+ )
+
+ self.assertIsNone(dataset.meta[0].image_rgb)
+ self.assertIsNone(dataset.meta[0].fg_probability)
+ self.assertIsNone(dataset.meta[0].depth_map)
+ self.assertIsNone(dataset.meta[0].sequence_point_cloud)
+ self.assertIsNotNone(dataset.meta[0].camera)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_srn.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_srn.py
new file mode 100644
index 0000000000000000000000000000000000000000..311bbaa6dbfa86e579754a31a45adccacd3cc34e
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_srn.py
@@ -0,0 +1,121 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+
+import torch
+from pytorch3d.implicitron.models.generic_model import GenericModel
+from pytorch3d.implicitron.models.implicit_function.scene_representation_networks import (
+ SRNHyperNetImplicitFunction,
+ SRNImplicitFunction,
+ SRNPixelGenerator,
+)
+from pytorch3d.implicitron.models.renderer.ray_sampler import ImplicitronRayBundle
+from pytorch3d.implicitron.tools.config import get_default_args
+from pytorch3d.renderer import PerspectiveCameras
+
+from tests.common_testing import TestCaseMixin
+
+_BATCH_SIZE: int = 3
+_N_RAYS: int = 100
+_N_POINTS_ON_RAY: int = 10
+
+
+class TestSRN(TestCaseMixin, unittest.TestCase):
+ def setUp(self) -> None:
+ torch.manual_seed(42)
+ get_default_args(SRNHyperNetImplicitFunction)
+ get_default_args(SRNImplicitFunction)
+
+ def test_pixel_generator(self):
+ SRNPixelGenerator()
+
+ def _get_bundle(self, *, device) -> ImplicitronRayBundle:
+ origins = torch.rand(_BATCH_SIZE, _N_RAYS, 3, device=device)
+ directions = torch.rand(_BATCH_SIZE, _N_RAYS, 3, device=device)
+ lengths = torch.rand(_BATCH_SIZE, _N_RAYS, _N_POINTS_ON_RAY, device=device)
+ bundle = ImplicitronRayBundle(
+ lengths=lengths,
+ origins=origins,
+ directions=directions,
+ xys=None,
+ camera_ids=None,
+ camera_counts=None,
+ )
+ return bundle
+
+ def test_srn_implicit_function(self):
+ implicit_function = SRNImplicitFunction()
+ device = torch.device("cpu")
+ bundle = self._get_bundle(device=device)
+ rays_densities, rays_colors = implicit_function(ray_bundle=bundle)
+ out_features = implicit_function.raymarch_function.out_features
+ self.assertEqual(
+ rays_densities.shape,
+ (_BATCH_SIZE, _N_RAYS, _N_POINTS_ON_RAY, out_features),
+ )
+ self.assertIsNone(rays_colors)
+
+ def test_srn_hypernet_implicit_function(self):
+ # TODO investigate: If latent_dim_hypernet=0, why does this crash and dump core?
+ latent_dim_hypernet = 39
+ device = torch.device("cuda:0")
+ implicit_function = SRNHyperNetImplicitFunction(
+ latent_dim_hypernet=latent_dim_hypernet
+ )
+ implicit_function.to(device)
+ global_code = torch.rand(_BATCH_SIZE, latent_dim_hypernet, device=device)
+ bundle = self._get_bundle(device=device)
+ rays_densities, rays_colors = implicit_function(
+ ray_bundle=bundle, global_code=global_code
+ )
+ out_features = implicit_function.hypernet.out_features
+ self.assertEqual(
+ rays_densities.shape,
+ (_BATCH_SIZE, _N_RAYS, _N_POINTS_ON_RAY, out_features),
+ )
+ self.assertIsNone(rays_colors)
+
+ @torch.no_grad()
+ def test_lstm(self):
+ args = get_default_args(GenericModel)
+ args.render_image_height = 80
+ args.render_image_width = 80
+ args.implicit_function_class_type = "SRNImplicitFunction"
+ args.renderer_class_type = "LSTMRenderer"
+ args.raysampler_class_type = "NearFarRaySampler"
+ args.raysampler_NearFarRaySampler_args.n_pts_per_ray_training = 1
+ args.raysampler_NearFarRaySampler_args.n_pts_per_ray_evaluation = 1
+ args.renderer_LSTMRenderer_args.bg_color = [0.4, 0.4, 0.2]
+ gm = GenericModel(**args)
+
+ camera = PerspectiveCameras()
+ image = gm.forward(
+ camera=camera,
+ image_rgb=None,
+ fg_probability=None,
+ sequence_name="",
+ mask_crop=None,
+ depth_map=None,
+ )["images_render"]
+ self.assertEqual(image.shape, (1, 3, 80, 80))
+ self.assertGreater(image.max(), 0.8)
+
+ # Force everything to be background
+ pixel_generator = gm._implicit_functions[0]._fn.pixel_generator
+ pixel_generator._density_layer.weight.zero_()
+ pixel_generator._density_layer.bias.fill_(-1.0e6)
+
+ image = gm.forward(
+ camera=camera,
+ image_rgb=None,
+ fg_probability=None,
+ sequence_name="",
+ mask_crop=None,
+ depth_map=None,
+ )["images_render"]
+ self.assertConstant(image[:, :2], 0.4)
+ self.assertConstant(image[:, 2], 0.2)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_types.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..aff749be56c77353caa3c9cffae8e19a257007e3
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_types.py
@@ -0,0 +1,97 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import dataclasses
+import unittest
+from typing import Dict, List, NamedTuple, Tuple
+
+from pytorch3d.implicitron.dataset import types
+from pytorch3d.implicitron.dataset.types import FrameAnnotation
+
+
+class _NT(NamedTuple):
+ annot: FrameAnnotation
+
+
+class TestDatasetTypes(unittest.TestCase):
+ def setUp(self):
+ self.entry = FrameAnnotation(
+ frame_number=23,
+ sequence_name="1",
+ frame_timestamp=1.2,
+ image=types.ImageAnnotation(path="/tmp/1.jpg", size=(224, 224)),
+ mask=types.MaskAnnotation(path="/tmp/1.png", mass=42.0),
+ viewpoint=types.ViewpointAnnotation(
+ R=(
+ (1, 0, 0),
+ (1, 0, 0),
+ (1, 0, 0),
+ ),
+ T=(0, 0, 0),
+ principal_point=(100, 100),
+ focal_length=(200, 200),
+ ),
+ )
+
+ def test_asdict_rec(self):
+ first = [dataclasses.asdict(self.entry)]
+ second = types._asdict_rec([self.entry])
+ self.assertEqual(first, second)
+
+ def test_parsing(self):
+ """Test that we handle collections enclosing dataclasses."""
+
+ dct = dataclasses.asdict(self.entry)
+
+ parsed = types._dataclass_from_dict(dct, FrameAnnotation)
+ self.assertEqual(parsed, self.entry)
+
+ # namedtuple
+ parsed = types._dataclass_from_dict(_NT(dct), _NT)
+ self.assertEqual(parsed.annot, self.entry)
+
+ # tuple
+ parsed = types._dataclass_from_dict((dct,), Tuple[FrameAnnotation])
+ self.assertEqual(parsed, (self.entry,))
+
+ # list
+ parsed = types._dataclass_from_dict(
+ [
+ dct,
+ ],
+ List[FrameAnnotation],
+ )
+ self.assertEqual(
+ parsed,
+ [
+ self.entry,
+ ],
+ )
+
+ # dict
+ parsed = types._dataclass_from_dict({"key": dct}, Dict[str, FrameAnnotation])
+ self.assertEqual(parsed, {"key": self.entry})
+
+ def test_parsing_vectorized(self):
+ dct = dataclasses.asdict(self.entry)
+
+ self._compare_with_scalar(dct, FrameAnnotation)
+ self._compare_with_scalar(_NT(dct), _NT)
+ self._compare_with_scalar((dct,), Tuple[FrameAnnotation])
+ self._compare_with_scalar([dct], List[FrameAnnotation])
+ self._compare_with_scalar({"key": dct}, Dict[str, FrameAnnotation])
+
+ dct2 = dct.copy()
+ dct2["meta"] = {"aux": 76}
+ self._compare_with_scalar(dct2, FrameAnnotation)
+
+ def _compare_with_scalar(self, obj, typeannot, repeat=3):
+ input = [obj] * 3
+ vect_output = types._dataclass_list_from_dict_list(input, typeannot)
+ self.assertEqual(len(input), repeat)
+ gt = types._dataclass_from_dict(obj, typeannot)
+ self.assertTrue(all(res == gt for res in vect_output))
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_viewsampling.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_viewsampling.py
new file mode 100644
index 0000000000000000000000000000000000000000..4094bf4ab7f762da027bdf7511feda23b72af119
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_viewsampling.py
@@ -0,0 +1,270 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import unittest
+
+import pytorch3d as pt3d
+import torch
+from pytorch3d.implicitron.models.view_pooler.view_sampler import ViewSampler
+from pytorch3d.implicitron.tools.config import expand_args_fields
+
+
+class TestViewsampling(unittest.TestCase):
+ def setUp(self):
+ torch.manual_seed(42)
+ expand_args_fields(ViewSampler)
+
+ def _init_view_sampler_problem(self, random_masks):
+ """
+ Generates a view-sampling problem:
+ - 4 source views, 1st/2nd from the first sequence 'seq1', the rest from 'seq2'
+ - 3 sets of 3D points from sequences 'seq1', 'seq2', 'seq2' respectively.
+ - first 50 points in each batch correctly project to the source views,
+ while the remaining 50 do not land in any projection plane.
+ - each source view is labeled with image feature tensors of shape 7x100x50,
+ where all elements of the n-th tensor are set to `n+1`.
+ - the elements of the source view masks are either set to random binary number
+ (if `random_masks==True`), or all set to 1 (`random_masks==False`).
+ - the source view cameras are uniformly distributed on a unit circle
+ in the x-z plane and look at (0,0,0).
+ """
+ seq_id_camera = ["seq1", "seq1", "seq2", "seq2"]
+ seq_id_pts = ["seq1", "seq2", "seq2"]
+ pts_batch = 3
+ n_pts = 100
+ n_views = 4
+ fdim = 7
+ H = 100
+ W = 50
+
+ # points that land into the projection planes of all cameras
+ pts_inside = (
+ torch.nn.functional.normalize(
+ torch.randn(pts_batch, n_pts // 2, 3, device="cuda"),
+ dim=-1,
+ )
+ * 0.1
+ )
+
+ # move the outside points far above the scene
+ pts_outside = pts_inside.clone()
+ pts_outside[:, :, 1] += 1e8
+ pts = torch.cat([pts_inside, pts_outside], dim=1)
+
+ R, T = pt3d.renderer.look_at_view_transform(
+ dist=1.0,
+ elev=0.0,
+ azim=torch.linspace(0, 360, n_views + 1)[:n_views],
+ degrees=True,
+ device=pts.device,
+ )
+ focal_length = R.new_ones(n_views, 2)
+ principal_point = R.new_zeros(n_views, 2)
+ camera = pt3d.renderer.PerspectiveCameras(
+ R=R,
+ T=T,
+ focal_length=focal_length,
+ principal_point=principal_point,
+ device=pts.device,
+ )
+
+ feats_map = torch.arange(n_views, device=pts.device, dtype=pts.dtype) + 1
+ feats = {"feats": feats_map[:, None, None, None].repeat(1, fdim, H, W)}
+
+ masks = (
+ torch.rand(n_views, 1, H, W, device=pts.device, dtype=pts.dtype) > 0.5
+ ).type_as(R)
+
+ if not random_masks:
+ masks[:] = 1.0
+
+ return pts, camera, feats, masks, seq_id_camera, seq_id_pts
+
+ def test_compare_with_naive(self):
+ """
+ Compares the outputs of the efficient ViewSampler module with a
+ naive implementation.
+ """
+
+ (
+ pts,
+ camera,
+ feats,
+ masks,
+ seq_id_camera,
+ seq_id_pts,
+ ) = self._init_view_sampler_problem(True)
+
+ for masked_sampling in (True, False):
+ feats_sampled_n, masks_sampled_n = _view_sample_naive(
+ pts,
+ seq_id_pts,
+ camera,
+ seq_id_camera,
+ feats,
+ masks,
+ masked_sampling,
+ )
+ # make sure we generate the constructor for ViewSampler
+ expand_args_fields(ViewSampler)
+ view_sampler = ViewSampler(masked_sampling=masked_sampling)
+ feats_sampled, masks_sampled = view_sampler(
+ pts=pts,
+ seq_id_pts=seq_id_pts,
+ camera=camera,
+ seq_id_camera=seq_id_camera,
+ feats=feats,
+ masks=masks,
+ )
+ for k in feats_sampled.keys():
+ self.assertTrue(torch.allclose(feats_sampled[k], feats_sampled_n[k]))
+ self.assertTrue(torch.allclose(masks_sampled, masks_sampled_n))
+
+ def test_viewsampling(self):
+ """
+ Generates a viewsampling problem with predictable outcome, and compares
+ the ViewSampler's output to the expected result.
+ """
+
+ (
+ pts,
+ camera,
+ feats,
+ masks,
+ seq_id_camera,
+ seq_id_pts,
+ ) = self._init_view_sampler_problem(False)
+
+ expand_args_fields(ViewSampler)
+
+ for masked_sampling in (True, False):
+
+ view_sampler = ViewSampler(masked_sampling=masked_sampling)
+
+ feats_sampled, masks_sampled = view_sampler(
+ pts=pts,
+ seq_id_pts=seq_id_pts,
+ camera=camera,
+ seq_id_camera=seq_id_camera,
+ feats=feats,
+ masks=masks,
+ )
+
+ n_views = camera.R.shape[0]
+ n_pts = pts.shape[1]
+ feat_dim = feats["feats"].shape[1]
+ pts_batch = pts.shape[0]
+ n_pts_away = n_pts // 2
+
+ for pts_i in range(pts_batch):
+ for view_i in range(n_views):
+ if seq_id_pts[pts_i] != seq_id_camera[view_i]:
+ # points / cameras come from different sequences
+ gt_masks = pts.new_zeros(n_pts, 1)
+ gt_feats = pts.new_zeros(n_pts, feat_dim)
+ else:
+ gt_masks = pts.new_ones(n_pts, 1)
+ gt_feats = pts.new_ones(n_pts, feat_dim) * (view_i + 1)
+ gt_feats[n_pts_away:] = 0.0
+ if masked_sampling:
+ gt_masks[n_pts_away:] = 0.0
+
+ for k in feats_sampled:
+ self.assertTrue(
+ torch.allclose(
+ feats_sampled[k][pts_i, view_i],
+ gt_feats,
+ )
+ )
+ self.assertTrue(
+ torch.allclose(
+ masks_sampled[pts_i, view_i],
+ gt_masks,
+ )
+ )
+
+
+def _view_sample_naive(
+ pts,
+ seq_id_pts,
+ camera,
+ seq_id_camera,
+ feats,
+ masks,
+ masked_sampling,
+):
+ """
+ A naive implementation of the forward pass of ViewSampler.
+ Refer to ViewSampler's docstring for description of the arguments.
+ """
+
+ pts_batch = pts.shape[0]
+ n_views = camera.R.shape[0]
+ n_pts = pts.shape[1]
+
+ feats_sampled = [[[] for _ in range(n_views)] for _ in range(pts_batch)]
+ masks_sampled = [[[] for _ in range(n_views)] for _ in range(pts_batch)]
+
+ for pts_i in range(pts_batch):
+ for view_i in range(n_views):
+ if seq_id_pts[pts_i] != seq_id_camera[view_i]:
+ # points/cameras come from different sequences
+ feats_sampled_ = {
+ k: f.new_zeros(n_pts, f.shape[1]) for k, f in feats.items()
+ }
+ masks_sampled_ = masks.new_zeros(n_pts, 1)
+ else:
+ # same sequence of pts and cameras -> sample
+ feats_sampled_, masks_sampled_ = _sample_one_view_naive(
+ camera[view_i],
+ pts[pts_i],
+ {k: f[view_i] for k, f in feats.items()},
+ masks[view_i],
+ masked_sampling,
+ sampling_mode="bilinear",
+ )
+ feats_sampled[pts_i][view_i] = feats_sampled_
+ masks_sampled[pts_i][view_i] = masks_sampled_
+
+ masks_sampled_cat = torch.stack([torch.stack(m) for m in masks_sampled])
+ feats_sampled_cat = {}
+ for k in feats_sampled[0][0].keys():
+ feats_sampled_cat[k] = torch.stack(
+ [torch.stack([f_[k] for f_ in f]) for f in feats_sampled]
+ )
+ return feats_sampled_cat, masks_sampled_cat
+
+
+def _sample_one_view_naive(
+ camera,
+ pts,
+ feats,
+ masks,
+ masked_sampling,
+ sampling_mode="bilinear",
+):
+ """
+ Sample a single source view.
+ """
+ proj_ndc = camera.transform_points(pts[None])[None, ..., :-1] # 1 x 1 x n_pts x 2
+ feats_sampled = {
+ k: pt3d.renderer.ndc_grid_sample(f[None], proj_ndc, mode=sampling_mode).permute(
+ 0, 3, 1, 2
+ )[0, :, :, 0]
+ for k, f in feats.items()
+ } # n_pts x dim
+ if not masked_sampling:
+ n_pts = pts.shape[0]
+ masks_sampled = proj_ndc.new_ones(n_pts, 1)
+ else:
+ masks_sampled = pt3d.renderer.ndc_grid_sample(
+ masks[None],
+ proj_ndc,
+ mode=sampling_mode,
+ align_corners=False,
+ )[0, 0, 0, :][:, None]
+ return feats_sampled, masks_sampled
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_voxel_grid_implicit_function.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_voxel_grid_implicit_function.py
new file mode 100644
index 0000000000000000000000000000000000000000..9727ba982adcfca916728d51bf280be59735988c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_voxel_grid_implicit_function.py
@@ -0,0 +1,227 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import unittest
+
+import torch
+
+from omegaconf import DictConfig, OmegaConf
+from pytorch3d.implicitron.models.implicit_function.voxel_grid_implicit_function import (
+ VoxelGridImplicitFunction,
+)
+from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
+
+from pytorch3d.implicitron.tools.config import expand_args_fields, get_default_args
+from pytorch3d.renderer import ray_bundle_to_ray_points
+from tests.common_testing import TestCaseMixin
+
+
+class TestVoxelGridImplicitFunction(TestCaseMixin, unittest.TestCase):
+ def setUp(self) -> None:
+ torch.manual_seed(42)
+ expand_args_fields(VoxelGridImplicitFunction)
+
+ def _get_simple_implicit_function(self, scaffold_res=16):
+ default_cfg = get_default_args(VoxelGridImplicitFunction)
+ custom_cfg = DictConfig(
+ {
+ "voxel_grid_density_args": {
+ "voxel_grid_FullResolutionVoxelGrid_args": {"n_features": 7}
+ },
+ "decoder_density_class_type": "ElementwiseDecoder",
+ "decoder_color_class_type": "MLPDecoder",
+ "decoder_color_MLPDecoder_args": {
+ "network_args": {
+ "n_layers": 2,
+ "output_dim": 3,
+ "hidden_dim": 128,
+ }
+ },
+ "scaffold_resolution": (scaffold_res, scaffold_res, scaffold_res),
+ }
+ )
+ cfg = OmegaConf.merge(default_cfg, custom_cfg)
+ return VoxelGridImplicitFunction(**cfg)
+
+ def test_forward(self) -> None:
+ """
+ Test one forward of VoxelGridImplicitFunction.
+ """
+ func = self._get_simple_implicit_function()
+
+ n_grids, n_points = 10, 9
+ raybundle = ImplicitronRayBundle(
+ origins=torch.randn(n_grids, 2, 3, 3),
+ directions=torch.randn(n_grids, 2, 3, 3),
+ lengths=torch.randn(n_grids, 2, 3, n_points),
+ xys=0,
+ )
+ func(raybundle)
+
+ def test_scaffold_formation(self):
+ """
+ Test calculating the scaffold.
+
+        We define a custom density function and make the implicit function use it.
+ After calculating the scaffold we compare the density of our custom
+ density function with densities from the scaffold.
+ """
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ func = self._get_simple_implicit_function().to(device)
+ func.scaffold_max_pool_kernel_size = 1
+
+ def new_density(points):
+ """
+ Density function which returns 1 if p>(0.5, 0.5, 0.5) or
+ p < (-0.5, -0.5, -0.5) else 0
+ """
+ inshape = points.shape
+ points = points.view(-1, 3)
+ out = []
+ for p in points:
+ if torch.all(p > 0.5) or torch.all(p < -0.5):
+ out.append(torch.tensor([[1.0]]))
+ else:
+ out.append(torch.tensor([[0.0]]))
+ return torch.cat(out).view(*inshape[:-1], 1).to(device)
+
+ func._get_density = new_density
+ func._get_scaffold(0)
+
+ points = torch.tensor(
+ [
+ [0, 0, 0],
+ [1, 1, 1],
+ [1, 0, 0],
+ [0.1, 0, 0],
+ [10, 1, -1],
+ [-0.8, -0.7, -0.9],
+ ]
+ ).to(device)
+ expected = new_density(points).float().to(device)
+ assert torch.allclose(func.voxel_grid_scaffold(points), expected), (
+ func.voxel_grid_scaffold(points),
+ expected,
+ )
+
+ def test_scaffold_filtering(self, n_test_points=100):
+ """
+ Test that filtering points with scaffold works.
+
+ We define a scaffold and make the implicit function use it. We also
+ define new density and color functions which check that all passed
+ points are not in empty space (with scaffold function). In the end
+        we compare the result from the implicit function with one calculated
+        in plain Python; this checks that the points were merged correctly.
+ """
+ device = "cuda"
+ func = self._get_simple_implicit_function().to(device)
+
+ def scaffold(points):
+            """
+ Function to deterministically and randomly enough assign a point
+ to empty or occupied space.
+            Return 1 if the second decimal digit of the coordinate sum is odd, else 0.
+ """
+ return (
+ ((points.sum(dim=-1, keepdim=True) * 10**2 % 10).long() % 2) == 1
+ ).float()
+
+ def new_density(points):
+        # check that all points passed here are in occupied space
+ assert torch.all(scaffold(points)), (scaffold(points), points.shape)
+ return points.sum(dim=-1, keepdim=True)
+
+ def new_color(points, camera, directions, non_empty_points, num_points_per_ray):
+        # check that all points passed here are in occupied space
+ assert torch.all(scaffold(points)) # , (scaffold(points), points)
+ return points * 2
+
+ # check both computation paths that they contain only points
+ # which are not in empty space
+ func._get_density = new_density
+ func._get_color = new_color
+ func.voxel_grid_scaffold.forward = scaffold
+ func._scaffold_ready = True
+
+ bundle = ImplicitronRayBundle(
+ origins=torch.rand((n_test_points, 2, 1, 3), device=device),
+ directions=torch.rand((n_test_points, 2, 1, 3), device=device),
+ lengths=torch.rand((n_test_points, 2, 1, 4), device=device),
+ xys=None,
+ )
+ points = ray_bundle_to_ray_points(bundle)
+ result_density, result_color, _ = func(bundle)
+
+ # construct the wanted result 'by hand'
+ flat_points = points.view(-1, 3)
+ expected_result_density, expected_result_color = [], []
+ for point in flat_points:
+ if scaffold(point) == 1:
+ expected_result_density.append(point.sum(dim=-1, keepdim=True))
+ expected_result_color.append(point * 2)
+ else:
+ expected_result_density.append(point.new_zeros((1,)))
+ expected_result_color.append(point.new_zeros((3,)))
+ expected_result_density = torch.stack(expected_result_density, dim=0).view(
+ *points.shape[:-1], 1
+ )
+ expected_result_color = torch.stack(expected_result_color, dim=0).view(
+ *points.shape[:-1], 3
+ )
+
+        # check that the result is expected
+ assert torch.allclose(result_density, expected_result_density), (
+ result_density,
+ expected_result_density,
+ )
+ assert torch.allclose(result_color, expected_result_color), (
+ result_color,
+ expected_result_color,
+ )
+
+ def test_cropping(self, scaffold_res=9):
+ """
+ Tests whether implicit function finds the bounding box of the object and sends
+ correct min and max points to voxel grids for rescaling.
+ """
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ func = self._get_simple_implicit_function(scaffold_res=scaffold_res).to(device)
+
+ assert scaffold_res >= 8
+ div = (scaffold_res - 1) / 2
+ true_min_point = torch.tensor(
+ [-3 / div, 0 / div, -3 / div],
+ device=device,
+ )
+ true_max_point = torch.tensor(
+ [1 / div, 2 / div, 3 / div],
+ device=device,
+ )
+
+ def new_scaffold(points):
+ # 1 if between true_min and true_max point else 0
+ # return points.new_ones((*points.shape[:-1], 1))
+ return (
+ torch.logical_and(true_min_point <= points, points <= true_max_point)
+ .all(dim=-1)
+ .float()[..., None]
+ )
+
+ called_crop = []
+
+ def assert_min_max_points(min_point, max_point):
+ called_crop.append(1)
+ self.assertClose(min_point, true_min_point)
+ self.assertClose(max_point, true_max_point)
+
+ func.voxel_grid_density.crop_self = assert_min_max_points
+ func.voxel_grid_color.crop_self = assert_min_max_points
+ func.voxel_grid_scaffold.forward = new_scaffold
+ func._scaffold_ready = True
+ func._crop(epoch=0)
+ assert len(called_crop) == 2
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_voxel_grids.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_voxel_grids.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b47de7f952e7848a33898457936516c82bf92f8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/implicitron/test_voxel_grids.py
@@ -0,0 +1,860 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import unittest
+from typing import Optional, Tuple
+
+import torch
+from omegaconf import DictConfig, OmegaConf
+
+from pytorch3d.implicitron.models.implicit_function.utils import (
+ interpolate_line,
+ interpolate_plane,
+ interpolate_volume,
+)
+from pytorch3d.implicitron.models.implicit_function.voxel_grid import (
+ CPFactorizedVoxelGrid,
+ FullResolutionVoxelGrid,
+ VMFactorizedVoxelGrid,
+ VoxelGridModule,
+)
+
+from pytorch3d.implicitron.tools.config import expand_args_fields, get_default_args
+from tests.common_testing import TestCaseMixin
+
+
+class TestVoxelGrids(TestCaseMixin, unittest.TestCase):
+ """
+    Tests voxel grids by setting all elements to zero (queries should then
+    also return zero) and by setting all elements to one and checking
+    the result. Also tests the interpolation by 'manually' interpolating
+    samples one by one and comparing with the batched implementation.
+ """
+
+ def get_random_normalized_points(
+ self, n_grids, n_points=None, dimension=3
+ ) -> torch.Tensor:
+ middle_shape = torch.randint(1, 4, tuple(torch.randint(1, 5, size=(1,))))
+ # create random query points
+ return (
+ torch.rand(
+ n_grids, *(middle_shape if n_points is None else [n_points]), dimension
+ )
+ * 2
+ - 1
+ )
+
+ def _test_query_with_constant_init_cp(
+ self,
+ n_grids: int,
+ n_features: int,
+ n_components: int,
+ resolution: Tuple[int],
+ value: float = 1,
+ ) -> None:
+        # set everything to 'value' and do a query; the result should
+        # be of shape (n_grids, n_points, n_features) and be filled with n_components
+        # * value
+ grid = CPFactorizedVoxelGrid(
+ resolution_changes={0: resolution},
+ n_components=n_components,
+ n_features=n_features,
+ )
+ shapes = grid.get_shapes(epoch=0)
+
+ params = grid.values_type(
+ **{k: torch.ones(n_grids, *shapes[k]) * value for k in shapes}
+ )
+ points = self.get_random_normalized_points(n_grids)
+ assert torch.allclose(
+ grid.evaluate_local(points, params),
+ torch.ones(n_grids, *points.shape[1:-1], n_features) * n_components * value,
+ rtol=0.0001,
+ )
+
+ def _test_query_with_constant_init_vm(
+ self,
+ n_grids: int,
+ n_features: int,
+ resolution: Tuple[int],
+ n_components: Optional[int] = None,
+ distribution: Optional[Tuple[int]] = None,
+ value: float = 1,
+ n_points: int = 1,
+ ) -> None:
+ # set everything to 'value' and do query for elements
+ grid = VMFactorizedVoxelGrid(
+ n_features=n_features,
+ resolution_changes={0: resolution},
+ n_components=n_components,
+ distribution_of_components=distribution,
+ )
+ shapes = grid.get_shapes(epoch=0)
+ params = grid.values_type(
+ **{k: torch.ones(n_grids, *shapes[k]) * value for k in shapes}
+ )
+
+ expected_element = (
+ n_components * value if distribution is None else sum(distribution) * value
+ )
+ points = self.get_random_normalized_points(n_grids)
+ assert torch.allclose(
+ grid.evaluate_local(points, params),
+ torch.ones(n_grids, *points.shape[1:-1], n_features) * expected_element,
+ )
+
+ def _test_query_with_constant_init_full(
+ self,
+ n_grids: int,
+ n_features: int,
+ resolution: Tuple[int],
+ value: int = 1,
+ n_points: int = 1,
+ ) -> None:
+ # set everything to 'value' and do query for elements
+ grid = FullResolutionVoxelGrid(
+ n_features=n_features, resolution_changes={0: resolution}
+ )
+ shapes = grid.get_shapes(epoch=0)
+ params = grid.values_type(
+ **{k: torch.ones(n_grids, *shapes[k]) * value for k in shapes}
+ )
+
+ expected_element = value
+ points = self.get_random_normalized_points(n_grids)
+ assert torch.allclose(
+ grid.evaluate_local(points, params),
+ torch.ones(n_grids, *points.shape[1:-1], n_features) * expected_element,
+ )
+
+ def test_query_with_constant_init(self):
+ with self.subTest("Full"):
+ self._test_query_with_constant_init_full(
+ n_grids=5, n_features=6, resolution=(3, 4, 5)
+ )
+ with self.subTest("Full with 1 in dimensions"):
+ self._test_query_with_constant_init_full(
+ n_grids=5, n_features=1, resolution=(33, 41, 1)
+ )
+ with self.subTest("CP"):
+ self._test_query_with_constant_init_cp(
+ n_grids=5,
+ n_features=6,
+ n_components=7,
+ resolution=(3, 4, 5),
+ )
+ with self.subTest("CP with 1 in dimensions"):
+ self._test_query_with_constant_init_cp(
+ n_grids=2,
+ n_features=1,
+ n_components=3,
+ resolution=(3, 1, 1),
+ )
+ with self.subTest("VM with symetric distribution"):
+ self._test_query_with_constant_init_vm(
+ n_grids=6,
+ n_features=9,
+ resolution=(2, 12, 2),
+ n_components=12,
+ )
+ with self.subTest("VM with distribution"):
+ self._test_query_with_constant_init_vm(
+ n_grids=5,
+ n_features=1,
+ resolution=(5, 9, 7),
+ distribution=(33, 41, 1),
+ )
+
+ def test_query_with_zero_init(self):
+ with self.subTest("Query testing with zero init CPFactorizedVoxelGrid"):
+ self._test_query_with_constant_init_cp(
+ n_grids=5,
+ n_features=6,
+ n_components=7,
+ resolution=(3, 2, 5),
+ value=0,
+ )
+ with self.subTest("Query testing with zero init VMFactorizedVoxelGrid"):
+ self._test_query_with_constant_init_vm(
+ n_grids=2,
+ n_features=9,
+ resolution=(2, 11, 3),
+ n_components=3,
+ value=0,
+ )
+ with self.subTest("Query testing with zero init FullResolutionVoxelGrid"):
+ self._test_query_with_constant_init_full(
+ n_grids=4, n_features=2, resolution=(3, 3, 5), value=0
+ )
+
+ def setUp(self):
+ torch.manual_seed(42)
+ expand_args_fields(FullResolutionVoxelGrid)
+ expand_args_fields(CPFactorizedVoxelGrid)
+ expand_args_fields(VMFactorizedVoxelGrid)
+ expand_args_fields(VoxelGridModule)
+
+ def _interpolate_1D(
+ self, points: torch.Tensor, vectors: torch.Tensor
+ ) -> torch.Tensor:
+ """
+ interpolate vector from points, which are (batch, 1) and individual point is in [-1, 1]
+ """
+ result = []
+ _, _, width = vectors.shape
+ # transform from [-1, 1] to [0, width-1]
+ points = (points + 1) / 2 * (width - 1)
+ for vector, row in zip(vectors, points):
+ newrow = []
+ for x in row:
+ xf, xc = int(torch.floor(x)), int(torch.ceil(x))
+ itemf, itemc = vector[:, xf], vector[:, xc]
+ tmp = itemf * (xc - x) + itemc * (x - xf)
+ newrow.append(tmp[None, None, :])
+ result.append(torch.cat(newrow, dim=1))
+ return torch.cat(result)
+
+ def _interpolate_2D(
+ self, points: torch.Tensor, matrices: torch.Tensor
+ ) -> torch.Tensor:
+ """
+ interpolate matrix from points, which are (batch, 2) and individual point is in [-1, 1]
+ """
+ result = []
+ n_grids, _, width, height = matrices.shape
+ points = (points + 1) / 2 * (torch.tensor([[[width, height]]]) - 1)
+ for matrix, row in zip(matrices, points):
+ newrow = []
+ for x, y in row:
+ xf, xc = int(torch.floor(x)), int(torch.ceil(x))
+ yf, yc = int(torch.floor(y)), int(torch.ceil(y))
+ itemff, itemfc = matrix[:, xf, yf], matrix[:, xf, yc]
+ itemcf, itemcc = matrix[:, xc, yf], matrix[:, xc, yc]
+ itemf = itemff * (xc - x) + itemcf * (x - xf)
+ itemc = itemfc * (xc - x) + itemcc * (x - xf)
+ tmp = itemf * (yc - y) + itemc * (y - yf)
+ newrow.append(tmp[None, None, :])
+ result.append(torch.cat(newrow, dim=1))
+ return torch.cat(result)
+
+ def _interpolate_3D(
+ self, points: torch.Tensor, tensors: torch.Tensor
+ ) -> torch.Tensor:
+ """
+ interpolate tensors from points, which are (batch, 3) and individual point is in [-1, 1]
+ """
+ result = []
+ _, _, width, height, depth = tensors.shape
+ batch_normalized_points = (
+ (points + 1) / 2 * (torch.tensor([[[width, height, depth]]]) - 1)
+ )
+ batch_points = points
+
+ for tensor, points, normalized_points in zip(
+ tensors, batch_points, batch_normalized_points
+ ):
+ newrow = []
+ for (x, y, z), (_, _, nz) in zip(points, normalized_points):
+ zf, zc = int(torch.floor(nz)), int(torch.ceil(nz))
+ itemf = self._interpolate_2D(
+ points=torch.tensor([[[x, y]]]), matrices=tensor[None, :, :, :, zf]
+ )
+ itemc = self._interpolate_2D(
+ points=torch.tensor([[[x, y]]]), matrices=tensor[None, :, :, :, zc]
+ )
+ tmp = self._interpolate_1D(
+ points=torch.tensor([[[z]]]),
+ vectors=torch.cat((itemf, itemc), dim=1).permute(0, 2, 1),
+ )
+ newrow.append(tmp)
+ result.append(torch.cat(newrow, dim=1))
+ return torch.cat(result)
+
+ def test_interpolation(self):
+
+ with self.subTest("1D interpolation"):
+ points = self.get_random_normalized_points(
+ n_grids=4, n_points=5, dimension=1
+ )
+ vector = torch.randn(size=(4, 3, 2))
+ assert torch.allclose(
+ self._interpolate_1D(points, vector),
+ interpolate_line(
+ points,
+ vector,
+ align_corners=True,
+ padding_mode="zeros",
+ mode="bilinear",
+ ),
+ rtol=0.0001,
+ atol=0.0001,
+ )
+ with self.subTest("2D interpolation"):
+ points = self.get_random_normalized_points(
+ n_grids=4, n_points=5, dimension=2
+ )
+ matrix = torch.randn(size=(4, 2, 3, 5))
+ assert torch.allclose(
+ self._interpolate_2D(points, matrix),
+ interpolate_plane(
+ points,
+ matrix,
+ align_corners=True,
+ padding_mode="zeros",
+ mode="bilinear",
+ ),
+ rtol=0.0001,
+ atol=0.0001,
+ )
+
+ with self.subTest("3D interpolation"):
+ points = self.get_random_normalized_points(
+ n_grids=4, n_points=5, dimension=3
+ )
+ tensor = torch.randn(size=(4, 5, 2, 7, 2))
+ assert torch.allclose(
+ self._interpolate_3D(points, tensor),
+ interpolate_volume(
+ points,
+ tensor,
+ align_corners=True,
+ padding_mode="zeros",
+ mode="bilinear",
+ ),
+ rtol=0.0001,
+ atol=0.0001,
+ )
+
+ def test_floating_point_query(self):
+ """
+ test querying the voxel grids on some float positions
+ """
+ with self.subTest("FullResolution"):
+ grid = FullResolutionVoxelGrid(
+ n_features=1, resolution_changes={0: (1, 1, 1)}
+ )
+ params = grid.values_type(**grid.get_shapes(epoch=0))
+ params.voxel_grid = torch.tensor(
+ [
+ [
+ [[[1, 3], [5, 7]], [[9, 11], [13, 15]]],
+ [[[2, 4], [6, 8]], [[10, 12], [14, 16]]],
+ ],
+ [
+ [[[17, 18], [19, 20]], [[21, 22], [23, 24]]],
+ [[[25, 26], [27, 28]], [[29, 30], [31, 32]]],
+ ],
+ ],
+ dtype=torch.float,
+ )
+ points = (
+ torch.tensor(
+ [
+ [
+ [1, 0, 1],
+ [0.5, 1, 1],
+ [1 / 3, 1 / 3, 2 / 3],
+ ],
+ [
+ [0, 1, 1],
+ [0, 0.5, 1],
+ [1 / 4, 1 / 4, 3 / 4],
+ ],
+ ]
+ )
+ / torch.tensor([[1.0, 1, 1]])
+ * 2
+ - 1
+ )
+ expected_result = torch.tensor(
+ [
+ [[11, 12], [11, 12], [6.333333, 7.3333333]],
+ [[20, 28], [19, 27], [19.25, 27.25]],
+ ]
+ )
+
+ assert torch.allclose(
+ grid.evaluate_local(points, params),
+ expected_result,
+ rtol=0.0001,
+ atol=0.0001,
+ ), grid.evaluate_local(points, params)
+ with self.subTest("CP"):
+ grid = CPFactorizedVoxelGrid(
+ n_features=1, resolution_changes={0: (1, 1, 1)}, n_components=3
+ )
+ params = grid.values_type(**grid.get_shapes(epoch=0))
+ params.vector_components_x = torch.tensor(
+ [
+ [[1, 2], [10.5, 20.5]],
+ [[10, 20], [2, 4]],
+ ]
+ )
+ params.vector_components_y = torch.tensor(
+ [
+ [[3, 4, 5], [30.5, 40.5, 50.5]],
+ [[30, 40, 50], [1, 3, 5]],
+ ]
+ )
+ params.vector_components_z = torch.tensor(
+ [
+ [[6, 7, 8, 9], [60.5, 70.5, 80.5, 90.5]],
+ [[60, 70, 80, 90], [6, 7, 8, 9]],
+ ]
+ )
+ params.basis_matrix = torch.tensor(
+ [
+ [[2.0], [2.0]],
+ [[1.0], [2.0]],
+ ]
+ )
+ points = (
+ torch.tensor(
+ [
+ [
+ [0, 2, 2],
+ [1, 2, 0.25],
+ [0.5, 0.5, 1],
+ [1 / 3, 2 / 3, 2 + 1 / 3],
+ ],
+ [
+ [1, 0, 1],
+ [0.5, 2, 2],
+ [1, 0.5, 0.5],
+ [1 / 4, 3 / 4, 2 + 1 / 4],
+ ],
+ ]
+ )
+ / torch.tensor([[[1.0, 2, 3]]])
+ * 2
+ - 1
+ )
+ expected_result_matrix = torch.tensor(
+ [
+ [[85450.25], [130566.5], [77658.75], [86285.422]],
+ [[42056], [60240], [45604], [38775]],
+ ]
+ )
+ expected_result_sum = torch.tensor(
+ [
+ [[42725.125], [65283.25], [38829.375], [43142.711]],
+ [[42028], [60120], [45552], [38723.4375]],
+ ]
+ )
+ with self.subTest("CP with basis_matrix reduction"):
+ assert torch.allclose(
+ grid.evaluate_local(points, params),
+ expected_result_matrix,
+ rtol=0.0001,
+ atol=0.0001,
+ )
+ del params.basis_matrix
+ with self.subTest("CP with sum reduction"):
+ assert torch.allclose(
+ grid.evaluate_local(points, params),
+ expected_result_sum,
+ rtol=0.0001,
+ atol=0.0001,
+ )
+
+ with self.subTest("VM"):
+ grid = VMFactorizedVoxelGrid(
+ n_features=1, resolution_changes={0: (1, 1, 1)}, n_components=3
+ )
+ params = VMFactorizedVoxelGrid.values_type(**grid.get_shapes(epoch=0))
+ params.matrix_components_xy = torch.tensor(
+ [
+ [[[1, 2], [3, 4]], [[19, 20], [21, 22.0]]],
+ [[[35, 36], [37, 38]], [[39, 40], [41, 42]]],
+ ]
+ )
+ params.matrix_components_xz = torch.tensor(
+ [
+ [[[7, 8], [9, 10]], [[25, 26], [27, 28.0]]],
+ [[[43, 44], [45, 46]], [[47, 48], [49, 50]]],
+ ]
+ )
+ params.matrix_components_yz = torch.tensor(
+ [
+ [[[13, 14], [15, 16]], [[31, 32], [33, 34.0]]],
+ [[[51, 52], [53, 54]], [[55, 56], [57, 58.0]]],
+ ]
+ )
+
+ params.vector_components_z = torch.tensor(
+ [
+ [[5, 6], [23, 24.0]],
+ [[59, 60], [61, 62]],
+ ]
+ )
+ params.vector_components_y = torch.tensor(
+ [
+ [[11, 12], [29, 30.0]],
+ [[63, 64], [65, 66]],
+ ]
+ )
+ params.vector_components_x = torch.tensor(
+ [
+ [[17, 18], [35, 36.0]],
+ [[67, 68], [69, 70.0]],
+ ]
+ )
+
+ params.basis_matrix = torch.tensor(
+ [
+ [2, 2, 2, 2, 2, 2.0],
+ [1, 2, 1, 2, 1, 2.0],
+ ]
+ )[:, :, None]
+ points = (
+ torch.tensor(
+ [
+ [
+ [1, 0, 1],
+ [0.5, 1, 1],
+ [1 / 3, 1 / 3, 2 / 3],
+ ],
+ [
+ [0, 1, 0],
+ [0, 0, 0],
+ [0, 1, 0],
+ ],
+ ]
+ )
+ / torch.tensor([[[1.0, 1, 1]]])
+ * 2
+ - 1
+ )
+ expected_result_matrix = torch.tensor(
+ [
+ [[5696], [5854], [5484.888]],
+ [[27377], [26649], [27377]],
+ ]
+ )
+ expected_result_sum = torch.tensor(
+ [
+ [[2848], [2927], [2742.444]],
+ [[17902], [17420], [17902]],
+ ]
+ )
+ with self.subTest("VM with basis_matrix reduction"):
+ assert torch.allclose(
+ grid.evaluate_local(points, params),
+ expected_result_matrix,
+ rtol=0.0001,
+ atol=0.0001,
+ )
+ del params.basis_matrix
+ with self.subTest("VM with sum reduction"):
+ assert torch.allclose(
+ grid.evaluate_local(points, params),
+ expected_result_sum,
+ rtol=0.0001,
+ atol=0.0001,
+ ), grid.evaluate_local(points, params)
+
+ def test_forward_with_small_init_std(self):
+ """
+ Test does the grid return small values if it is initialized with small
+ mean and small standard deviation.
+ """
+
+ def test(cls, **kwargs):
+ with self.subTest(cls.__name__):
+ n_grids = 3
+ grid = cls(**kwargs)
+ shapes = grid.get_shapes(epoch=0)
+ params = cls.values_type(
+ **{
+ k: torch.normal(mean=torch.zeros(n_grids, *shape), std=0.0001)
+ for k, shape in shapes.items()
+ }
+ )
+ points = self.get_random_normalized_points(n_grids=n_grids, n_points=3)
+ max_expected_result = torch.zeros((len(points), 10)) + 1e-2
+ assert torch.all(
+ grid.evaluate_local(points, params) < max_expected_result
+ )
+
+ test(
+ FullResolutionVoxelGrid,
+ resolution_changes={0: (4, 6, 9)},
+ n_features=10,
+ )
+ test(
+ CPFactorizedVoxelGrid,
+ resolution_changes={0: (4, 6, 9)},
+ n_features=10,
+ n_components=3,
+ )
+ test(
+ VMFactorizedVoxelGrid,
+ resolution_changes={0: (4, 6, 9)},
+ n_features=10,
+ n_components=3,
+ )
+
+ def test_voxel_grid_module_location(self, n_times=10):
+ """
+        This checks that the module uses the locator (local/world transform) correctly.
+
+ If we know that voxel grids work for (x, y, z) in local coordinates
+ to test if the VoxelGridModule does not have permuted dimensions we
+ create local coordinates, pass them through verified voxelgrids and
+ compare the result with the result that we get when we convert
+ coordinates to world and pass them through the VoxelGridModule
+ """
+ for _ in range(n_times):
+ extents = tuple(torch.randint(1, 50, size=(3,)).tolist())
+
+ grid = VoxelGridModule(extents=extents)
+ local_point = torch.rand(1, 3) * 2 - 1
+ world_point = local_point * torch.tensor(extents) / 2
+ grid_values = grid.voxel_grid.values_type(**grid.params)
+
+ assert torch.allclose(
+ grid(world_point)[0, 0],
+ grid.voxel_grid.evaluate_local(local_point[None], grid_values)[0, 0, 0],
+ rtol=0.0001,
+ atol=0.0001,
+ )
+
+ def test_resolution_change(self, n_times=10):
+ for _ in range(n_times):
+ n_grids, n_features, n_components = torch.randint(1, 3, (3,)).tolist()
+ resolution = torch.randint(3, 10, (3,)).tolist()
+ resolution2 = torch.randint(3, 10, (3,)).tolist()
+ resolution_changes = {0: resolution, 1: resolution2}
+ n_components *= 3
+ for cls, kwargs in (
+ (
+ FullResolutionVoxelGrid,
+ {
+ "n_features": n_features,
+ "resolution_changes": resolution_changes,
+ },
+ ),
+ (
+ CPFactorizedVoxelGrid,
+ {
+ "n_features": n_features,
+ "resolution_changes": resolution_changes,
+ "n_components": n_components,
+ },
+ ),
+ (
+ VMFactorizedVoxelGrid,
+ {
+ "n_features": n_features,
+ "resolution_changes": resolution_changes,
+ "n_components": n_components,
+ },
+ ),
+ ):
+ with self.subTest(cls.__name__):
+ grid = cls(**kwargs)
+ self.assertEqual(grid.get_resolution(epoch=0), resolution)
+ shapes = grid.get_shapes(epoch=0)
+ params = {
+ name: torch.randn((n_grids, *shape))
+ for name, shape in shapes.items()
+ }
+ grid_values = grid.values_type(**params)
+ grid_values_changed_resolution, change = grid.change_resolution(
+ epoch=1,
+ grid_values=grid_values,
+ mode="linear",
+ )
+ assert change
+ self.assertEqual(grid.get_resolution(epoch=1), resolution2)
+ shapes_changed_resolution = grid.get_shapes(epoch=1)
+ for name, expected_shape in shapes_changed_resolution.items():
+ shape = getattr(grid_values_changed_resolution, name).shape
+ self.assertEqual(expected_shape, shape[1:])
+
+ with self.subTest("VoxelGridModule"):
+ n_changes = 10
+ grid = VoxelGridModule()
+ resolution_changes = {i: (i + 2, i + 2, i + 2) for i in range(n_changes)}
+ grid.voxel_grid = FullResolutionVoxelGrid(
+ resolution_changes=resolution_changes
+ )
+ epochs, apply_func = grid.subscribe_to_epochs()
+ self.assertEqual(list(range(n_changes)), list(epochs))
+ for epoch in epochs:
+ change = apply_func(epoch)
+ assert change
+ self.assertEqual(
+ resolution_changes[epoch],
+ grid.voxel_grid.get_resolution(epoch=epoch),
+ )
+
+ def _get_min_max_tuple(
+ self, n=4, denominator_base=2, max_exponent=6, add_edge_cases=True
+ ):
+ if add_edge_cases:
+ n -= 2
+
+ def get_pair():
+ def get_one():
+ sign = -1 if torch.rand((1,)) < 0.5 else 1
+ exponent = int(torch.randint(1, max_exponent, (1,)))
+ denominator = denominator_base**exponent
+ numerator = int(torch.randint(1, denominator, (1,)))
+ return sign * numerator / denominator * 1.0
+
+ while True:
+ a, b = get_one(), get_one()
+ if a < b:
+ return a, b
+
+ for _ in range(n):
+ a, b, c = get_pair(), get_pair(), get_pair()
+ yield torch.tensor((a[0], b[0], c[0])), torch.tensor((a[1], b[1], c[1]))
+ if add_edge_cases:
+ yield torch.tensor((-1.0, -1.0, -1.0)), torch.tensor((1.0, 1.0, 1.0))
+ yield torch.tensor([0.0, 0.0, 0.0]), torch.tensor([1.0, 1.0, 1.0])
+
+ def test_cropping_voxel_grids(self, n_times=1):
+ """
+ If the grid is 1d and we crop at A and B
+ ---------A---------B---
+ and choose point p between them
+ ---------A-----p---B---
+ it can be represented as
+ p = A + (B-A) * p_c
+ where p_c is local coordinate of p in cropped grid. So we now just see
+ if grid evaluated at p and cropped grid evaluated at p_c agree.
+ """
+ for points_min, points_max in self._get_min_max_tuple(n=10):
+ n_grids, n_features, n_components = torch.randint(1, 3, (3,)).tolist()
+ n_grids = 1
+ n_components *= 3
+ resolution_changes = {0: (128 + 1, 128 + 1, 128 + 1)}
+ for cls, kwargs in (
+ (
+ FullResolutionVoxelGrid,
+ {
+ "n_features": n_features,
+ "resolution_changes": resolution_changes,
+ },
+ ),
+ (
+ CPFactorizedVoxelGrid,
+ {
+ "n_features": n_features,
+ "resolution_changes": resolution_changes,
+ "n_components": n_components,
+ },
+ ),
+ (
+ VMFactorizedVoxelGrid,
+ {
+ "n_features": n_features,
+ "resolution_changes": resolution_changes,
+ "n_components": n_components,
+ },
+ ),
+ ):
+ with self.subTest(
+ cls.__name__ + f" points {points_min} and {points_max}"
+ ):
+ grid = cls(**kwargs)
+ shapes = grid.get_shapes(epoch=0)
+ params = {
+ name: torch.normal(
+ mean=torch.zeros((n_grids, *shape)),
+ std=1,
+ )
+ for name, shape in shapes.items()
+ }
+ grid_values = grid.values_type(**params)
+
+ grid_values_cropped = grid.crop_local(
+ points_min, points_max, grid_values
+ )
+
+ points_local_cropped = torch.rand((1, n_times, 3))
+ points_local = (
+ points_min[None, None]
+ + (points_max - points_min)[None, None] * points_local_cropped
+ )
+ points_local_cropped = (points_local_cropped - 0.5) * 2
+
+ pred = grid.evaluate_local(points_local, grid_values)
+ pred_cropped = grid.evaluate_local(
+ points_local_cropped, grid_values_cropped
+ )
+
+ assert torch.allclose(pred, pred_cropped, rtol=1e-4, atol=1e-4), (
+ pred,
+ pred_cropped,
+ points_local,
+ points_local_cropped,
+ )
+
+ def test_cropping_voxel_grid_module(self, n_times=1):
+ for points_min, points_max in self._get_min_max_tuple(n=5, max_exponent=5):
+ extents = torch.ones((3,)) * 2
+ translation = torch.ones((3,)) * 0.2
+ points_min += translation
+ points_max += translation
+
+ default_cfg = get_default_args(VoxelGridModule)
+ custom_cfg = DictConfig(
+ {
+ "extents": tuple(float(e) for e in extents),
+ "translation": tuple(float(t) for t in translation),
+ "voxel_grid_FullResolutionVoxelGrid_args": {
+ "resolution_changes": {0: (128 + 1, 128 + 1, 128 + 1)}
+ },
+ }
+ )
+ cfg = OmegaConf.merge(default_cfg, custom_cfg)
+ grid = VoxelGridModule(**cfg)
+
+ points = (torch.rand(3) * (points_max - points_min) + points_min)[None]
+ result = grid(points)
+ grid.crop_self(points_min, points_max)
+ result_cropped = grid(points)
+
+ assert torch.allclose(result, result_cropped, rtol=0.001, atol=0.001), (
+ result,
+ result_cropped,
+ )
+
+ def test_loading_state_dict(self):
+ """
+ Test loading state dict after rescaling.
+
+ Create a voxel grid, rescale it and get the state_dict.
+ Create a new voxel grid with the same args as the first one and load
+ the state_dict and check if everything is ok.
+ """
+ n_changes = 10
+
+ resolution_changes = {i: (i + 2, i + 2, i + 2) for i in range(n_changes)}
+ cfg = DictConfig(
+ {
+ "voxel_grid_class_type": "VMFactorizedVoxelGrid",
+ "voxel_grid_VMFactorizedVoxelGrid_args": {
+ "resolution_changes": resolution_changes,
+ "n_components": 48,
+ },
+ }
+ )
+ grid = VoxelGridModule(**cfg)
+ epochs, apply_func = grid.subscribe_to_epochs()
+ for epoch in epochs:
+ apply_func(epoch)
+
+ loaded_grid = VoxelGridModule(**cfg)
+ loaded_grid.load_state_dict(grid.state_dict())
+ for name_loaded, param_loaded in loaded_grid.named_parameters():
+ for name, param in grid.named_parameters():
+ if name_loaded == name:
+ torch.allclose(param_loaded, param)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e41cd717f6a439a9c08d76a9d0e4a54e190fc5a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/create_multiview.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/create_multiview.py
new file mode 100644
index 0000000000000000000000000000000000000000..7dfc9c5b77c116d73445a39729b1ee68c0a53e08
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/create_multiview.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""Create multiview data."""
+import sys
+from os import path
+
+
+# Making sure you can run this, even if pulsar hasn't been installed yet:
+# prepend the repository root (two directory levels up) to the module path.
+sys.path.insert(0, path.join(path.dirname(__file__), "..", ".."))
+
+
+def create_multiview():
+    """Create the multiview reference images.
+
+    Renders the same random point cloud from eight camera angles and writes
+    one PNG per angle into ./reference/.  Only a CUDA device is used (see the
+    device loop below).
+    """
+    import imageio
+
+    # import cv2
+    # import skvideo.io
+    import numpy as np
+    import torch
+    from pytorch3d.renderer.points.pulsar import Renderer
+    from torch import nn
+    from torch.autograd import Variable
+
+    # Constructor.
+    n_points = 10
+    width = 1000
+    height = 1000
+
+    class Model(nn.Module):
+        """A dummy model to test the integration into a stacked model."""
+
+        def __init__(self):
+            super(Model, self).__init__()
+            self.gamma = 0.1
+            self.renderer = Renderer(width, height, n_points)
+
+        def forward(self, vp, vc, vr, cam_params):
+            # self.gamma *= 0.995
+            # print("gamma: ", self.gamma)
+            return self.renderer.forward(vp, vc, vr, cam_params, self.gamma, 45.0)
+
+    # Generate sample data.
+    torch.manual_seed(1)
+    vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
+    vert_pos[:, 2] += 25.0
+    vert_pos[:, :2] -= 5.0
+    # print(vert_pos[0])
+    vert_col = torch.rand(n_points, 3, dtype=torch.float32)
+    vert_rad = torch.rand(n_points, dtype=torch.float32)
+
+    # Distortion.
+    # vert_pos[:, 1] += 0.5
+    vert_col *= 0.5
+    # vert_rad *= 0.7
+
+    for device in [torch.device("cuda")]:
+        model = Model().to(device)
+        vert_pos = vert_pos.to(device)
+        vert_col = vert_col.to(device)
+        vert_rad = vert_rad.to(device)
+        # Orbit the camera around the scene; one output image per angle.
+        for angle_idx, angle in enumerate([-1.5, -0.8, -0.4, -0.1, 0.1, 0.4, 0.8, 1.5]):
+            vert_pos_v = Variable(vert_pos, requires_grad=False)
+            vert_col_v = Variable(vert_col, requires_grad=False)
+            vert_rad_v = Variable(vert_rad, requires_grad=False)
+            # presumably [x, y, z, rot_x, rot_y, rot_z, focal, sensor_width] --
+            # TODO(review): confirm against the pulsar Renderer documentation.
+            cam_params = torch.tensor(
+                [
+                    np.sin(angle) * 35.0,
+                    0.0,
+                    30.0 - np.cos(angle) * 35.0,
+                    0.0,
+                    -angle,
+                    0.0,
+                    5.0,
+                    2.0,
+                ],
+                dtype=torch.float32,
+            ).to(device)
+            cam_params_v = Variable(cam_params, requires_grad=False)
+            result = model.forward(vert_pos_v, vert_col_v, vert_rad_v, cam_params_v)
+            result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
+            imageio.imsave(
+                "reference/examples_TestRenderer_test_multiview_%d.png" % (angle_idx),
+                result_im,
+            )
+
+
+if __name__ == "__main__":
+    # Script entry point: regenerate the reference images.
+    create_multiview()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_channels.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_channels.py
new file mode 100644
index 0000000000000000000000000000000000000000..adbf4e56a9880e5b89db9e34f5424744f473d388
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_channels.py
@@ -0,0 +1,149 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""Test number of channels."""
+import logging
+import sys
+import unittest
+from os import path
+
+import torch
+
+from ..common_testing import TestCaseMixin
+
+
+# Prepend the repo root so pytorch3d is importable even before installation.
+sys.path.insert(0, path.join(path.dirname(__file__), "..", ".."))
+# Backends every test is run on.
+devices = [torch.device("cuda"), torch.device("cpu")]
+
+
+class TestChannels(TestCaseMixin, unittest.TestCase):
+    """Test different numbers of channels."""
+
+    def test_basic(self):
+        """Basic forward test.
+
+        Renders one scene with 1-, 3- and 8-channel renderers and checks that
+        the shared leading channels agree and the hit maps are identical.
+        """
+        # NOTE(review): torch is already imported at module level; this local
+        # import looks redundant.
+        import torch
+        from pytorch3d.renderer.points.pulsar import Renderer
+
+        n_points = 10
+        width = 1_000
+        height = 1_000
+        renderer_1 = Renderer(width, height, n_points, n_channels=1)
+        renderer_3 = Renderer(width, height, n_points, n_channels=3)
+        renderer_8 = Renderer(width, height, n_points, n_channels=8)
+        # Generate sample data.
+        torch.manual_seed(1)
+        vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
+        vert_pos[:, 2] += 25.0
+        vert_pos[:, :2] -= 5.0
+        vert_col = torch.rand(n_points, 8, dtype=torch.float32)
+        vert_rad = torch.rand(n_points, dtype=torch.float32)
+        cam_params = torch.tensor(
+            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0], dtype=torch.float32
+        )
+        for device in devices:
+            vert_pos = vert_pos.to(device)
+            vert_col = vert_col.to(device)
+            vert_rad = vert_rad.to(device)
+            cam_params = cam_params.to(device)
+            renderer_1 = renderer_1.to(device)
+            renderer_3 = renderer_3.to(device)
+            renderer_8 = renderer_8.to(device)
+            # Color image (mode=0, default) and hit map (mode=1) per renderer,
+            # feeding each renderer only as many color channels as it expects.
+            result_1 = (
+                renderer_1.forward(
+                    vert_pos,
+                    vert_col[:, :1],
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            hits_1 = (
+                renderer_1.forward(
+                    vert_pos,
+                    vert_col[:, :1],
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                    mode=1,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            result_3 = (
+                renderer_3.forward(
+                    vert_pos,
+                    vert_col[:, :3],
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            hits_3 = (
+                renderer_3.forward(
+                    vert_pos,
+                    vert_col[:, :3],
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                    mode=1,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            result_8 = (
+                renderer_8.forward(
+                    vert_pos,
+                    vert_col,
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            hits_8 = (
+                renderer_8.forward(
+                    vert_pos,
+                    vert_col,
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                    mode=1,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            # Leading channels must agree across channel counts; hit maps do
+            # not depend on the number of color channels at all.
+            self.assertClose(result_1, result_3[:, :, :1])
+            self.assertClose(result_3, result_8[:, :, :3])
+            self.assertClose(hits_1, hits_3)
+            self.assertClose(hits_8, hits_3)
+
+
+if __name__ == "__main__":
+    # Allow running this test module directly.
+    logging.basicConfig(level=logging.INFO)
+    unittest.main()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_depth.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_depth.py
new file mode 100644
index 0000000000000000000000000000000000000000..023571ac7d5de7d239194b349cd8afbd3bfde1ed
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_depth.py
@@ -0,0 +1,94 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""Test the sorting of the closest spheres."""
+import logging
+import os
+import sys
+import unittest
+from os import path
+
+import imageio
+import numpy as np
+import torch
+
+from ..common_testing import TestCaseMixin
+
+# Making sure you can run this, even if pulsar hasn't been installed yet.
+sys.path.insert(0, path.join(path.dirname(__file__), "..", ".."))
+
+# Backends every test is run on.
+devices = [torch.device("cuda"), torch.device("cpu")]
+# Stored reference scene (inputs) and expected output for the depth test.
+IN_REF_FP = path.join(path.dirname(__file__), "reference", "nr0000-in.pth")
+OUT_REF_FP = path.join(path.dirname(__file__), "reference", "nr0000-out.pth")
+
+
+class TestDepth(TestCaseMixin, unittest.TestCase):
+    """Test depth map extraction against a stored reference."""
+
+    def test_basic(self):
+        """Render the stored reference scene and compare its depth map.
+
+        Loads scene data from IN_REF_FP, renders with an orthogonal,
+        single-channel renderer, derives the depth map from the forward info
+        and compares it to the reference stored in OUT_REF_FP.
+        """
+        from pytorch3d.renderer.points.pulsar import Renderer
+
+        for device in devices:
+            gamma = 1e-5
+            max_depth = 15.0
+            min_depth = 5.0
+            renderer = Renderer(
+                256,
+                256,
+                10000,
+                orthogonal_projection=True,
+                right_handed_system=False,
+                n_channels=1,
+            ).to(device)
+            data = torch.load(IN_REF_FP, map_location="cpu")
+            # For creating the reference files.
+            # Use in case of updates.
+            # data["pos"] = torch.rand_like(data["pos"])
+            # data["pos"][:, 0] = data["pos"][:, 0] * 2. - 1.
+            # data["pos"][:, 1] = data["pos"][:, 1] * 2. - 1.
+            # data["pos"][:, 2] = data["pos"][:, 2] + 9.5
+            result, result_info = renderer.forward(
+                data["pos"].to(device),
+                data["col"].to(device),
+                data["rad"].to(device),
+                data["cam_params"].to(device),
+                gamma,
+                min_depth=min_depth,
+                max_depth=max_depth,
+                return_forward_info=True,
+                bg_col=torch.zeros(1, device=device, dtype=torch.float32),
+                percent_allowed_difference=0.01,
+            )
+            depth_map = Renderer.depth_map_from_result_info_nograd(result_info)
+            # Rescale depths into roughly [50, 250] for a viewable debug image.
+            depth_vis = (depth_map - depth_map[depth_map > 0].min()) * 200 / (
+                depth_map.max() - depth_map[depth_map > 0.0].min()
+            ) + 50
+            # Skip writing debug output on the internal test infrastructure.
+            if not os.environ.get("FB_TEST", False):
+                imageio.imwrite(
+                    path.join(
+                        path.dirname(__file__),
+                        "test_out",
+                        "test_depth_test_basic_depth.png",
+                    ),
+                    depth_vis.cpu().numpy().astype(np.uint8),
+                )
+            # For creating the reference files.
+            # Use in case of updates.
+            # torch.save(
+            #     data, path.join(path.dirname(__file__), "reference", "nr0000-in.pth")
+            # )
+            # torch.save(
+            #     {"sphere_ids": sphere_ids, "depth_map": depth_map},
+            #     path.join(path.dirname(__file__), "reference", "nr0000-out.pth"),
+            # )
+            # sys.exit(0)
+            reference = torch.load(OUT_REF_FP, map_location="cpu")
+            self.assertClose(reference["depth_map"].to(device), depth_map)
+
+
+if __name__ == "__main__":
+    # Allow running this test module directly.
+    logging.basicConfig(level=logging.INFO)
+    unittest.main()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_forward.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_forward.py
new file mode 100644
index 0000000000000000000000000000000000000000..da79028cc5f192849dfc5b05fec430a7a005fab6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_forward.py
@@ -0,0 +1,358 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""Basic rendering test."""
+import logging
+import os
+import sys
+import unittest
+from os import path
+
+import imageio
+import numpy as np
+import torch
+
+
+# Making sure you can run this, even if pulsar hasn't been installed yet.
+sys.path.insert(0, path.join(path.dirname(__file__), "..", ".."))
+# Module-level logger for render progress messages.
+LOGGER = logging.getLogger(__name__)
+# Backends every test is run on.
+devices = [torch.device("cuda"), torch.device("cpu")]
+
+
+class TestForward(unittest.TestCase):
+    """Rendering tests."""
+
+    def test_bg_weight(self):
+        """Test background reweighting.
+
+        With background_normalized_depth=0.999 the center pixel is expected
+        to be fully saturated (all ones) instead of blending toward the
+        sphere color.
+        """
+        from pytorch3d.renderer.points.pulsar import Renderer
+
+        LOGGER.info("Setting up rendering test for 3 channels...")
+        n_points = 1
+        width = 1_000
+        height = 1_000
+        renderer = Renderer(width, height, n_points, background_normalized_depth=0.999)
+        vert_pos = torch.tensor([[0.0, 0.0, 25.0]], dtype=torch.float32)
+        vert_col = torch.tensor([[0.3, 0.5, 0.7]], dtype=torch.float32)
+        vert_rad = torch.tensor([1.0], dtype=torch.float32)
+        cam_params = torch.tensor(
+            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0], dtype=torch.float32
+        )
+        for device in devices:
+            vert_pos = vert_pos.to(device)
+            vert_col = vert_col.to(device)
+            vert_rad = vert_rad.to(device)
+            cam_params = cam_params.to(device)
+            renderer = renderer.to(device)
+            LOGGER.info("Rendering...")
+            # Measurements.
+            result = renderer.forward(
+                vert_pos, vert_col, vert_rad, cam_params, 1.0e-1, 45.0
+            )
+            hits = renderer.forward(
+                vert_pos,
+                vert_col,
+                vert_rad,
+                cam_params,
+                1.0e-1,
+                45.0,
+                percent_allowed_difference=0.01,
+                mode=1,
+            )
+            # Debug images are skipped on the internal test infrastructure.
+            if not os.environ.get("FB_TEST", False):
+                imageio.imsave(
+                    path.join(
+                        path.dirname(__file__),
+                        "test_out",
+                        "test_forward_TestForward_test_bg_weight.png",
+                    ),
+                    (result * 255.0).cpu().to(torch.uint8).numpy(),
+                )
+                imageio.imsave(
+                    path.join(
+                        path.dirname(__file__),
+                        "test_out",
+                        "test_forward_TestForward_test_bg_weight_hits.png",
+                    ),
+                    (hits * 255.0).cpu().to(torch.uint8).numpy(),
+                )
+            # The sphere is centered, so the image center must be a hit.
+            self.assertEqual(hits[500, 500, 0].item(), 1.0)
+            self.assertTrue(
+                np.allclose(
+                    result[500, 500, :].cpu().numpy(),
+                    [1.0, 1.0, 1.0],
+                    rtol=1e-2,
+                    atol=1e-2,
+                )
+            )
+
+    def test_basic_3chan(self):
+        """Test rendering one image with one sphere, 3 channels."""
+        from pytorch3d.renderer.points.pulsar import Renderer
+
+        LOGGER.info("Setting up rendering test for 3 channels...")
+        n_points = 1
+        width = 1_000
+        height = 1_000
+        renderer = Renderer(width, height, n_points)
+        vert_pos = torch.tensor([[0.0, 0.0, 25.0]], dtype=torch.float32)
+        vert_col = torch.tensor([[0.3, 0.5, 0.7]], dtype=torch.float32)
+        vert_rad = torch.tensor([1.0], dtype=torch.float32)
+        cam_params = torch.tensor(
+            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0], dtype=torch.float32
+        )
+        for device in devices:
+            vert_pos = vert_pos.to(device)
+            vert_col = vert_col.to(device)
+            vert_rad = vert_rad.to(device)
+            cam_params = cam_params.to(device)
+            renderer = renderer.to(device)
+            LOGGER.info("Rendering...")
+            # Measurements.
+            result = renderer.forward(
+                vert_pos, vert_col, vert_rad, cam_params, 1.0e-1, 45.0
+            )
+            hits = renderer.forward(
+                vert_pos,
+                vert_col,
+                vert_rad,
+                cam_params,
+                1.0e-1,
+                45.0,
+                percent_allowed_difference=0.01,
+                mode=1,
+            )
+            if not os.environ.get("FB_TEST", False):
+                imageio.imsave(
+                    path.join(
+                        path.dirname(__file__),
+                        "test_out",
+                        "test_forward_TestForward_test_basic_3chan.png",
+                    ),
+                    (result * 255.0).cpu().to(torch.uint8).numpy(),
+                )
+                imageio.imsave(
+                    path.join(
+                        path.dirname(__file__),
+                        "test_out",
+                        "test_forward_TestForward_test_basic_3chan_hits.png",
+                    ),
+                    (hits * 255.0).cpu().to(torch.uint8).numpy(),
+                )
+            # Center pixel must show the sphere with its exact color.
+            self.assertEqual(hits[500, 500, 0].item(), 1.0)
+            self.assertTrue(
+                np.allclose(
+                    result[500, 500, :].cpu().numpy(),
+                    [0.3, 0.5, 0.7],
+                    rtol=1e-2,
+                    atol=1e-2,
+                )
+            )
+
+    def test_basic_1chan(self):
+        """Test rendering one image with one sphere, 1 channel."""
+        from pytorch3d.renderer.points.pulsar import Renderer
+
+        LOGGER.info("Setting up rendering test for 1 channel...")
+        n_points = 1
+        width = 1_000
+        height = 1_000
+        renderer = Renderer(width, height, n_points, n_channels=1)
+        vert_pos = torch.tensor([[0.0, 0.0, 25.0]], dtype=torch.float32)
+        vert_col = torch.tensor([[0.3]], dtype=torch.float32)
+        vert_rad = torch.tensor([1.0], dtype=torch.float32)
+        cam_params = torch.tensor(
+            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0], dtype=torch.float32
+        )
+        for device in devices:
+            vert_pos = vert_pos.to(device)
+            vert_col = vert_col.to(device)
+            vert_rad = vert_rad.to(device)
+            cam_params = cam_params.to(device)
+            renderer = renderer.to(device)
+            LOGGER.info("Rendering...")
+            # Measurements.
+            result = renderer.forward(
+                vert_pos, vert_col, vert_rad, cam_params, 1.0e-1, 45.0
+            )
+            hits = renderer.forward(
+                vert_pos,
+                vert_col,
+                vert_rad,
+                cam_params,
+                1.0e-1,
+                45.0,
+                percent_allowed_difference=0.01,
+                mode=1,
+            )
+            if not os.environ.get("FB_TEST", False):
+                imageio.imsave(
+                    path.join(
+                        path.dirname(__file__),
+                        "test_out",
+                        "test_forward_TestForward_test_basic_1chan.png",
+                    ),
+                    (result * 255.0).cpu().to(torch.uint8).numpy(),
+                )
+                imageio.imsave(
+                    path.join(
+                        path.dirname(__file__),
+                        "test_out",
+                        "test_forward_TestForward_test_basic_1chan_hits.png",
+                    ),
+                    (hits * 255.0).cpu().to(torch.uint8).numpy(),
+                )
+            self.assertEqual(hits[500, 500, 0].item(), 1.0)
+            self.assertTrue(
+                np.allclose(
+                    result[500, 500, :].cpu().numpy(), [0.3], rtol=1e-2, atol=1e-2
+                )
+            )
+
+    def test_basic_8chan(self):
+        """Test rendering one image with one sphere, 8 channels."""
+        from pytorch3d.renderer.points.pulsar import Renderer
+
+        LOGGER.info("Setting up rendering test for 8 channels...")
+        n_points = 1
+        width = 1_000
+        height = 1_000
+        renderer = Renderer(width, height, n_points, n_channels=8)
+        vert_pos = torch.tensor([[0.0, 0.0, 25.0]], dtype=torch.float32)
+        # First five channels constant one, last three carry the test color.
+        vert_col = torch.tensor(
+            [[1.0, 1.0, 1.0, 1.0, 1.0, 0.3, 0.5, 0.7]], dtype=torch.float32
+        )
+        vert_rad = torch.tensor([1.0], dtype=torch.float32)
+        cam_params = torch.tensor(
+            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0], dtype=torch.float32
+        )
+        for device in devices:
+            vert_pos = vert_pos.to(device)
+            vert_col = vert_col.to(device)
+            vert_rad = vert_rad.to(device)
+            cam_params = cam_params.to(device)
+            renderer = renderer.to(device)
+            LOGGER.info("Rendering...")
+            # Measurements.
+            result = renderer.forward(
+                vert_pos, vert_col, vert_rad, cam_params, 1.0e-1, 45.0
+            )
+            hits = renderer.forward(
+                vert_pos,
+                vert_col,
+                vert_rad,
+                cam_params,
+                1.0e-1,
+                45.0,
+                percent_allowed_difference=0.01,
+                mode=1,
+            )
+            if not os.environ.get("FB_TEST", False):
+                imageio.imsave(
+                    path.join(
+                        path.dirname(__file__),
+                        "test_out",
+                        "test_forward_TestForward_test_basic_8chan.png",
+                    ),
+                    (result[:, :, 5:8] * 255.0).cpu().to(torch.uint8).numpy(),
+                )
+                imageio.imsave(
+                    path.join(
+                        path.dirname(__file__),
+                        "test_out",
+                        "test_forward_TestForward_test_basic_8chan_hits.png",
+                    ),
+                    (hits * 255.0).cpu().to(torch.uint8).numpy(),
+                )
+            self.assertEqual(hits[500, 500, 0].item(), 1.0)
+            self.assertTrue(
+                np.allclose(
+                    result[500, 500, 5:8].cpu().numpy(),
+                    [0.3, 0.5, 0.7],
+                    rtol=1e-2,
+                    atol=1e-2,
+                )
+            )
+            self.assertTrue(
+                np.allclose(
+                    result[500, 500, :5].cpu().numpy(), 1.0, rtol=1e-2, atol=1e-2
+                )
+            )
+
+    def test_principal_point(self):
+        """Test shifting the principal point.
+
+        Renders with the principal point shifted by -250 px and then +250 px
+        and checks the sphere lands at the correspondingly shifted pixel.
+        """
+        from pytorch3d.renderer.points.pulsar import Renderer
+
+        LOGGER.info("Setting up rendering test for shifted principal point...")
+        n_points = 1
+        width = 1_000
+        height = 1_000
+        renderer = Renderer(width, height, n_points, n_channels=1)
+        vert_pos = torch.tensor([[0.0, 0.0, 25.0]], dtype=torch.float32)
+        vert_col = torch.tensor([[0.0]], dtype=torch.float32)
+        vert_rad = torch.tensor([1.0], dtype=torch.float32)
+        # Last two entries are the principal point offsets, set per loop below.
+        cam_params = torch.tensor(
+            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0, 0.0, 0.0], dtype=torch.float32
+        )
+        for device in devices:
+            vert_pos = vert_pos.to(device)
+            vert_col = vert_col.to(device)
+            vert_rad = vert_rad.to(device)
+            cam_params = cam_params.to(device)
+            cam_params[-2] = -250.0
+            cam_params[-1] = -250.0
+            renderer = renderer.to(device)
+            LOGGER.info("Rendering...")
+            # Measurements.
+            result = renderer.forward(
+                vert_pos, vert_col, vert_rad, cam_params, 1.0e-1, 45.0
+            )
+            if not os.environ.get("FB_TEST", False):
+                imageio.imsave(
+                    path.join(
+                        path.dirname(__file__),
+                        "test_out",
+                        "test_forward_TestForward_test_principal_point.png",
+                    ),
+                    (result * 255.0).cpu().to(torch.uint8).numpy(),
+                )
+            self.assertTrue(
+                np.allclose(
+                    result[750, 750, :].cpu().numpy(), [0.0], rtol=1e-2, atol=1e-2
+                )
+            )
+        for device in devices:
+            vert_pos = vert_pos.to(device)
+            vert_col = vert_col.to(device)
+            vert_rad = vert_rad.to(device)
+            cam_params = cam_params.to(device)
+            cam_params[-2] = 250.0
+            cam_params[-1] = 250.0
+            renderer = renderer.to(device)
+            LOGGER.info("Rendering...")
+            # Measurements.
+            result = renderer.forward(
+                vert_pos, vert_col, vert_rad, cam_params, 1.0e-1, 45.0
+            )
+            if not os.environ.get("FB_TEST", False):
+                # NOTE(review): this reuses the filename from the previous
+                # loop and overwrites that image -- consider a distinct name.
+                imageio.imsave(
+                    path.join(
+                        path.dirname(__file__),
+                        "test_out",
+                        "test_forward_TestForward_test_principal_point.png",
+                    ),
+                    (result * 255.0).cpu().to(torch.uint8).numpy(),
+                )
+            self.assertTrue(
+                np.allclose(
+                    result[250, 250, :].cpu().numpy(), [0.0], rtol=1e-2, atol=1e-2
+                )
+            )
+
+
+if __name__ == "__main__":
+    # Allow running this test module directly; quiet the renderer logger.
+    logging.basicConfig(level=logging.INFO)
+    logging.getLogger("pulsar.renderer").setLevel(logging.WARN)
+    unittest.main()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_hands.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_hands.py
new file mode 100644
index 0000000000000000000000000000000000000000..128b228553514781cd9d4a019f12844115328929
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_hands.py
@@ -0,0 +1,120 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""Test right hand/left hand system compatibility."""
+import logging
+import sys
+import unittest
+from os import path
+
+import torch
+
+from ..common_testing import TestCaseMixin
+
+
+# Making sure you can run this, even if pulsar hasn't been installed yet.
+sys.path.insert(0, path.join(path.dirname(__file__), "..", ".."))
+# Backends every test is run on.
+devices = [torch.device("cuda"), torch.device("cpu")]
+
+
+class TestHands(TestCaseMixin, unittest.TestCase):
+    """Test right hand/left hand system compatibility."""
+
+    def test_basic(self):
+        """Basic forward test.
+
+        Renders the same scene with a left-handed renderer, and its z-negated
+        mirror with a right-handed renderer, and expects identical images and
+        hit maps.
+        """
+        from pytorch3d.renderer.points.pulsar import Renderer
+
+        n_points = 10
+        width = 1000
+        height = 1000
+        renderer_left = Renderer(width, height, n_points, right_handed_system=False)
+        renderer_right = Renderer(width, height, n_points, right_handed_system=True)
+        # Generate sample data.
+        torch.manual_seed(1)
+        vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
+        vert_pos[:, 2] += 25.0
+        vert_pos[:, :2] -= 5.0
+        # Mirror the scene along z for the right-handed renderer.
+        vert_pos_neg = vert_pos.clone()
+        vert_pos_neg[:, 2] *= -1.0
+        vert_col = torch.rand(n_points, 3, dtype=torch.float32)
+        vert_rad = torch.rand(n_points, dtype=torch.float32)
+        cam_params = torch.tensor(
+            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0], dtype=torch.float32
+        )
+        for device in devices:
+            vert_pos = vert_pos.to(device)
+            vert_pos_neg = vert_pos_neg.to(device)
+            vert_col = vert_col.to(device)
+            vert_rad = vert_rad.to(device)
+            cam_params = cam_params.to(device)
+            renderer_left = renderer_left.to(device)
+            renderer_right = renderer_right.to(device)
+            result_left = (
+                renderer_left.forward(
+                    vert_pos,
+                    vert_col,
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            hits_left = (
+                renderer_left.forward(
+                    vert_pos,
+                    vert_col,
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                    mode=1,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            result_right = (
+                renderer_right.forward(
+                    vert_pos_neg,
+                    vert_col,
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            hits_right = (
+                renderer_right.forward(
+                    vert_pos_neg,
+                    vert_col,
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                    mode=1,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            # Both handedness conventions must produce identical renders.
+            self.assertClose(result_left, result_right)
+            self.assertClose(hits_left, hits_right)
+
+
+if __name__ == "__main__":
+    # Allow running this test module directly; quiet the renderer logger.
+    logging.basicConfig(level=logging.INFO)
+    logging.getLogger("pulsar.renderer").setLevel(logging.WARN)
+    unittest.main()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_ortho.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_ortho.py
new file mode 100644
index 0000000000000000000000000000000000000000..3da5ea7037af37184ee14dadc57722ca29d48939
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_ortho.py
@@ -0,0 +1,131 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""Tests for the orthogonal projection."""
+import logging
+import sys
+import unittest
+from os import path
+
+import numpy as np
+import torch
+
+
+# Making sure you can run this, even if pulsar hasn't been installed yet.
+# NOTE(review): other pulsar tests insert "..", ".." (two levels up); this
+# file only goes one level up -- verify which root is intended.
+sys.path.insert(0, path.join(path.dirname(__file__), ".."))
+# Backends every test is run on.
+devices = [torch.device("cuda"), torch.device("cpu")]
+
+
+class TestOrtho(unittest.TestCase):
+    """Test the orthogonal projection."""
+
+    def test_basic(self):
+        """Basic forward test of the orthogonal projection.
+
+        Renders the same scene with left- and right-handed orthogonal
+        renderers (z negated for the right-handed one) and expects identical
+        images and hit maps.
+        """
+        from pytorch3d.renderer.points.pulsar import Renderer
+
+        n_points = 10
+        width = 1000
+        height = 1000
+        renderer_left = Renderer(
+            width,
+            height,
+            n_points,
+            right_handed_system=False,
+            orthogonal_projection=True,
+        )
+        renderer_right = Renderer(
+            width,
+            height,
+            n_points,
+            right_handed_system=True,
+            orthogonal_projection=True,
+        )
+        # Generate sample data.
+        torch.manual_seed(1)
+        vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
+        vert_pos[:, 2] += 25.0
+        vert_pos[:, :2] -= 5.0
+        # Mirror the scene along z for the right-handed renderer.
+        vert_pos_neg = vert_pos.clone()
+        vert_pos_neg[:, 2] *= -1.0
+        vert_col = torch.rand(n_points, 3, dtype=torch.float32)
+        vert_rad = torch.rand(n_points, dtype=torch.float32)
+        cam_params = torch.tensor(
+            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0], dtype=torch.float32
+        )
+        for device in devices:
+            vert_pos = vert_pos.to(device)
+            vert_pos_neg = vert_pos_neg.to(device)
+            vert_col = vert_col.to(device)
+            vert_rad = vert_rad.to(device)
+            cam_params = cam_params.to(device)
+            renderer_left = renderer_left.to(device)
+            renderer_right = renderer_right.to(device)
+            result_left = (
+                renderer_left.forward(
+                    vert_pos,
+                    vert_col,
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            hits_left = (
+                renderer_left.forward(
+                    vert_pos,
+                    vert_col,
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                    mode=1,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            result_right = (
+                renderer_right.forward(
+                    vert_pos_neg,
+                    vert_col,
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            hits_right = (
+                renderer_right.forward(
+                    vert_pos_neg,
+                    vert_col,
+                    vert_rad,
+                    cam_params,
+                    1.0e-1,
+                    45.0,
+                    percent_allowed_difference=0.01,
+                    mode=1,
+                )
+                .cpu()
+                .detach()
+                .numpy()
+            )
+            # Both handedness conventions must produce identical renders.
+            self.assertTrue(np.allclose(result_left, result_right))
+            self.assertTrue(np.allclose(hits_left, hits_right))
+
+
+if __name__ == "__main__":
+    # Allow running this test module directly; quiet the renderer logger.
+    logging.basicConfig(level=logging.INFO)
+    logging.getLogger("pulsar.renderer").setLevel(logging.WARN)
+    unittest.main()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_out/empty.txt b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_out/empty.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_small_spheres.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_small_spheres.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1282c70a863abbcf024333c99887db42069f9a6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/tests/pulsar/test_small_spheres.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""Test right hand/left hand system compatibility."""
+import sys
+import unittest
+from os import path
+
+import numpy as np
+import torch
+from torch import nn
+
+
+# Making sure you can run this, even if pulsar hasn't been installed yet.
+# NOTE(review): other pulsar tests insert "..", ".." (two levels up); this
+# file only goes one level up -- verify which root is intended.
+sys.path.insert(0, path.join(path.dirname(__file__), ".."))
+# Backends every test is run on.
+devices = [torch.device("cuda"), torch.device("cpu")]
+
+
+# Shared scene constants used by SceneModel and the tests below.
+n_points = 10
+width = 1_000
+height = 1_000
+
+
+class SceneModel(nn.Module):
+    """A simple model to demonstrate use in Modules.
+
+    Holds a fixed random point cloud with learnable colors and registers
+    eight camera parameter sets as a buffer; forward() renders either all
+    eight registered views or a single caller-supplied camera.
+    """
+
+    def __init__(self):
+        super(SceneModel, self).__init__()
+        from pytorch3d.renderer.points.pulsar import Renderer
+
+        self.gamma = 1.0
+        # Points.
+        torch.manual_seed(1)
+        vert_pos = torch.rand((1, n_points, 3), dtype=torch.float32) * 10.0
+        vert_pos[:, :, 2] += 25.0
+        vert_pos[:, :, :2] -= 5.0
+        self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=False))
+        # Colors are the only trainable parameter (gradient check target).
+        self.register_parameter(
+            "vert_col",
+            nn.Parameter(
+                torch.zeros(1, n_points, 3, dtype=torch.float32), requires_grad=True
+            ),
+        )
+        # Deliberately tiny radii -- the point of the small-spheres test.
+        self.register_parameter(
+            "vert_rad",
+            nn.Parameter(
+                torch.ones(1, n_points, dtype=torch.float32) * 0.001,
+                requires_grad=False,
+            ),
+        )
+        self.register_parameter(
+            "vert_opy",
+            nn.Parameter(
+                torch.ones(1, n_points, dtype=torch.float32), requires_grad=False
+            ),
+        )
+        # Eight cameras orbiting the scene, one row per angle.
+        self.register_buffer(
+            "cam_params",
+            torch.tensor(
+                [
+                    [
+                        np.sin(angle) * 35.0,
+                        0.0,
+                        30.0 - np.cos(angle) * 35.0,
+                        0.0,
+                        -angle,
+                        0.0,
+                        5.0,
+                        2.0,
+                    ]
+                    for angle in [-1.5, -0.8, -0.4, -0.1, 0.1, 0.4, 0.8, 1.5]
+                ],
+                dtype=torch.float32,
+            ),
+        )
+        self.renderer = Renderer(width, height, n_points)
+
+    def forward(self, cam=None):
+        # With no camera given, render all eight registered views at once;
+        # otherwise render the single provided camera.
+        if cam is None:
+            cam = self.cam_params
+            n_views = 8
+        else:
+            n_views = 1
+        return self.renderer.forward(
+            self.vert_pos.expand(n_views, -1, -1),
+            self.vert_col.expand(n_views, -1, -1),
+            self.vert_rad.expand(n_views, -1),
+            cam,
+            self.gamma,
+            45.0,
+            return_forward_info=True,
+        )
+
+
+class TestSmallSpheres(unittest.TestCase):
+    """Test small sphere rendering and gradients."""
+
+    def test_basic(self):
+        """Sweep the camera and check visibility and gradient flow.
+
+        For 50 camera angles, verifies that every (tiny) sphere is rendered
+        and that back-propagation delivers a non-zero color gradient to every
+        sphere.
+        """
+        for device in devices:
+            # Set up model.
+            model = SceneModel().to(device)
+            angle = 0.0
+            for _ in range(50):
+                cam_control = torch.tensor(
+                    [
+                        [
+                            np.sin(angle) * 35.0,
+                            0.0,
+                            30.0 - np.cos(angle) * 35.0,
+                            0.0,
+                            -angle,
+                            0.0,
+                            5.0,
+                            2.0,
+                        ]
+                    ],
+                    dtype=torch.float32,
+                ).to(device)
+                result, forw_info = model(cam=cam_control)
+                sphere_ids = model.renderer.sphere_ids_from_result_info_nograd(
+                    forw_info
+                )
+                # Assert all spheres are rendered.
+                for idx in range(n_points):
+                    self.assertTrue(
+                        (sphere_ids == idx).sum() > 0, "Sphere ID %d missing!" % (idx)
+                    )
+                # Visualization code. Activate for debugging.
+                # result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
+                # cv2.imshow("res", result_im[0, :, :, ::-1])
+                # cv2.waitKey(0)
+                # Back-propagate some dummy gradients.
+                # NOTE(review): grads are never zeroed, so they accumulate
+                # across iterations; the abs() > 0 check tolerates this.
+                loss = ((result - torch.ones_like(result)).abs()).sum()
+                loss.backward()
+                # Now check whether the gradient arrives at every sphere.
+                self.assertTrue(torch.all(model.vert_col.grad[:, :, 0].abs() > 0.0))
+                angle += 0.15
+
+
+if __name__ == "__main__":
+    # Allow running this test module directly.
+    unittest.main()
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/batching.svg b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/batching.svg
new file mode 100644
index 0000000000000000000000000000000000000000..2c160f97e6524b6eca9d8fad0cb48d5e9f4b977b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/batching.svg
@@ -0,0 +1,16 @@
+
+
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/favicon.ico b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..56093a9417fe17aaca9d5b54bed35cf975e88be5
Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/favicon.ico differ
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/ops.svg b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/ops.svg
new file mode 100644
index 0000000000000000000000000000000000000000..b934e539df7535774dde3ec26f20a07926d8ff86
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/ops.svg
@@ -0,0 +1,23 @@
+
+
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/pytorch3dicon.svg b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/pytorch3dicon.svg
new file mode 100644
index 0000000000000000000000000000000000000000..e6927b791d546373528f6895362f48721f34e806
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/pytorch3dicon.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/pytorch3dlogo.svg b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/pytorch3dlogo.svg
new file mode 100644
index 0000000000000000000000000000000000000000..b914e1b28a3b173c4e492985e7638e34c376c2ff
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/pytorch3dlogo.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/pytorch3dlogowhite.svg b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/pytorch3dlogowhite.svg
new file mode 100644
index 0000000000000000000000000000000000000000..f16b3dfbb63934dfa45c42556efb250e5ca30ad2
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/pytorch3dlogowhite.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/rendering.svg b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/rendering.svg
new file mode 100644
index 0000000000000000000000000000000000000000..30605d8a246a35f9e58cc180f19ca6d22b97dc58
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/website/static/img/rendering.svg
@@ -0,0 +1,19 @@
+
+
\ No newline at end of file