Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/build_conda.py +145 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/build_wheel.sh +22 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/build_pytorch3d.sh +218 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/install_conda.bat +7 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/switch_cuda_version.sh +35 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/cub_conda/README.md +26 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/cub_conda/cub/meta.yaml +12 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/README.md +31 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/after.sh +10 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/go.sh +17 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/inside.sh +163 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/publish.py +87 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/pkg_helpers.bash +390 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/pytorch3d/meta.yaml +59 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/activate.bat +50 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/conda_build_config.yaml +24 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/install_activate.bat +35 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/install_runtime.bat +55 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/meta.yaml +45 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/activate.bat +50 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/conda_build_config.yaml +24 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/install_activate.bat +35 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/install_runtime.bat +55 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/meta.yaml +45 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/__init__.py +5 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_normed.yaml +18 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_transformer.yaml +18 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_unnormed.yaml +19 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_co3dv2_base.yaml +8 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_idr_ad.yaml +65 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_wce.yaml +12 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet.yaml +35 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce_noharm.yaml +11 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_base.yaml +41 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml +55 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_wce.yaml +10 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerformer.yaml +18 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_noharm.yaml +11 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_idr.yaml +4 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerformer.yaml +4 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_srn_noharm.yaml +4 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_wce_base.yaml +22 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/__init__.py +12 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_core.py +160 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v1.json +59 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v2.json +57 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/__init__.py +7 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/blender_dataset_map_provider.py +55 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/dataset_base.py +147 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/frame_data.py +780 -0
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/build_conda.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
import os.path
|
| 8 |
+
import runpy
|
| 9 |
+
import subprocess
|
| 10 |
+
from typing import List
|
| 11 |
+
|
| 12 |
+
# required env vars:
|
| 13 |
+
# CU_VERSION: E.g. cu112
|
| 14 |
+
# JUST_TESTRUN: 1 to not set nvcc flags
|
| 15 |
+
# PYTORCH_VERSION: e.g. 1.12.0
|
| 16 |
+
# PYTHON_VERSION: e.g. 3.9
|
| 17 |
+
|
| 18 |
+
# should be run from pytorch3d root
|
| 19 |
+
|
| 20 |
+
CU_VERSION = os.environ["CU_VERSION"]
|
| 21 |
+
PYTORCH_VERSION = os.environ["PYTORCH_VERSION"]
|
| 22 |
+
pytorch_major_minor = tuple(int(i) for i in PYTORCH_VERSION.split(".")[:2])
|
| 23 |
+
source_root_dir = os.environ["PWD"]
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def version_constraint(version):
|
| 27 |
+
"""
|
| 28 |
+
Given version "11.3" returns " >=11.3,<11.4"
|
| 29 |
+
"""
|
| 30 |
+
last_part = version.rindex(".") + 1
|
| 31 |
+
upper = version[:last_part] + str(1 + int(version[last_part:]))
|
| 32 |
+
return f" >={version},<{upper}"
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def get_cuda_major_minor():
|
| 36 |
+
if CU_VERSION == "cpu":
|
| 37 |
+
raise ValueError("fn only for cuda builds")
|
| 38 |
+
if len(CU_VERSION) != 5 or CU_VERSION[:2] != "cu":
|
| 39 |
+
raise ValueError(f"Bad CU_VERSION {CU_VERSION}")
|
| 40 |
+
major = CU_VERSION[2:4]
|
| 41 |
+
minor = CU_VERSION[4]
|
| 42 |
+
return major, minor
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def setup_cuda():
|
| 46 |
+
if CU_VERSION == "cpu":
|
| 47 |
+
return
|
| 48 |
+
major, minor = get_cuda_major_minor()
|
| 49 |
+
os.environ["CUDA_HOME"] = f"/usr/local/cuda-{major}.{minor}/"
|
| 50 |
+
os.environ["FORCE_CUDA"] = "1"
|
| 51 |
+
|
| 52 |
+
basic_nvcc_flags = (
|
| 53 |
+
"-gencode=arch=compute_50,code=sm_50 "
|
| 54 |
+
"-gencode=arch=compute_60,code=sm_60 "
|
| 55 |
+
"-gencode=arch=compute_70,code=sm_70 "
|
| 56 |
+
"-gencode=arch=compute_75,code=sm_75 "
|
| 57 |
+
"-gencode=arch=compute_50,code=compute_50"
|
| 58 |
+
)
|
| 59 |
+
if CU_VERSION == "cu102":
|
| 60 |
+
nvcc_flags = "-gencode=arch=compute_35,code=sm_35 " + basic_nvcc_flags
|
| 61 |
+
elif CU_VERSION < ("cu118"):
|
| 62 |
+
nvcc_flags = (
|
| 63 |
+
"-gencode=arch=compute_35,code=sm_35 "
|
| 64 |
+
+ "-gencode=arch=compute_80,code=sm_80 "
|
| 65 |
+
+ "-gencode=arch=compute_86,code=sm_86 "
|
| 66 |
+
+ basic_nvcc_flags
|
| 67 |
+
)
|
| 68 |
+
else:
|
| 69 |
+
nvcc_flags = (
|
| 70 |
+
"-gencode=arch=compute_80,code=sm_80 "
|
| 71 |
+
+ "-gencode=arch=compute_86,code=sm_86 "
|
| 72 |
+
+ "-gencode=arch=compute_90,code=sm_90 "
|
| 73 |
+
+ basic_nvcc_flags
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
if os.environ.get("JUST_TESTRUN", "0") != "1":
|
| 77 |
+
os.environ["NVCC_FLAGS"] = nvcc_flags
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def setup_conda_pytorch_constraint() -> List[str]:
|
| 81 |
+
pytorch_constraint = f"- pytorch=={PYTORCH_VERSION}"
|
| 82 |
+
os.environ["CONDA_PYTORCH_CONSTRAINT"] = pytorch_constraint
|
| 83 |
+
if pytorch_major_minor < (2, 2):
|
| 84 |
+
os.environ["CONDA_PYTORCH_MKL_CONSTRAINT"] = "- mkl!=2024.1.0"
|
| 85 |
+
os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools<70"
|
| 86 |
+
else:
|
| 87 |
+
os.environ["CONDA_PYTORCH_MKL_CONSTRAINT"] = ""
|
| 88 |
+
os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools"
|
| 89 |
+
os.environ["CONDA_PYTORCH_BUILD_CONSTRAINT"] = pytorch_constraint
|
| 90 |
+
os.environ["PYTORCH_VERSION_NODOT"] = PYTORCH_VERSION.replace(".", "")
|
| 91 |
+
|
| 92 |
+
if pytorch_major_minor < (1, 13):
|
| 93 |
+
return ["-c", "pytorch"]
|
| 94 |
+
else:
|
| 95 |
+
return ["-c", "pytorch", "-c", "nvidia"]
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def setup_conda_cudatoolkit_constraint():
|
| 99 |
+
if CU_VERSION == "cpu":
|
| 100 |
+
os.environ["CONDA_CPUONLY_FEATURE"] = "- cpuonly"
|
| 101 |
+
os.environ["CONDA_CUDATOOLKIT_CONSTRAINT"] = ""
|
| 102 |
+
return
|
| 103 |
+
os.environ["CONDA_CPUONLY_FEATURE"] = ""
|
| 104 |
+
|
| 105 |
+
if CU_VERSION in ("cu102", "cu110"):
|
| 106 |
+
os.environ["CONDA_CUB_CONSTRAINT"] = "- nvidiacub"
|
| 107 |
+
else:
|
| 108 |
+
os.environ["CONDA_CUB_CONSTRAINT"] = ""
|
| 109 |
+
|
| 110 |
+
major, minor = get_cuda_major_minor()
|
| 111 |
+
version_clause = version_constraint(f"{major}.{minor}")
|
| 112 |
+
if pytorch_major_minor < (1, 13):
|
| 113 |
+
toolkit = f"- cudatoolkit {version_clause}"
|
| 114 |
+
else:
|
| 115 |
+
toolkit = f"- pytorch-cuda {version_clause}"
|
| 116 |
+
os.environ["CONDA_CUDATOOLKIT_CONSTRAINT"] = toolkit
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def do_build(start_args: List[str]):
|
| 120 |
+
args = start_args.copy()
|
| 121 |
+
|
| 122 |
+
test_flag = os.environ.get("TEST_FLAG")
|
| 123 |
+
if test_flag is not None:
|
| 124 |
+
args.append(test_flag)
|
| 125 |
+
|
| 126 |
+
args.extend(["-c", "bottler", "-c", "iopath", "-c", "conda-forge"])
|
| 127 |
+
args.append("--no-anaconda-upload")
|
| 128 |
+
args.extend(["--python", os.environ["PYTHON_VERSION"]])
|
| 129 |
+
args.append("packaging/pytorch3d")
|
| 130 |
+
print(args)
|
| 131 |
+
subprocess.check_call(args)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
if __name__ == "__main__":
|
| 135 |
+
args = ["conda", "build"]
|
| 136 |
+
setup_cuda()
|
| 137 |
+
|
| 138 |
+
init_path = source_root_dir + "/pytorch3d/__init__.py"
|
| 139 |
+
build_version = runpy.run_path(init_path)["__version__"]
|
| 140 |
+
os.environ["BUILD_VERSION"] = build_version
|
| 141 |
+
|
| 142 |
+
os.environ["SOURCE_ROOT_DIR"] = source_root_dir
|
| 143 |
+
args += setup_conda_pytorch_constraint()
|
| 144 |
+
setup_conda_cudatoolkit_constraint()
|
| 145 |
+
do_build(args)
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/build_wheel.sh
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD-style license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
|
| 8 |
+
set -ex
|
| 9 |
+
|
| 10 |
+
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
| 11 |
+
. "$script_dir/pkg_helpers.bash"
|
| 12 |
+
|
| 13 |
+
VERSION=$(python -c "exec(open('${script_dir}/../pytorch3d/__init__.py').read()); print(__version__)")
|
| 14 |
+
|
| 15 |
+
export BUILD_TYPE=wheel
|
| 16 |
+
setup_env "$VERSION"
|
| 17 |
+
setup_wheel_python
|
| 18 |
+
pip_install numpy
|
| 19 |
+
setup_pip_pytorch_version
|
| 20 |
+
download_nvidiacub_if_needed
|
| 21 |
+
python setup.py clean
|
| 22 |
+
IS_WHEEL=1 python setup.py bdist_wheel
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/build_pytorch3d.sh
ADDED
|
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
|
| 2 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD-style license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
|
| 8 |
+
if [[ -x "/remote/anaconda_token" ]]; then
|
| 9 |
+
. /remote/anaconda_token || true
|
| 10 |
+
fi
|
| 11 |
+
|
| 12 |
+
set -ex
|
| 13 |
+
|
| 14 |
+
# Function to retry functions that sometimes timeout or have flaky failures
|
| 15 |
+
retry () {
|
| 16 |
+
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
# Parse arguments and determmine version
|
| 20 |
+
###########################################################
|
| 21 |
+
|
| 22 |
+
if [ "$#" -ne 3 ]; then
|
| 23 |
+
echo "Illegal number of parameters. Pass cuda version, pytorch3d version, pytorch3d build number"
|
| 24 |
+
echo "CUDA version should be Mm with no dot, e.g. '80'"
|
| 25 |
+
echo "DESIRED_PYTHON should be M.m, e.g. '2.7'"
|
| 26 |
+
exit 1
|
| 27 |
+
fi
|
| 28 |
+
|
| 29 |
+
desired_cuda="$1"
|
| 30 |
+
build_version="$2"
|
| 31 |
+
build_number="$3"
|
| 32 |
+
|
| 33 |
+
if [[ "$desired_cuda" != cpu ]]; then
|
| 34 |
+
desired_cuda="$(echo $desired_cuda | tr -d cuda. )"
|
| 35 |
+
fi
|
| 36 |
+
echo "Building cuda version $desired_cuda and pytorch3d version: $build_version build_number: $build_number"
|
| 37 |
+
|
| 38 |
+
if [[ "$desired_cuda" == 'cpu' ]]; then
|
| 39 |
+
cpu_only=1
|
| 40 |
+
cuver="cpu"
|
| 41 |
+
else
|
| 42 |
+
# Switch desired_cuda to be M.m to be consistent with other scripts in
|
| 43 |
+
# pytorch/builder
|
| 44 |
+
export FORCE_CUDA=1
|
| 45 |
+
cuda_nodot="$desired_cuda"
|
| 46 |
+
|
| 47 |
+
if [[ ${#cuda_nodot} -eq 2 ]]; then
|
| 48 |
+
desired_cuda="${desired_cuda:0:1}.${desired_cuda:1:1}"
|
| 49 |
+
elif [[ ${#cuda_nodot} -eq 3 ]]; then
|
| 50 |
+
desired_cuda="${desired_cuda:0:2}.${desired_cuda:2:1}"
|
| 51 |
+
else
|
| 52 |
+
echo "unknown cuda version $cuda_nodot"
|
| 53 |
+
exit 1
|
| 54 |
+
fi
|
| 55 |
+
|
| 56 |
+
cuver="cu$cuda_nodot"
|
| 57 |
+
fi
|
| 58 |
+
|
| 59 |
+
export PYTORCH3D_BUILD_VERSION=$build_version
|
| 60 |
+
export PYTORCH3D_BUILD_NUMBER=$build_number
|
| 61 |
+
|
| 62 |
+
if [[ -z "$DESIRED_PYTHON" ]]; then
|
| 63 |
+
DESIRED_PYTHON=('3.5' '3.6' '3.7')
|
| 64 |
+
fi
|
| 65 |
+
|
| 66 |
+
SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
|
| 67 |
+
|
| 68 |
+
if [[ -z "$WIN_PACKAGE_WORK_DIR" ]]; then
|
| 69 |
+
WIN_PACKAGE_WORK_DIR="$(echo $(pwd -W) | tr '/' '\\')\\tmp_conda_$(date +%H%M%S)"
|
| 70 |
+
fi
|
| 71 |
+
|
| 72 |
+
mkdir -p "$WIN_PACKAGE_WORK_DIR" || true
|
| 73 |
+
pytorch3d_rootdir="$(realpath ${WIN_PACKAGE_WORK_DIR})/pytorch3d-src"
|
| 74 |
+
git config --system core.longpaths true
|
| 75 |
+
|
| 76 |
+
if [[ ! -d "$pytorch3d_rootdir" ]]; then
|
| 77 |
+
rm -rf "$pytorch3d_rootdir"
|
| 78 |
+
git clone SOURCE_DIR/../.. "$pytorch3d_rootdir"
|
| 79 |
+
|
| 80 |
+
fi
|
| 81 |
+
|
| 82 |
+
cd "$SOURCE_DIR"
|
| 83 |
+
|
| 84 |
+
export tmp_conda="${WIN_PACKAGE_WORK_DIR}\\conda"
|
| 85 |
+
export miniconda_exe="${WIN_PACKAGE_WORK_DIR}\\miniconda.exe"
|
| 86 |
+
rm -rf "$tmp_conda"
|
| 87 |
+
rm -f "$miniconda_exe"
|
| 88 |
+
curl -sSk https://repo.continuum.io/miniconda/Miniconda3-latest-Windows-x86_64.exe -o "$miniconda_exe"
|
| 89 |
+
"$SOURCE_DIR/install_conda.bat" && rm "$miniconda_exe"
|
| 90 |
+
pushd $tmp_conda
|
| 91 |
+
export PATH="$(pwd):$(pwd)/Library/usr/bin:$(pwd)/Library/bin:$(pwd)/Scripts:$(pwd)/bin:$PATH"
|
| 92 |
+
popd
|
| 93 |
+
retry conda install -yq conda-build
|
| 94 |
+
|
| 95 |
+
ANACONDA_USER=pytorch-nightly
|
| 96 |
+
conda config --set anaconda_upload no
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
if [[ "$desired_cuda" == 'cpu' ]]; then
|
| 100 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT=""
|
| 101 |
+
export CONDA_CPUONLY_FEATURE="- cpuonly # [not osx]"
|
| 102 |
+
export CUDA_VERSION="None"
|
| 103 |
+
else
|
| 104 |
+
export CONDA_CPUONLY_FEATURE=""
|
| 105 |
+
. ./switch_cuda_version.sh $desired_cuda
|
| 106 |
+
if [[ "$desired_cuda" == "10.1" ]]; then
|
| 107 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.1,<10.2 # [not osx]"
|
| 108 |
+
elif [[ "$desired_cuda" == "10.0" ]]; then
|
| 109 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.0,<10.1 # [not osx]"
|
| 110 |
+
elif [[ "$desired_cuda" == "9.2" ]]; then
|
| 111 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=9.2,<9.3 # [not osx]"
|
| 112 |
+
elif [[ "$desired_cuda" == "9.0" ]]; then
|
| 113 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=9.0,<9.1 # [not osx]"
|
| 114 |
+
elif [[ "$desired_cuda" == "8.0" ]]; then
|
| 115 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=8.0,<8.1 # [not osx]"
|
| 116 |
+
else
|
| 117 |
+
echo "unhandled desired_cuda: $desired_cuda"
|
| 118 |
+
exit 1
|
| 119 |
+
fi
|
| 120 |
+
fi
|
| 121 |
+
|
| 122 |
+
if [[ -z "$PYTORCH_VERSION" ]]; then
|
| 123 |
+
export CONDA_CHANNEL_FLAGS="-c pytorch-nightly"
|
| 124 |
+
export PYTORCH_VERSION="$(conda search --json 'pytorch[channel=pytorch-nightly]' | \
|
| 125 |
+
python -c "import os, sys, json, re; cuver = '$cuver'; \
|
| 126 |
+
cuver = cuver.replace('cu', 'cuda') if cuver != 'cpu' else cuver; \
|
| 127 |
+
print(re.sub(r'\\+.*$', '', \
|
| 128 |
+
[x['version'] for x in json.load(sys.stdin)['pytorch'] \
|
| 129 |
+
if (x['platform'] == 'darwin' or cuver in x['fn']) \
|
| 130 |
+
and 'py' + os.environ['DESIRED_PYTHON'] in x['fn']][-1]))")"
|
| 131 |
+
if [[ -z "$PYTORCH_VERSION" ]]; then
|
| 132 |
+
echo "PyTorch version auto detection failed"
|
| 133 |
+
echo "No package found for desired_cuda=$desired_cuda and DESIRED_PYTHON=$DESIRED_PYTHON"
|
| 134 |
+
exit 1
|
| 135 |
+
fi
|
| 136 |
+
else
|
| 137 |
+
export CONDA_CHANNEL_FLAGS="-c pytorch -c pytorch-nightly"
|
| 138 |
+
fi
|
| 139 |
+
if [[ "$desired_cuda" == 'cpu' ]]; then
|
| 140 |
+
export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==$PYTORCH_VERSION"
|
| 141 |
+
export CONDA_PYTORCH_CONSTRAINT="- pytorch==$PYTORCH_VERSION"
|
| 142 |
+
else
|
| 143 |
+
export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==${PYTORCH_VERSION}"
|
| 144 |
+
export CONDA_PYTORCH_CONSTRAINT="- pytorch==${PYTORCH_VERSION}"
|
| 145 |
+
fi
|
| 146 |
+
|
| 147 |
+
export PYTORCH_VERSION_NODOT=${PYTORCH_VERSION//./}
|
| 148 |
+
|
| 149 |
+
# Loop through all Python versions to build a package for each
|
| 150 |
+
for py_ver in "${DESIRED_PYTHON[@]}"; do
|
| 151 |
+
build_string="py${py_ver}_${build_string_suffix}"
|
| 152 |
+
folder_tag="${build_string}_$(date +'%Y%m%d')"
|
| 153 |
+
|
| 154 |
+
# Create the conda package into this temporary folder. This is so we can find
|
| 155 |
+
# the package afterwards, as there's no easy way to extract the final filename
|
| 156 |
+
# from conda-build
|
| 157 |
+
output_folder="out_$folder_tag"
|
| 158 |
+
rm -rf "$output_folder"
|
| 159 |
+
mkdir "$output_folder"
|
| 160 |
+
|
| 161 |
+
export VSTOOLCHAIN_PACKAGE=vs2017
|
| 162 |
+
|
| 163 |
+
# We need to build the compiler activation scripts first on Windows
|
| 164 |
+
time VSDEVCMD_ARGS=${VSDEVCMD_ARGS[@]} \
|
| 165 |
+
conda build -c "$ANACONDA_USER" \
|
| 166 |
+
--no-anaconda-upload \
|
| 167 |
+
--output-folder "$output_folder" \
|
| 168 |
+
../$VSTOOLCHAIN_PACKAGE
|
| 169 |
+
|
| 170 |
+
cp ../$VSTOOLCHAIN_PACKAGE/conda_build_config.yaml ../pytorch3d/conda_build_config.yaml
|
| 171 |
+
|
| 172 |
+
conda config --set anaconda_upload no
|
| 173 |
+
echo "Calling conda-build at $(date)"
|
| 174 |
+
if [[ "$desired_cuda" == "9.2" ]]; then
|
| 175 |
+
time CMAKE_ARGS=${CMAKE_ARGS[@]} \
|
| 176 |
+
BUILD_VERSION="$PYTORCH3D_BUILD_VERSION" \
|
| 177 |
+
CU_VERSION="$cuver" \
|
| 178 |
+
SOURCE_ROOT_DIR="$pytorch3d_rootdir" \
|
| 179 |
+
conda build -c "$ANACONDA_USER" \
|
| 180 |
+
-c defaults \
|
| 181 |
+
-c conda-forge \
|
| 182 |
+
-c "numba/label/dev" \
|
| 183 |
+
--no-anaconda-upload \
|
| 184 |
+
--python "$py_ver" \
|
| 185 |
+
--output-folder "$output_folder" \
|
| 186 |
+
--no-verify \
|
| 187 |
+
--no-test \
|
| 188 |
+
../pytorch3d
|
| 189 |
+
else
|
| 190 |
+
time CMAKE_ARGS=${CMAKE_ARGS[@]} \
|
| 191 |
+
BUILD_VERSION="$PYTORCH3D_BUILD_VERSION" \
|
| 192 |
+
CU_VERSION="$cuver" \
|
| 193 |
+
SOURCE_ROOT_DIR="$pytorch3d_rootdir" \
|
| 194 |
+
conda build -c "$ANACONDA_USER" \
|
| 195 |
+
-c defaults \
|
| 196 |
+
-c conda-forge \
|
| 197 |
+
--no-anaconda-upload \
|
| 198 |
+
--python "$py_ver" \
|
| 199 |
+
--output-folder "$output_folder" \
|
| 200 |
+
--no-verify \
|
| 201 |
+
--no-test \
|
| 202 |
+
../pytorch3d
|
| 203 |
+
fi
|
| 204 |
+
echo "Finished conda-build at $(date)"
|
| 205 |
+
|
| 206 |
+
# Extract the package for testing
|
| 207 |
+
ls -lah "$output_folder"
|
| 208 |
+
built_package="$(find $output_folder/ -name '*pytorch3d*.tar.bz2')"
|
| 209 |
+
|
| 210 |
+
# Copy the built package to the host machine for persistence before testing
|
| 211 |
+
if [[ -n "$PYTORCH_FINAL_PACKAGE_DIR" ]]; then
|
| 212 |
+
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true
|
| 213 |
+
cp "$built_package" "$PYTORCH_FINAL_PACKAGE_DIR/"
|
| 214 |
+
fi
|
| 215 |
+
done
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
set +e
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/install_conda.bat
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
@REM All rights reserved.
|
| 3 |
+
@REM
|
| 4 |
+
@REM This source code is licensed under the BSD-style license found in the
|
| 5 |
+
@REM LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
start /wait "" "%miniconda_exe%" /S /InstallationType=JustMe /RegisterPython=0 /AddToPath=0 /D=%tmp_conda%
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/conda/switch_cuda_version.sh
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
|
| 2 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD-style license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
|
| 8 |
+
if [[ "$OSTYPE" == "msys" ]]; then
|
| 9 |
+
CUDA_DIR="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v$1"
|
| 10 |
+
else
|
| 11 |
+
CUDA_DIR="/usr/local/cuda-$1"
|
| 12 |
+
fi
|
| 13 |
+
|
| 14 |
+
if ! ls "$CUDA_DIR"
|
| 15 |
+
then
|
| 16 |
+
echo "folder $CUDA_DIR not found to switch"
|
| 17 |
+
fi
|
| 18 |
+
|
| 19 |
+
echo "Switching symlink to $CUDA_DIR"
|
| 20 |
+
mkdir -p /usr/local
|
| 21 |
+
rm -fr /usr/local/cuda
|
| 22 |
+
ln -s "$CUDA_DIR" /usr/local/cuda
|
| 23 |
+
|
| 24 |
+
if [[ "$OSTYPE" == "msys" ]]; then
|
| 25 |
+
export CUDA_VERSION=`ls /usr/local/cuda/bin/cudart64*.dll | head -1 | tr '._' ' ' | cut -d ' ' -f2`
|
| 26 |
+
export CUDNN_VERSION=`ls /usr/local/cuda/bin/cudnn64*.dll | head -1 | tr '._' ' ' | cut -d ' ' -f2`
|
| 27 |
+
else
|
| 28 |
+
export CUDA_VERSION=$(ls /usr/local/cuda/lib64/libcudart.so.*|sort|tac | head -1 | rev | cut -d"." -f -3 | rev)
|
| 29 |
+
export CUDNN_VERSION=$(ls /usr/local/cuda/lib64/libcudnn.so.*|sort|tac | head -1 | rev | cut -d"." -f -3 | rev)
|
| 30 |
+
fi
|
| 31 |
+
|
| 32 |
+
ls -alh /usr/local/cuda
|
| 33 |
+
|
| 34 |
+
echo "CUDA_VERSION=$CUDA_VERSION"
|
| 35 |
+
echo "CUDNN_VERSION=$CUDNN_VERSION"
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/cub_conda/README.md
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## For building conda package for NVIDIA CUB
|
| 2 |
+
|
| 3 |
+
CUB is required for building PyTorch3D so it makes sense
|
| 4 |
+
to provide a conda package to make its header files available.
|
| 5 |
+
This directory is used to do that, it is independent of the rest
|
| 6 |
+
of this repo.
|
| 7 |
+
|
| 8 |
+
Make sure you are in a conda environment with
|
| 9 |
+
anaconda-client and conda-build installed.
|
| 10 |
+
|
| 11 |
+
From this directory, build the package with the following.
|
| 12 |
+
```
|
| 13 |
+
mkdir -p ./out
|
| 14 |
+
conda build --no-anaconda-upload --output-folder ./out cub
|
| 15 |
+
```
|
| 16 |
+
|
| 17 |
+
You can then upload the package with the following.
|
| 18 |
+
```
|
| 19 |
+
retry () {
|
| 20 |
+
# run a command, and try again if it fails
|
| 21 |
+
$* || (echo && sleep 8 && echo retrying && $*)
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
file=out/linux-64/nvidiacub-1.10.0-0.tar.bz2
|
| 25 |
+
retry anaconda --verbose -t ${TOKEN} upload -u pytorch3d --force ${file} --no-progress
|
| 26 |
+
```
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/cub_conda/cub/meta.yaml
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
package:
|
| 2 |
+
name: nvidiacub
|
| 3 |
+
version: 1.10.0
|
| 4 |
+
source:
|
| 5 |
+
url: https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
|
| 6 |
+
folder: source
|
| 7 |
+
build:
|
| 8 |
+
script: mkdir $PREFIX/include && cp -r source/cub $PREFIX/include/cub
|
| 9 |
+
|
| 10 |
+
about:
|
| 11 |
+
home: https://github.com/NVIDIA/cub
|
| 12 |
+
summary: CUB provides state-of-the-art, reusable software components for every layer of the CUDA programming model.
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/README.md
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## Building Linux pip Packages
|
| 2 |
+
|
| 3 |
+
1. Make sure this directory is on a filesystem which docker can
|
| 4 |
+
use - e.g. not NFS. If you are using a local hard drive there is
|
| 5 |
+
nothing to do here.
|
| 6 |
+
|
| 7 |
+
2. You may want to `docker pull pytorch/conda-cuda:latest`.
|
| 8 |
+
|
| 9 |
+
3. Run `bash go.sh` in this directory. This takes ages
|
| 10 |
+
and writes packages to `inside/output`.
|
| 11 |
+
|
| 12 |
+
4. You can upload the packages to s3, along with basic html files
|
| 13 |
+
which enable them to be used, with `bash after.sh`.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
In particular, if you are in a jupyter/colab notebook you can
|
| 17 |
+
then install using these wheels with the following series of
|
| 18 |
+
commands.
|
| 19 |
+
|
| 20 |
+
```
|
| 21 |
+
import sys
|
| 22 |
+
import torch
|
| 23 |
+
pyt_version_str=torch.__version__.split("+")[0].replace(".", "")
|
| 24 |
+
version_str="".join([
|
| 25 |
+
f"py3{sys.version_info.minor}_cu",
|
| 26 |
+
torch.version.cuda.replace(".",""),
|
| 27 |
+
f"_pyt{pyt_version_str}"
|
| 28 |
+
])
|
| 29 |
+
!pip install iopath
|
| 30 |
+
!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
|
| 31 |
+
```
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/after.sh
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/bash
|
| 2 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD-style license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
|
| 8 |
+
set -ex
|
| 9 |
+
sudo chown -R "$USER" output
|
| 10 |
+
python publish.py
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/go.sh
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# Launch one build container per supported CUDA toolkit. Each container
# runs inside.sh, which builds wheels for every python/pytorch combination
# that matches its SELECTED_CUDA value.

# Some directory to persist downloaded conda packages between runs.
conda_cache=/raid/$USER/building_conda_cache

mkdir -p "$conda_cache"

# One pytorch/conda-builder image exists per CUDA version; the image tag
# (cudaXYZ) and the SELECTED_CUDA value (cuXYZ) must refer to the same
# toolkit.
for cuda in cu113 cu115 cu116 cu117 cu118
do
    sudo docker run --rm \
        -v "$conda_cache:/conda_cache" \
        -v "$PWD/../../:/inside" \
        -e SELECTED_CUDA="$cuda" \
        "pytorch/conda-builder:cuda${cuda#cu}" \
        bash inside/packaging/linux_wheels/inside.sh
done
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/inside.sh
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# Runs inside a pytorch/conda-builder container (see go.sh). Builds
# pytorch3d wheels for every supported (python, pytorch) pair whose CUDA
# requirement matches the SELECTED_CUDA environment variable, and copies
# them to packaging/linux_wheels/output/<tag>/.

set -ex

conda init bash
# shellcheck source=/dev/null
source ~/.bashrc

cd /inside
# Read the package version straight out of the source tree.
VERSION=$(python -c "exec(open('pytorch3d/__init__.py').read()); print(__version__)")

export BUILD_VERSION=$VERSION
export FORCE_CUDA=1
export MAX_JOBS=8
# Share downloaded conda packages across containers via the mounted cache.
export CONDA_PKGS_DIRS=/conda_cache

if false
then
    # We used to have to do this for old versions of CUDA
    wget --no-verbose https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
    tar xzf 1.10.0.tar.gz
    CUB_HOME=$(realpath ./cub-1.10.0)
    export CUB_HOME
    echo "CUB_HOME is now $CUB_HOME"
fi

# As a rule, we want to build for any combination of dependencies which is supported by
# PyTorch3D and not older than the current Google Colab set up.

PYTHON_VERSIONS="3.8 3.9 3.10"
# the keys are pytorch versions
declare -A CONDA_CUDA_VERSIONS=(
    # ["1.11.0"]="cu113"
    # ["1.12.0"]="cu113"
    # ["1.12.1"]="cu113"
    # ["1.13.0"]="cu116"
    # ["1.13.1"]="cu116 cu117"
    # ["2.0.0"]="cu117 cu118"
    ["2.0.1"]="cu117 cu118"
)


for python_version in $PYTHON_VERSIONS
do
    for pytorch_version in "${!CONDA_CUDA_VERSIONS[@]}"
    do
        if [[ "3.7 3.8 3.9" != *$python_version* ]] && [[ "1.7.0 1.7.1 1.8.0 1.8.1 1.9.0 1.9.1 1.10.0 1.10.1 1.10.2" == *$pytorch_version* ]]
        then
            # python 3.10 and later not supported by pytorch 1.10.2 and before
            continue
        fi

        # How the CUDA runtime must be requested from conda changed across
        # pytorch releases.
        extra_channel="-c nvidia"
        cudatools="pytorch-cuda"
        if [[ "1.11.0" == "$pytorch_version" ]]
        then
            extra_channel=""
            cudatools="cudatoolkit"
        fi
        if [[ "1.12.0" == "$pytorch_version" ]] || [[ "1.12.1" == "$pytorch_version" ]]
        then
            extra_channel="-c conda-forge"
            cudatools="cudatoolkit"
        fi

        for cu_version in ${CONDA_CUDA_VERSIONS[$pytorch_version]}
        do
            if [[ $SELECTED_CUDA != "$cu_version" ]]
            then
                continue
            fi

            # Derive the toolkit location and nvcc target architectures from
            # the cuXYZ tag rather than keeping one near-identical case arm
            # per version. Only versions we have toolkits for are accepted.
            case "$cu_version" in
                cu101|cu102|cu110|cu111|cu112|cu113|cu115|cu116|cu117|cu118) ;;
                *)
                    echo "Unrecognized cu_version=$cu_version"
                    exit 1
                    ;;
            esac
            digits="${cu_version#cu}"
            CUDA_TAG="${digits:0:2}.${digits:2}"   # e.g. cu118 -> 11.8
            export CUDA_TAG
            export CUDA_HOME="/usr/local/cuda-${CUDA_TAG}/"

            # Architectures common to every toolkit we build for.
            archs="35 50 60 70 75"
            # sm_80 requires CUDA 11.0+.
            if [[ "$cu_version" != cu101 && "$cu_version" != cu102 ]]
            then
                archs="$archs 80"
            fi
            # sm_86 requires CUDA 11.1+.
            if [[ "$cu_version" != cu101 && "$cu_version" != cu102 && "$cu_version" != cu110 ]]
            then
                archs="$archs 86"
            fi
            flags=""
            for arch in $archs
            do
                flags="$flags -gencode=arch=compute_${arch},code=sm_${arch}"
            done
            # Also emit PTX for the oldest architecture, for forward
            # compatibility with GPUs newer than any sm_* listed.
            export NVCC_FLAGS="${flags# } -gencode=arch=compute_50,code=compute_50"

            tag=py"${python_version//./}"_"${cu_version}"_pyt"${pytorch_version//./}"

            outdir="/inside/packaging/linux_wheels/output/$tag"
            # Skip combinations already built by a previous (partial) run.
            if [[ -d "$outdir" ]]
            then
                continue
            fi

            conda create -y -n "$tag" "python=$python_version"
            conda activate "$tag"
            # shellcheck disable=SC2086
            conda install -y -c pytorch $extra_channel "pytorch=$pytorch_version" "$cudatools=$CUDA_TAG"
            pip install iopath
            echo "python version" "$python_version" "pytorch version" "$pytorch_version" "cuda version" "$cu_version" "tag" "$tag"

            rm -rf dist

            python setup.py clean
            python setup.py bdist_wheel

            rm -rf "$outdir"
            mkdir -p "$outdir"
            cp dist/*whl "$outdir"

            conda deactivate
        done
    done
done
echo "DONE"
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/linux_wheels/publish.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
import subprocess
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from typing import List
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Destination prefix on S3 for the wheel files and their download.html indexes.
dest = "s3://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/"

# Local directory where inside.sh deposited one subdirectory per build tag.
output = Path("output")
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def aws_s3_cmd(args) -> List[str]:
    """Build the full argv for an ``aws s3`` invocation using the saml profile."""
    return ["aws", "s3", "--profile", "saml", *args]
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def fs3_exists(path) -> bool:
    """
    Return True if *path* exists inside dest on S3.

    Note: this also returns True when some other file merely has the given
    path as a prefix, so callers must be careful with partial names.
    """
    listing = subprocess.check_output(aws_s3_cmd(["ls", path]))
    return bool(listing)
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def get_html_wrappers() -> None:
    """Fetch any pre-existing download.html from S3 into each output directory."""
    for subdir in sorted(output.iterdir()):
        local_wrapper = subdir / "download.html"
        # A local wrapper must not already exist; it would be overwritten.
        assert not local_wrapper.exists()
        remote_wrapper = dest + subdir.name + "/download.html"
        if fs3_exists(remote_wrapper):
            subprocess.check_call(
                aws_s3_cmd(["cp", remote_wrapper, str(local_wrapper)])
            )
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def write_html_wrappers() -> None:
    """
    Create or extend download.html in each output directory so pip's ``-f``
    option can discover the single wheel stored there.
    """
    template = """
<a href="$">$</a><br>
"""

    for subdir in sorted(output.iterdir()):
        wheels = list(subdir.glob("*.whl"))
        assert len(wheels) == 1, wheels
        [wheel] = wheels

        entry = template.replace("$", wheel.name)
        wrapper = subdir / "download.html"
        if not wrapper.exists():
            wrapper.write_text(entry)
        elif entry not in wrapper.read_text():
            # Keep any entries fetched from S3 and append only if missing.
            with open(wrapper, "a") as handle:
                handle.write(entry)
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def to_aws() -> None:
    """Upload every file under output/ to the matching location under dest."""
    for subdir in output.iterdir():
        for item in subdir.iterdir():
            print(item)
            remote = dest + str(item.relative_to(output))
            subprocess.check_call(aws_s3_cmd(["cp", str(item), remote]))
| 73 |
+
|
| 74 |
+
|
| 75 |
+
if __name__ == "__main__":
    # On the first upload of a release the remote download.html files do not
    # exist yet; on later uploads, fetching them first lets new wheel entries
    # be appended instead of clobbering the existing index.
    # Uncomment this for subsequent releases.
    # get_html_wrappers()
    write_html_wrappers()
    to_aws()
| 80 |
+
|
| 81 |
+
|
| 82 |
+
# see all files with
|
| 83 |
+
# aws s3 --profile saml ls --recursive s3://dl.fbaipublicfiles.com/pytorch3d/
|
| 84 |
+
|
| 85 |
+
# empty current with
|
| 86 |
+
# aws s3 --profile saml rm --recursive
|
| 87 |
+
# s3://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/pkg_helpers.bash
ADDED
|
@@ -0,0 +1,390 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
# shellcheck shell=bash
|
| 8 |
+
# A set of useful bash functions for common functionality we need to do in
|
| 9 |
+
# many build scripts
|
| 10 |
+
|
| 11 |
+
# Setup CUDA environment variables, based on CU_VERSION
|
| 12 |
+
#
|
| 13 |
+
# Inputs:
|
| 14 |
+
# CU_VERSION (cu92, cu100, cu101, cu102)
|
| 15 |
+
# NO_CUDA_PACKAGE (bool)
|
| 16 |
+
# BUILD_TYPE (conda, wheel)
|
| 17 |
+
#
|
| 18 |
+
# Outputs:
|
| 19 |
+
# VERSION_SUFFIX (e.g., "")
|
| 20 |
+
# PYTORCH_VERSION_SUFFIX (e.g., +cpu)
|
| 21 |
+
# WHEEL_DIR (e.g., cu100/)
|
| 22 |
+
# CUDA_HOME (e.g., /usr/local/cuda-9.2, respected by torch.utils.cpp_extension)
|
| 23 |
+
# FORCE_CUDA (respected by pytorch3d setup.py)
|
| 24 |
+
# NVCC_FLAGS (respected by pytorch3d setup.py)
|
| 25 |
+
#
|
| 26 |
+
# Precondition: CUDA versions are installed in their conventional locations in
|
| 27 |
+
# /usr/local/cuda-*
|
| 28 |
+
#
|
| 29 |
+
# NOTE: Why VERSION_SUFFIX versus PYTORCH_VERSION_SUFFIX? If you're building
|
| 30 |
+
# a package with CUDA on a platform we support CUDA on, VERSION_SUFFIX ==
|
| 31 |
+
# PYTORCH_VERSION_SUFFIX and everyone is happy. However, if you are building a
|
| 32 |
+
# package with only CPU bits (e.g., torchaudio), then VERSION_SUFFIX is always
|
| 33 |
+
# empty, but PYTORCH_VERSION_SUFFIX is +cpu (because that's how you get a CPU
|
| 34 |
+
# version of a Python package. But that doesn't apply if you're on OS X,
|
| 35 |
+
# since the default CU_VERSION on OS X is cpu.
|
| 36 |
+
setup_cuda() {
  # Configure CUDA-related environment variables from CU_VERSION.
  #
  # Outputs: VERSION_SUFFIX, PYTORCH_VERSION_SUFFIX, WHEEL_DIR, CUDA_HOME,
  # FORCE_CUDA, NVCC_FLAGS (see the header comment above this function).

  # First, compute version suffixes. By default, assume no version suffixes
  export VERSION_SUFFIX=""
  export PYTORCH_VERSION_SUFFIX=""
  export WHEEL_DIR=""
  # Wheel builds need suffixes (but not if they're on OS X, which never has suffix)
  if [[ "$BUILD_TYPE" == "wheel" ]] && [[ "$(uname)" != Darwin ]]; then
    # The default CUDA has no suffix
    if [[ "$CU_VERSION" != "cu102" ]]; then
      export PYTORCH_VERSION_SUFFIX="+$CU_VERSION"
    fi
    # Match the suffix scheme of pytorch, unless this package does not have
    # CUDA builds (in which case, use default)
    if [[ -z "$NO_CUDA_PACKAGE" ]]; then
      export VERSION_SUFFIX="$PYTORCH_VERSION_SUFFIX"
      export WHEEL_DIR="$CU_VERSION/"
    fi
  fi

  # CPU-only builds need none of the CUDA settings below.
  if [[ "$CU_VERSION" == cpu ]]; then
    return
  fi

  # All supported versions share the same structure, so derive everything
  # from the cuXYZ tag instead of keeping one near-identical case arm per
  # version. Only versions we have toolkits for are accepted.
  case "$CU_VERSION" in
    cu92|cu100|cu101|cu102|cu110|cu111|cu112|cu113|cu115|cu116) ;;
    *)
      echo "Unrecognized CU_VERSION=$CU_VERSION"
      exit 1
      ;;
  esac

  local digits dotted
  digits="${CU_VERSION#cu}"
  if [[ ${#digits} -eq 2 ]]; then
    dotted="${digits:0:1}.${digits:1}"   # e.g. cu92 -> 9.2
  else
    dotted="${digits:0:2}.${digits:2}"   # e.g. cu113 -> 11.3
  fi

  if [[ "$OSTYPE" == "msys" ]]; then
    export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v${dotted}"
  else
    export CUDA_HOME="/usr/local/cuda-${dotted}/"
  fi
  export FORCE_CUDA=1

  # Hard-coding gencode flags is temporary situation until
  # https://github.com/pytorch/pytorch/pull/23408 lands
  local archs="35 50 60 70"
  if [[ "$CU_VERSION" != cu92 ]]; then
    archs="$archs 75"                    # sm_75 requires CUDA 10.0+
  fi
  if [[ "$dotted" == 11.* ]]; then
    archs="$archs 80"                    # sm_80 requires CUDA 11.0+
  fi
  if [[ "$dotted" == 11.* && "$CU_VERSION" != cu110 ]]; then
    archs="$archs 86"                    # sm_86 requires CUDA 11.1+
  fi
  local flags="" arch
  for arch in $archs; do
    flags="$flags -gencode=arch=compute_${arch},code=sm_${arch}"
  done
  # Also emit PTX for the oldest architecture, for forward compatibility.
  export NVCC_FLAGS="${flags# } -gencode=arch=compute_50,code=compute_50"
}
|
| 174 |
+
|
| 175 |
+
# Populate build version if necessary, and add version suffix
|
| 176 |
+
#
|
| 177 |
+
# Inputs:
|
| 178 |
+
# BUILD_VERSION (e.g., 0.2.0 or empty)
|
| 179 |
+
# VERSION_SUFFIX (e.g., +cpu)
|
| 180 |
+
#
|
| 181 |
+
# Outputs:
|
| 182 |
+
# BUILD_VERSION (e.g., 0.2.0.dev20190807+cpu)
|
| 183 |
+
#
|
| 184 |
+
# Fill BUILD_VERSION if it doesn't exist already with a nightly string
|
| 185 |
+
# Usage: setup_build_version 0.2.0
|
| 186 |
+
setup_build_version() {
  # Append VERSION_SUFFIX to an explicit BUILD_VERSION, or synthesize a
  # dated nightly version ($1.devYYYYMMDD + suffix) when none was given.
  if [[ -n "$BUILD_VERSION" ]]; then
    export BUILD_VERSION="$BUILD_VERSION$VERSION_SUFFIX"
  else
    export BUILD_VERSION="$1.dev$(date "+%Y%m%d")$VERSION_SUFFIX"
  fi
}
|
| 193 |
+
|
| 194 |
+
# Set some useful variables for OS X, if applicable
|
| 195 |
+
setup_macos() {
  # No-op outside OS X; otherwise force clang and a deployment target so
  # the produced binaries stay usable on older OS X releases.
  if [[ "$(uname)" != Darwin ]]; then
    return
  fi
  export MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++
}
|
| 200 |
+
|
| 201 |
+
# Top-level entry point for things every package will need to do
|
| 202 |
+
#
|
| 203 |
+
# Usage: setup_env 0.2.0
|
| 204 |
+
setup_env() {
  # Order matters: setup_cuda populates VERSION_SUFFIX, which
  # setup_build_version then appends to BUILD_VERSION.
  setup_cuda
  setup_build_version "$1"
  setup_macos
}
|
| 209 |
+
|
| 210 |
+
# Function to retry functions that sometimes timeout or have flaky failures
|
| 211 |
+
# Function to retry functions that sometimes timeout or have flaky failures.
# Runs the given command up to 5 times, sleeping 1/2/4/8 seconds between
# attempts, and returns the status of the first success (or the last failure).
retry () {
  # "$@" (not unquoted $*) preserves the original word boundaries, so
  # arguments containing spaces are passed through intact.
  "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") || (sleep 4 && "$@") || (sleep 8 && "$@")
}
|
| 214 |
+
|
| 215 |
+
# Inputs:
|
| 216 |
+
# PYTHON_VERSION (2.7, 3.5, 3.6, 3.7)
|
| 217 |
+
# UNICODE_ABI (bool)
|
| 218 |
+
#
|
| 219 |
+
# Outputs:
|
| 220 |
+
# PATH modified to put correct Python version in PATH
|
| 221 |
+
#
|
| 222 |
+
# Precondition: If Linux, you are in a soumith/manylinux-cuda* Docker image
|
| 223 |
+
setup_wheel_python() {
  # Put the requested PYTHON_VERSION interpreter on PATH.
  #
  # On OS X this creates and activates a fresh conda environment; on Linux
  # it assumes a manylinux image, where each interpreter lives under
  # /opt/python/<abi-tag>/bin.
  if [[ "$(uname)" == Darwin ]]; then
    eval "$(conda shell.bash hook)"
    conda env remove -n "env$PYTHON_VERSION" || true
    conda create -yn "env$PYTHON_VERSION" python="$PYTHON_VERSION"
    conda activate "env$PYTHON_VERSION"
  else
    case "$PYTHON_VERSION" in
      2.7)
        if [[ -n "$UNICODE_ABI" ]]; then
          python_abi=cp27-cp27mu
        else
          python_abi=cp27-cp27m
        fi
        ;;
      3.5) python_abi=cp35-cp35m ;;
      3.6) python_abi=cp36-cp36m ;;
      3.7) python_abi=cp37-cp37m ;;
      # CPython dropped the "m" ABI flag from 3.8 onwards.
      3.8) python_abi=cp38-cp38 ;;
      3.9) python_abi=cp39-cp39 ;;
      3.10) python_abi=cp310-cp310 ;;
      *)
        echo "Unrecognized PYTHON_VERSION=$PYTHON_VERSION"
        exit 1
        ;;
    esac
    export PATH="/opt/python/$python_abi/bin:$PATH"
  fi
}
|
| 250 |
+
|
| 251 |
+
# Install with pip a bit more robustly than the default
|
| 252 |
+
pip_install() {
  # retry gives flaky network fetches several attempts; --progress-bar off
  # keeps CI logs readable.
  retry pip install --progress-bar off "$@"
}
|
| 255 |
+
|
| 256 |
+
# Install torch with pip, respecting PYTORCH_VERSION, and record the installed
|
| 257 |
+
# version into PYTORCH_VERSION, if applicable
|
| 258 |
+
setup_pip_pytorch_version() {
  # Install torch with pip, respecting PYTORCH_VERSION if set, and record
  # the installed version back into PYTORCH_VERSION.
  if [[ -z "$PYTORCH_VERSION" ]]; then
    # Install latest prerelease version of torch, per our nightlies, consistent
    # with the requested cuda version
    pip_install --pre torch -f "https://download.pytorch.org/whl/nightly/${WHEEL_DIR}torch_nightly.html"
    if [[ "$CUDA_VERSION" == "cpu" ]]; then
      # CUDA and CPU are ABI compatible on the CPU-only parts, so strip
      # in this case
      export PYTORCH_VERSION="$(pip show torch | grep ^Version: | sed 's/Version: *//' | sed 's/+.\+//')"
    else
      # Record the full installed version (including any +local tag).
      export PYTORCH_VERSION="$(pip show torch | grep ^Version: | sed 's/Version: *//')"
    fi
  else
    # A specific version was requested: install exactly that build,
    # searching both the stable and nightly indexes.
    pip_install "torch==$PYTORCH_VERSION$CUDA_SUFFIX" \
      -f https://download.pytorch.org/whl/torch_stable.html \
      -f https://download.pytorch.org/whl/nightly/torch_nightly.html
  fi
}
|
| 276 |
+
|
| 277 |
+
# Fill PYTORCH_VERSION with the latest conda nightly version, and
|
| 278 |
+
# CONDA_CHANNEL_FLAGS with appropriate flags to retrieve these versions
|
| 279 |
+
#
|
| 280 |
+
# You MUST have populated CUDA_SUFFIX before hand.
|
| 281 |
+
setup_conda_pytorch_constraint() {
  # Fill PYTORCH_VERSION (from the nightly channel if unset) and export the
  # conda meta.yaml constraint lines for the pytorch dependency.
  if [[ -z "$PYTORCH_VERSION" ]]; then
    export CONDA_CHANNEL_FLAGS="-c pytorch-nightly"
    # Query the pytorch-nightly channel and pick the newest build whose
    # filename matches both CU_VERSION (in either cudaXY or cudaX.Y spelling)
    # and PYTHON_VERSION; strip any "+local" suffix from the version string.
    export PYTORCH_VERSION="$(conda search --json 'pytorch[channel=pytorch-nightly]' | \
      python -c "import os, sys, json, re; cuver = os.environ.get('CU_VERSION'); \
      cuver_1 = cuver.replace('cu', 'cuda') if cuver != 'cpu' else cuver; \
      cuver_2 = (cuver[:-1] + '.' + cuver[-1]).replace('cu', 'cuda') if cuver != 'cpu' else cuver; \
      print(re.sub(r'\\+.*$', '', \
        [x['version'] for x in json.load(sys.stdin)['pytorch'] \
          if (x['platform'] == 'darwin' or cuver_1 in x['fn'] or cuver_2 in x['fn']) \
          and 'py' + os.environ['PYTHON_VERSION'] in x['fn']][-1]))")"
    if [[ -z "$PYTORCH_VERSION" ]]; then
      echo "PyTorch version auto detection failed"
      echo "No package found for CU_VERSION=$CU_VERSION and PYTHON_VERSION=$PYTHON_VERSION"
      exit 1
    fi
  else
    export CONDA_CHANNEL_FLAGS="-c pytorch"
  fi
  # For CPU builds only the build-time constraint carries the +cpu-style
  # suffix; the run-time constraint is the plain version.
  if [[ "$CU_VERSION" == cpu ]]; then
    export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==$PYTORCH_VERSION${PYTORCH_VERSION_SUFFIX}"
    export CONDA_PYTORCH_CONSTRAINT="- pytorch==$PYTORCH_VERSION"
  else
    export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==${PYTORCH_VERSION}${PYTORCH_VERSION_SUFFIX}"
    export CONDA_PYTORCH_CONSTRAINT="- pytorch==${PYTORCH_VERSION}${PYTORCH_VERSION_SUFFIX}"
  fi
  # Dotless form, e.g. 1.13.1 -> 1131, used in build strings.
  export PYTORCH_VERSION_NODOT=${PYTORCH_VERSION//./}
}
|
| 309 |
+
|
| 310 |
+
# Translate CUDA_VERSION into CUDA_CUDATOOLKIT_CONSTRAINT
|
| 311 |
+
setup_conda_cudatoolkit_constraint() {
|
| 312 |
+
export CONDA_CPUONLY_FEATURE=""
|
| 313 |
+
export CONDA_CUB_CONSTRAINT=""
|
| 314 |
+
if [[ "$(uname)" == Darwin ]]; then
|
| 315 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT=""
|
| 316 |
+
else
|
| 317 |
+
case "$CU_VERSION" in
|
| 318 |
+
cu116)
|
| 319 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.6,<11.7 # [not osx]"
|
| 320 |
+
;;
|
| 321 |
+
cu115)
|
| 322 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.5,<11.6 # [not osx]"
|
| 323 |
+
;;
|
| 324 |
+
cu113)
|
| 325 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.3,<11.4 # [not osx]"
|
| 326 |
+
;;
|
| 327 |
+
cu112)
|
| 328 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.2,<11.3 # [not osx]"
|
| 329 |
+
;;
|
| 330 |
+
cu111)
|
| 331 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.1,<11.2 # [not osx]"
|
| 332 |
+
;;
|
| 333 |
+
cu110)
|
| 334 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.0,<11.1 # [not osx]"
|
| 335 |
+
# Even though cudatoolkit 11.0 provides CUB we need our own, to control the
|
| 336 |
+
# version, because the built-in 1.9.9 in the cudatoolkit causes problems.
|
| 337 |
+
export CONDA_CUB_CONSTRAINT="- nvidiacub"
|
| 338 |
+
;;
|
| 339 |
+
cu102)
|
| 340 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.2,<10.3 # [not osx]"
|
| 341 |
+
export CONDA_CUB_CONSTRAINT="- nvidiacub"
|
| 342 |
+
;;
|
| 343 |
+
cu101)
|
| 344 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.1,<10.2 # [not osx]"
|
| 345 |
+
export CONDA_CUB_CONSTRAINT="- nvidiacub"
|
| 346 |
+
;;
|
| 347 |
+
cu100)
|
| 348 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.0,<10.1 # [not osx]"
|
| 349 |
+
export CONDA_CUB_CONSTRAINT="- nvidiacub"
|
| 350 |
+
;;
|
| 351 |
+
cu92)
|
| 352 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=9.2,<9.3 # [not osx]"
|
| 353 |
+
export CONDA_CUB_CONSTRAINT="- nvidiacub"
|
| 354 |
+
;;
|
| 355 |
+
cpu)
|
| 356 |
+
export CONDA_CUDATOOLKIT_CONSTRAINT=""
|
| 357 |
+
export CONDA_CPUONLY_FEATURE="- cpuonly"
|
| 358 |
+
;;
|
| 359 |
+
*)
|
| 360 |
+
echo "Unrecognized CU_VERSION=$CU_VERSION"
|
| 361 |
+
exit 1
|
| 362 |
+
;;
|
| 363 |
+
esac
|
| 364 |
+
fi
|
| 365 |
+
}
|
| 366 |
+
|
| 367 |
+
# Build the proper compiler package before building the final package
|
| 368 |
+
setup_visual_studio_constraint() {
|
| 369 |
+
if [[ "$OSTYPE" == "msys" ]]; then
|
| 370 |
+
export VSTOOLCHAIN_PACKAGE=vs2019
|
| 371 |
+
export VSDEVCMD_ARGS=''
|
| 372 |
+
# shellcheck disable=SC2086
|
| 373 |
+
conda build $CONDA_CHANNEL_FLAGS --no-anaconda-upload packaging/$VSTOOLCHAIN_PACKAGE
|
| 374 |
+
cp packaging/$VSTOOLCHAIN_PACKAGE/conda_build_config.yaml packaging/pytorch3d/conda_build_config.yaml
|
| 375 |
+
fi
|
| 376 |
+
}
|
| 377 |
+
|
| 378 |
+
download_nvidiacub_if_needed() {
|
| 379 |
+
case "$CU_VERSION" in
|
| 380 |
+
cu110|cu102|cu101|cu100|cu92)
|
| 381 |
+
echo "Downloading cub"
|
| 382 |
+
wget --no-verbose https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
|
| 383 |
+
tar xzf 1.10.0.tar.gz
|
| 384 |
+
CUB_HOME=$(realpath ./cub-1.10.0)
|
| 385 |
+
export CUB_HOME
|
| 386 |
+
echo "CUB_HOME is now $CUB_HOME"
|
| 387 |
+
;;
|
| 388 |
+
esac
|
| 389 |
+
# We don't need CUB for a cpu build or if cuda is 11.1 or higher
|
| 390 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/pytorch3d/meta.yaml
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
package:
|
| 2 |
+
name: pytorch3d
|
| 3 |
+
version: "{{ environ.get('BUILD_VERSION') }}"
|
| 4 |
+
|
| 5 |
+
source:
|
| 6 |
+
path: "{{ environ.get('SOURCE_ROOT_DIR') }}"
|
| 7 |
+
|
| 8 |
+
requirements:
|
| 9 |
+
build:
|
| 10 |
+
- {{ compiler('c') }} # [win]
|
| 11 |
+
{{ environ.get('CONDA_CUB_CONSTRAINT') }}
|
| 12 |
+
|
| 13 |
+
host:
|
| 14 |
+
- python
|
| 15 |
+
{{ environ.get('SETUPTOOLS_CONSTRAINT') }}
|
| 16 |
+
{{ environ.get('CONDA_PYTORCH_BUILD_CONSTRAINT') }}
|
| 17 |
+
{{ environ.get('CONDA_PYTORCH_MKL_CONSTRAINT') }}
|
| 18 |
+
{{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}
|
| 19 |
+
{{ environ.get('CONDA_CPUONLY_FEATURE') }}
|
| 20 |
+
|
| 21 |
+
run:
|
| 22 |
+
- python
|
| 23 |
+
- numpy >=1.11
|
| 24 |
+
- torchvision >=0.5
|
| 25 |
+
- iopath
|
| 26 |
+
{{ environ.get('CONDA_PYTORCH_CONSTRAINT') }}
|
| 27 |
+
{{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}
|
| 28 |
+
|
| 29 |
+
build:
|
| 30 |
+
string: py{{py}}_{{ environ['CU_VERSION'] }}_pyt{{ environ['PYTORCH_VERSION_NODOT']}}
|
| 31 |
+
script: python setup.py install --single-version-externally-managed --record=record.txt # [not win]
|
| 32 |
+
script_env:
|
| 33 |
+
- CUDA_HOME
|
| 34 |
+
- FORCE_CUDA
|
| 35 |
+
- NVCC_FLAGS
|
| 36 |
+
- MAX_JOBS
|
| 37 |
+
features:
|
| 38 |
+
{{ environ.get('CONDA_CPUONLY_FEATURE') }}
|
| 39 |
+
|
| 40 |
+
test:
|
| 41 |
+
imports:
|
| 42 |
+
- pytorch3d
|
| 43 |
+
source_files:
|
| 44 |
+
- tests
|
| 45 |
+
- docs
|
| 46 |
+
requires:
|
| 47 |
+
- imageio
|
| 48 |
+
- hydra-core
|
| 49 |
+
- accelerate
|
| 50 |
+
commands:
|
| 51 |
+
#pytest .
|
| 52 |
+
python -m unittest discover -v -s tests -t .
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
about:
|
| 56 |
+
home: https://github.com/facebookresearch/pytorch3d
|
| 57 |
+
license: BSD
|
| 58 |
+
license_file: LICENSE
|
| 59 |
+
summary: '3d Geometry for pytorch'
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/activate.bat
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
@REM All rights reserved.
|
| 3 |
+
@REM
|
| 4 |
+
@REM This source code is licensed under the BSD-style license found in the
|
| 5 |
+
@REM LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
:: Set env vars that tell distutils to use the compiler that we put on path
|
| 8 |
+
SET DISTUTILS_USE_SDK=1
|
| 9 |
+
SET MSSdk=1
|
| 10 |
+
|
| 11 |
+
SET "VS_VERSION=15.0"
|
| 12 |
+
SET "VS_MAJOR=15"
|
| 13 |
+
SET "VS_YEAR=2017"
|
| 14 |
+
|
| 15 |
+
set "MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out"
|
| 16 |
+
set "MSYS2_ENV_CONV_EXCL=CL"
|
| 17 |
+
|
| 18 |
+
:: For Python 3.5+, ensure that we link with the dynamic runtime. See
|
| 19 |
+
:: http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info
|
| 20 |
+
set "PY_VCRUNTIME_REDIST=%PREFIX%\\bin\\vcruntime140.dll"
|
| 21 |
+
|
| 22 |
+
for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [15^,16^) -property installationPath`) do (
|
| 23 |
+
if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (
|
| 24 |
+
set "VSINSTALLDIR=%%i\"
|
| 25 |
+
goto :vswhere
|
| 26 |
+
)
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
:vswhere
|
| 30 |
+
|
| 31 |
+
:: Shorten PATH to avoid the `input line too long` error.
|
| 32 |
+
SET MyPath=%PATH%
|
| 33 |
+
|
| 34 |
+
setlocal EnableDelayedExpansion
|
| 35 |
+
|
| 36 |
+
SET TempPath="%MyPath:;=";"%"
|
| 37 |
+
SET var=
|
| 38 |
+
FOR %%a IN (%TempPath%) DO (
|
| 39 |
+
IF EXIST %%~sa (
|
| 40 |
+
SET "var=!var!;%%~sa"
|
| 41 |
+
)
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
set "TempPath=!var:~1!"
|
| 45 |
+
endlocal & set "PATH=%TempPath%"
|
| 46 |
+
|
| 47 |
+
:: Shorten current directory too
|
| 48 |
+
FOR %%A IN (.) DO CD "%%~sA"
|
| 49 |
+
|
| 50 |
+
:: other things added by install_activate.bat at package build time
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/conda_build_config.yaml
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
blas_impl:
|
| 2 |
+
- mkl # [x86_64]
|
| 3 |
+
c_compiler:
|
| 4 |
+
- vs2017 # [win]
|
| 5 |
+
cxx_compiler:
|
| 6 |
+
- vs2017 # [win]
|
| 7 |
+
python:
|
| 8 |
+
- 3.5
|
| 9 |
+
- 3.6
|
| 10 |
+
# This differs from target_platform in that it determines what subdir the compiler
|
| 11 |
+
# will target, not what subdir the compiler package will be itself.
|
| 12 |
+
# For example, we need a win-64 vs2008_win-32 package, so that we compile win-32
|
| 13 |
+
# code on win-64 miniconda.
|
| 14 |
+
cross_compiler_target_platform:
|
| 15 |
+
- win-64 # [win]
|
| 16 |
+
target_platform:
|
| 17 |
+
- win-64 # [win]
|
| 18 |
+
vc:
|
| 19 |
+
- 14
|
| 20 |
+
zip_keys:
|
| 21 |
+
- # [win]
|
| 22 |
+
- vc # [win]
|
| 23 |
+
- c_compiler # [win]
|
| 24 |
+
- cxx_compiler # [win]
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/install_activate.bat
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
@REM All rights reserved.
|
| 3 |
+
@REM
|
| 4 |
+
@REM This source code is licensed under the BSD-style license found in the
|
| 5 |
+
@REM LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
set YEAR=2017
|
| 8 |
+
set VER=15
|
| 9 |
+
|
| 10 |
+
mkdir "%PREFIX%\etc\conda\activate.d"
|
| 11 |
+
COPY "%RECIPE_DIR%\activate.bat" "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 12 |
+
|
| 13 |
+
IF "%cross_compiler_target_platform%" == "win-64" (
|
| 14 |
+
set "target_platform=amd64"
|
| 15 |
+
echo SET "CMAKE_GENERATOR=Visual Studio %VER% %YEAR% Win64" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 16 |
+
echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 17 |
+
IF "%VSDEVCMD_ARGS%" == "" (
|
| 18 |
+
echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 19 |
+
echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 20 |
+
echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 21 |
+
echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x86_amd64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 22 |
+
) ELSE (
|
| 23 |
+
echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 24 |
+
echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 25 |
+
echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 26 |
+
echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x86_amd64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 27 |
+
)
|
| 28 |
+
echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 29 |
+
) else (
|
| 30 |
+
set "target_platform=x86"
|
| 31 |
+
echo SET "CMAKE_GENERATOR=Visual Studio %VER% %YEAR%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 32 |
+
echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 33 |
+
echo CALL "VC\Auxiliary\Build\vcvars32.bat" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 34 |
+
echo popd
|
| 35 |
+
)
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/install_runtime.bat
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
@REM All rights reserved.
|
| 3 |
+
@REM
|
| 4 |
+
@REM This source code is licensed under the BSD-style license found in the
|
| 5 |
+
@REM LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
set VC_PATH=x86
|
| 8 |
+
if "%ARCH%"=="64" (
|
| 9 |
+
set VC_PATH=x64
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
set MSC_VER=2017
|
| 13 |
+
|
| 14 |
+
rem :: This should always be present for VC installed with VS. Not sure about VC installed with Visual C++ Build Tools 2015
|
| 15 |
+
rem FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKEY_LOCAL_MACHINE\Software\Microsoft\DevDiv\VC\Servicing\14.0\IDE.x64" /v UpdateVersion`) DO (
|
| 16 |
+
rem set SP=%%A
|
| 17 |
+
rem )
|
| 18 |
+
|
| 19 |
+
rem if not "%SP%" == "%PKG_VERSION%" (
|
| 20 |
+
rem echo "Version detected from registry: %SP%"
|
| 21 |
+
rem echo "does not match version of package being built (%PKG_VERSION%)"
|
| 22 |
+
rem echo "Do you have current updates for VS 2015 installed?"
|
| 23 |
+
rem exit 1
|
| 24 |
+
rem )
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
REM ========== REQUIRES Win 10 SDK be installed, or files otherwise copied to location below!
|
| 28 |
+
robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%LIBRARY_BIN%" *.dll /E
|
| 29 |
+
robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%PREFIX%" *.dll /E
|
| 30 |
+
if %ERRORLEVEL% GEQ 8 exit 1
|
| 31 |
+
|
| 32 |
+
REM ========== This one comes from visual studio 2017
|
| 33 |
+
set "VC_VER=141"
|
| 34 |
+
|
| 35 |
+
for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [15^,16^) -property installationPath`) do (
|
| 36 |
+
if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (
|
| 37 |
+
set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat"
|
| 38 |
+
goto :eof
|
| 39 |
+
)
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
@setlocal
|
| 43 |
+
call "%VS15VARSALL%" x64
|
| 44 |
+
|
| 45 |
+
set "REDIST_ROOT=%VCToolsRedistDir%%VC_PATH%"
|
| 46 |
+
|
| 47 |
+
robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%LIBRARY_BIN%" *.dll /E
|
| 48 |
+
if %ERRORLEVEL% LSS 8 exit 0
|
| 49 |
+
robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%PREFIX%" *.dll /E
|
| 50 |
+
if %ERRORLEVEL% LSS 8 exit 0
|
| 51 |
+
robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%LIBRARY_BIN%" *.dll /E
|
| 52 |
+
if %ERRORLEVEL% LSS 8 exit 0
|
| 53 |
+
robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%PREFIX%" *.dll /E
|
| 54 |
+
if %ERRORLEVEL% LSS 8 exit 0
|
| 55 |
+
@endlocal
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2017/meta.yaml
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{% set vcver="14.1" %}
|
| 2 |
+
{% set vcfeature="14" %}
|
| 3 |
+
{% set vsyear="2017" %}
|
| 4 |
+
{% set fullver="15.4.27004.2010" %}
|
| 5 |
+
|
| 6 |
+
package:
|
| 7 |
+
name: vs{{ vsyear }}
|
| 8 |
+
version: {{ fullver }}
|
| 9 |
+
|
| 10 |
+
build:
|
| 11 |
+
skip: True [not win]
|
| 12 |
+
script_env:
|
| 13 |
+
- VSDEVCMD_ARGS # [win]
|
| 14 |
+
|
| 15 |
+
outputs:
|
| 16 |
+
- name: vs{{ vsyear }}_{{ cross_compiler_target_platform }}
|
| 17 |
+
script: install_activate.bat
|
| 18 |
+
track_features:
|
| 19 |
+
# VS 2017 is binary-compatible with VS 2015/vc14. Tools are "v141".
|
| 20 |
+
strong:
|
| 21 |
+
- vc{{ vcfeature }}
|
| 22 |
+
run_exports:
|
| 23 |
+
- vc {{ vcver }}
|
| 24 |
+
about:
|
| 25 |
+
summary: Activation and version verification of MSVC {{ vcver }} (VS {{ vsyear }}) compiler
|
| 26 |
+
license: BSD 3-clause
|
| 27 |
+
- name: vs{{ vsyear }}_runtime
|
| 28 |
+
script: install_runtime.bat
|
| 29 |
+
- name: vc
|
| 30 |
+
version: {{ vcver }}
|
| 31 |
+
track_features:
|
| 32 |
+
- vc{{ vcfeature }}
|
| 33 |
+
requirements:
|
| 34 |
+
run:
|
| 35 |
+
- {{ pin_subpackage('vs' ~ vsyear ~ '_runtime') }}
|
| 36 |
+
about:
|
| 37 |
+
home: https://github.com/conda/conda/wiki/VC-features
|
| 38 |
+
license: Modified BSD License (3-clause)
|
| 39 |
+
license_family: BSD
|
| 40 |
+
summary: A meta-package to track VC features.
|
| 41 |
+
description: |
|
| 42 |
+
This metapackage is used to activate vc features without
|
| 43 |
+
depending on Python.
|
| 44 |
+
doc_url: https://github.com/conda/conda/wiki/VC-features
|
| 45 |
+
dev_url: https://github.com/conda/conda/wiki/VC-features
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/activate.bat
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
@REM All rights reserved.
|
| 3 |
+
@REM
|
| 4 |
+
@REM This source code is licensed under the BSD-style license found in the
|
| 5 |
+
@REM LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
:: Set env vars that tell distutils to use the compiler that we put on path
|
| 8 |
+
SET DISTUTILS_USE_SDK=1
|
| 9 |
+
SET MSSdk=1
|
| 10 |
+
|
| 11 |
+
SET "VS_VERSION=16.0"
|
| 12 |
+
SET "VS_MAJOR=16"
|
| 13 |
+
SET "VS_YEAR=2019"
|
| 14 |
+
|
| 15 |
+
set "MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out"
|
| 16 |
+
set "MSYS2_ENV_CONV_EXCL=CL"
|
| 17 |
+
|
| 18 |
+
:: For Python 3.5+, ensure that we link with the dynamic runtime. See
|
| 19 |
+
:: http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info
|
| 20 |
+
set "PY_VCRUNTIME_REDIST=%PREFIX%\\bin\\vcruntime140.dll"
|
| 21 |
+
|
| 22 |
+
for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [16^,17^) -property installationPath`) do (
|
| 23 |
+
if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (
|
| 24 |
+
set "VSINSTALLDIR=%%i\"
|
| 25 |
+
goto :vswhere
|
| 26 |
+
)
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
:vswhere
|
| 30 |
+
|
| 31 |
+
:: Shorten PATH to avoid the `input line too long` error.
|
| 32 |
+
SET MyPath=%PATH%
|
| 33 |
+
|
| 34 |
+
setlocal EnableDelayedExpansion
|
| 35 |
+
|
| 36 |
+
SET TempPath="%MyPath:;=";"%"
|
| 37 |
+
SET var=
|
| 38 |
+
FOR %%a IN (%TempPath%) DO (
|
| 39 |
+
IF EXIST %%~sa (
|
| 40 |
+
SET "var=!var!;%%~sa"
|
| 41 |
+
)
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
set "TempPath=!var:~1!"
|
| 45 |
+
endlocal & set "PATH=%TempPath%"
|
| 46 |
+
|
| 47 |
+
:: Shorten current directory too
|
| 48 |
+
FOR %%A IN (.) DO CD "%%~sA"
|
| 49 |
+
|
| 50 |
+
:: other things added by install_activate.bat at package build time
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/conda_build_config.yaml
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
blas_impl:
|
| 2 |
+
- mkl # [x86_64]
|
| 3 |
+
c_compiler:
|
| 4 |
+
- vs2019 # [win]
|
| 5 |
+
cxx_compiler:
|
| 6 |
+
- vs2019 # [win]
|
| 7 |
+
python:
|
| 8 |
+
- 3.5
|
| 9 |
+
- 3.6
|
| 10 |
+
# This differs from target_platform in that it determines what subdir the compiler
|
| 11 |
+
# will target, not what subdir the compiler package will be itself.
|
| 12 |
+
# For example, we need a win-64 vs2008_win-32 package, so that we compile win-32
|
| 13 |
+
# code on win-64 miniconda.
|
| 14 |
+
cross_compiler_target_platform:
|
| 15 |
+
- win-64 # [win]
|
| 16 |
+
target_platform:
|
| 17 |
+
- win-64 # [win]
|
| 18 |
+
vc:
|
| 19 |
+
- 14
|
| 20 |
+
zip_keys:
|
| 21 |
+
- # [win]
|
| 22 |
+
- vc # [win]
|
| 23 |
+
- c_compiler # [win]
|
| 24 |
+
- cxx_compiler # [win]
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/install_activate.bat
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
@REM All rights reserved.
|
| 3 |
+
@REM
|
| 4 |
+
@REM This source code is licensed under the BSD-style license found in the
|
| 5 |
+
@REM LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
set YEAR=2019
|
| 8 |
+
set VER=16
|
| 9 |
+
|
| 10 |
+
mkdir "%PREFIX%\etc\conda\activate.d"
|
| 11 |
+
COPY "%RECIPE_DIR%\activate.bat" "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 12 |
+
|
| 13 |
+
IF "%cross_compiler_target_platform%" == "win-64" (
|
| 14 |
+
set "target_platform=amd64"
|
| 15 |
+
echo SET "CMAKE_GENERATOR=Visual Studio %VER% %YEAR% Win64" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 16 |
+
echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 17 |
+
IF "%VSDEVCMD_ARGS%" == "" (
|
| 18 |
+
echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 19 |
+
echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 20 |
+
echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 21 |
+
echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x86_amd64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 22 |
+
) ELSE (
|
| 23 |
+
echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 24 |
+
echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 25 |
+
echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 26 |
+
echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x86_amd64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 27 |
+
)
|
| 28 |
+
echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 29 |
+
) else (
|
| 30 |
+
set "target_platform=x86"
|
| 31 |
+
echo SET "CMAKE_GENERATOR=Visual Studio %VER% %YEAR%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 32 |
+
echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 33 |
+
echo CALL "VC\Auxiliary\Build\vcvars32.bat" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat"
|
| 34 |
+
echo popd
|
| 35 |
+
)
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/install_runtime.bat
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
@REM All rights reserved.
|
| 3 |
+
@REM
|
| 4 |
+
@REM This source code is licensed under the BSD-style license found in the
|
| 5 |
+
@REM LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
set VC_PATH=x86
|
| 8 |
+
if "%ARCH%"=="64" (
|
| 9 |
+
set VC_PATH=x64
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
set MSC_VER=2019
|
| 13 |
+
|
| 14 |
+
rem :: This should always be present for VC installed with VS. Not sure about VC installed with Visual C++ Build Tools 2015
|
| 15 |
+
rem FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKEY_LOCAL_MACHINE\Software\Microsoft\DevDiv\VC\Servicing\14.0\IDE.x64" /v UpdateVersion`) DO (
|
| 16 |
+
rem set SP=%%A
|
| 17 |
+
rem )
|
| 18 |
+
|
| 19 |
+
rem if not "%SP%" == "%PKG_VERSION%" (
|
| 20 |
+
rem echo "Version detected from registry: %SP%"
|
| 21 |
+
rem echo "does not match version of package being built (%PKG_VERSION%)"
|
| 22 |
+
rem echo "Do you have current updates for VS 2015 installed?"
|
| 23 |
+
rem exit 1
|
| 24 |
+
rem )
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
REM ========== REQUIRES Win 10 SDK be installed, or files otherwise copied to location below!
|
| 28 |
+
robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%LIBRARY_BIN%" *.dll /E
|
| 29 |
+
robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%PREFIX%" *.dll /E
|
| 30 |
+
if %ERRORLEVEL% GEQ 8 exit 1
|
| 31 |
+
|
| 32 |
+
REM ========== This one comes from visual studio 2019
|
| 33 |
+
set "VC_VER=142"
|
| 34 |
+
|
| 35 |
+
for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [16^,17^) -property installationPath`) do (
|
| 36 |
+
if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (
|
| 37 |
+
set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat"
|
| 38 |
+
goto :eof
|
| 39 |
+
)
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
@setlocal
|
| 43 |
+
call "%VS15VARSALL%" x64
|
| 44 |
+
|
| 45 |
+
set "REDIST_ROOT=%VCToolsRedistDir%%VC_PATH%"
|
| 46 |
+
|
| 47 |
+
robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%LIBRARY_BIN%" *.dll /E
|
| 48 |
+
if %ERRORLEVEL% LSS 8 exit 0
|
| 49 |
+
robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%PREFIX%" *.dll /E
|
| 50 |
+
if %ERRORLEVEL% LSS 8 exit 0
|
| 51 |
+
robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%LIBRARY_BIN%" *.dll /E
|
| 52 |
+
if %ERRORLEVEL% LSS 8 exit 0
|
| 53 |
+
robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%PREFIX%" *.dll /E
|
| 54 |
+
if %ERRORLEVEL% LSS 8 exit 0
|
| 55 |
+
@endlocal
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/packaging/vs2019/meta.yaml
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{% set vcver="14.2" %}
|
| 2 |
+
{% set vcfeature="14" %}
|
| 3 |
+
{% set vsyear="2019" %}
|
| 4 |
+
{% set fullver="15.4.27004.2010" %}
|
| 5 |
+
|
| 6 |
+
package:
|
| 7 |
+
name: vs{{ vsyear }}
|
| 8 |
+
version: {{ fullver }}
|
| 9 |
+
|
| 10 |
+
build:
|
| 11 |
+
skip: True [not win]
|
| 12 |
+
script_env:
|
| 13 |
+
- VSDEVCMD_ARGS # [win]
|
| 14 |
+
|
| 15 |
+
outputs:
|
| 16 |
+
- name: vs{{ vsyear }}_{{ cross_compiler_target_platform }}
|
| 17 |
+
script: install_activate.bat
|
| 18 |
+
track_features:
|
| 19 |
+
# VS 2019 is binary-compatible with VS 2017/vc 14.1 and 2015/vc14. Tools are "v142".
|
| 20 |
+
strong:
|
| 21 |
+
- vc{{ vcfeature }}
|
| 22 |
+
run_exports:
|
| 23 |
+
- vc {{ vcver }}
|
| 24 |
+
about:
|
| 25 |
+
summary: Activation and version verification of MSVC {{ vcver }} (VS {{ vsyear }}) compiler
|
| 26 |
+
license: BSD 3-clause
|
| 27 |
+
- name: vs{{ vsyear }}_runtime
|
| 28 |
+
script: install_runtime.bat
|
| 29 |
+
- name: vc
|
| 30 |
+
version: {{ vcver }}
|
| 31 |
+
track_features:
|
| 32 |
+
- vc{{ vcfeature }}
|
| 33 |
+
requirements:
|
| 34 |
+
run:
|
| 35 |
+
- {{ pin_subpackage('vs' ~ vsyear ~ '_runtime') }}
|
| 36 |
+
about:
|
| 37 |
+
home: https://github.com/conda/conda/wiki/VC-features
|
| 38 |
+
license: Modified BSD License (3-clause)
|
| 39 |
+
license_family: BSD
|
| 40 |
+
summary: A meta-package to track VC features.
|
| 41 |
+
description: |
|
| 42 |
+
This metapackage is used to activate vc features without
|
| 43 |
+
depending on Python.
|
| 44 |
+
doc_url: https://github.com/conda/conda/wiki/VC-features
|
| 45 |
+
dev_url: https://github.com/conda/conda/wiki/VC-features
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_normed.yaml
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_factory_ImplicitronModelFactory_args:
|
| 2 |
+
model_GenericModel_args:
|
| 3 |
+
image_feature_extractor_class_type: ResNetFeatureExtractor
|
| 4 |
+
image_feature_extractor_ResNetFeatureExtractor_args:
|
| 5 |
+
add_images: true
|
| 6 |
+
add_masks: true
|
| 7 |
+
first_max_pool: true
|
| 8 |
+
image_rescale: 0.375
|
| 9 |
+
l2_norm: true
|
| 10 |
+
name: resnet34
|
| 11 |
+
normalize_image: true
|
| 12 |
+
pretrained: true
|
| 13 |
+
stages:
|
| 14 |
+
- 1
|
| 15 |
+
- 2
|
| 16 |
+
- 3
|
| 17 |
+
- 4
|
| 18 |
+
proj_dim: 32
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_transformer.yaml
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_factory_ImplicitronModelFactory_args:
|
| 2 |
+
model_GenericModel_args:
|
| 3 |
+
image_feature_extractor_class_type: ResNetFeatureExtractor
|
| 4 |
+
image_feature_extractor_ResNetFeatureExtractor_args:
|
| 5 |
+
add_images: true
|
| 6 |
+
add_masks: true
|
| 7 |
+
first_max_pool: false
|
| 8 |
+
image_rescale: 0.375
|
| 9 |
+
l2_norm: true
|
| 10 |
+
name: resnet34
|
| 11 |
+
normalize_image: true
|
| 12 |
+
pretrained: true
|
| 13 |
+
stages:
|
| 14 |
+
- 1
|
| 15 |
+
- 2
|
| 16 |
+
- 3
|
| 17 |
+
- 4
|
| 18 |
+
proj_dim: 16
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_unnormed.yaml
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
model_factory_ImplicitronModelFactory_args:
|
| 2 |
+
model_GenericModel_args:
|
| 3 |
+
image_feature_extractor_class_type: ResNetFeatureExtractor
|
| 4 |
+
image_feature_extractor_ResNetFeatureExtractor_args:
|
| 5 |
+
stages:
|
| 6 |
+
- 1
|
| 7 |
+
- 2
|
| 8 |
+
- 3
|
| 9 |
+
first_max_pool: false
|
| 10 |
+
proj_dim: -1
|
| 11 |
+
l2_norm: false
|
| 12 |
+
image_rescale: 0.375
|
| 13 |
+
name: resnet34
|
| 14 |
+
normalize_image: true
|
| 15 |
+
pretrained: true
|
| 16 |
+
view_pooler_args:
|
| 17 |
+
feature_aggregator_AngleWeightedReductionFeatureAggregator_args:
|
| 18 |
+
reduction_functions:
|
| 19 |
+
- AVG
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_co3dv2_base.yaml
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
data_source_ImplicitronDataSource_args:
|
| 2 |
+
dataset_map_provider_class_type: JsonIndexDatasetMapProviderV2
|
| 3 |
+
dataset_map_provider_JsonIndexDatasetMapProviderV2_args:
|
| 4 |
+
category: teddybear
|
| 5 |
+
subset_name: fewview_dev
|
| 6 |
+
training_loop_ImplicitronTrainingLoop_args:
|
| 7 |
+
evaluator_ImplicitronEvaluator_args:
|
| 8 |
+
is_multisequence: true
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_idr_ad.yaml
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_base.yaml
|
| 3 |
+
- _self_
|
| 4 |
+
model_factory_ImplicitronModelFactory_args:
|
| 5 |
+
model_GenericModel_args:
|
| 6 |
+
loss_weights:
|
| 7 |
+
loss_mask_bce: 100.0
|
| 8 |
+
loss_kl: 0.0
|
| 9 |
+
loss_rgb_mse: 1.0
|
| 10 |
+
loss_eikonal: 0.1
|
| 11 |
+
chunk_size_grid: 65536
|
| 12 |
+
num_passes: 1
|
| 13 |
+
output_rasterized_mc: true
|
| 14 |
+
sampling_mode_training: mask_sample
|
| 15 |
+
global_encoder_class_type: SequenceAutodecoder
|
| 16 |
+
global_encoder_SequenceAutodecoder_args:
|
| 17 |
+
autodecoder_args:
|
| 18 |
+
n_instances: 20000
|
| 19 |
+
init_scale: 1.0
|
| 20 |
+
encoding_dim: 256
|
| 21 |
+
implicit_function_IdrFeatureField_args:
|
| 22 |
+
n_harmonic_functions_xyz: 6
|
| 23 |
+
bias: 0.6
|
| 24 |
+
d_in: 3
|
| 25 |
+
d_out: 1
|
| 26 |
+
dims:
|
| 27 |
+
- 512
|
| 28 |
+
- 512
|
| 29 |
+
- 512
|
| 30 |
+
- 512
|
| 31 |
+
- 512
|
| 32 |
+
- 512
|
| 33 |
+
- 512
|
| 34 |
+
- 512
|
| 35 |
+
geometric_init: true
|
| 36 |
+
pooled_feature_dim: 0
|
| 37 |
+
skip_in:
|
| 38 |
+
- 6
|
| 39 |
+
weight_norm: true
|
| 40 |
+
renderer_SignedDistanceFunctionRenderer_args:
|
| 41 |
+
ray_tracer_args:
|
| 42 |
+
line_search_step: 0.5
|
| 43 |
+
line_step_iters: 3
|
| 44 |
+
n_secant_steps: 8
|
| 45 |
+
n_steps: 100
|
| 46 |
+
sdf_threshold: 5.0e-05
|
| 47 |
+
ray_normal_coloring_network_args:
|
| 48 |
+
d_in: 9
|
| 49 |
+
d_out: 3
|
| 50 |
+
dims:
|
| 51 |
+
- 512
|
| 52 |
+
- 512
|
| 53 |
+
- 512
|
| 54 |
+
- 512
|
| 55 |
+
mode: idr
|
| 56 |
+
n_harmonic_functions_dir: 4
|
| 57 |
+
pooled_feature_dim: 0
|
| 58 |
+
weight_norm: true
|
| 59 |
+
raysampler_AdaptiveRaySampler_args:
|
| 60 |
+
n_rays_per_image_sampled_from_mask: 1024
|
| 61 |
+
n_pts_per_ray_training: 0
|
| 62 |
+
n_pts_per_ray_evaluation: 0
|
| 63 |
+
scene_extent: 8.0
|
| 64 |
+
renderer_class_type: SignedDistanceFunctionRenderer
|
| 65 |
+
implicit_function_class_type: IdrFeatureField
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_wce.yaml
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_base.yaml
|
| 3 |
+
- repro_feat_extractor_unnormed.yaml
|
| 4 |
+
- _self_
|
| 5 |
+
model_factory_ImplicitronModelFactory_args:
|
| 6 |
+
model_GenericModel_args:
|
| 7 |
+
chunk_size_grid: 16000
|
| 8 |
+
view_pooler_enabled: true
|
| 9 |
+
raysampler_AdaptiveRaySampler_args:
|
| 10 |
+
n_rays_per_image_sampled_from_mask: 850
|
| 11 |
+
training_loop_ImplicitronTrainingLoop_args:
|
| 12 |
+
clip_grad: 1.0
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet.yaml
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_base.yaml
|
| 3 |
+
- _self_
|
| 4 |
+
model_factory_ImplicitronModelFactory_args:
|
| 5 |
+
model_GenericModel_args:
|
| 6 |
+
chunk_size_grid: 16000
|
| 7 |
+
view_pooler_enabled: false
|
| 8 |
+
n_train_target_views: -1
|
| 9 |
+
num_passes: 1
|
| 10 |
+
loss_weights:
|
| 11 |
+
loss_rgb_mse: 200.0
|
| 12 |
+
loss_prev_stage_rgb_mse: 0.0
|
| 13 |
+
loss_mask_bce: 1.0
|
| 14 |
+
loss_prev_stage_mask_bce: 0.0
|
| 15 |
+
loss_autodecoder_norm: 0.001
|
| 16 |
+
depth_neg_penalty: 10000.0
|
| 17 |
+
global_encoder_class_type: SequenceAutodecoder
|
| 18 |
+
global_encoder_SequenceAutodecoder_args:
|
| 19 |
+
autodecoder_args:
|
| 20 |
+
encoding_dim: 256
|
| 21 |
+
n_instances: 20000
|
| 22 |
+
raysampler_class_type: NearFarRaySampler
|
| 23 |
+
raysampler_NearFarRaySampler_args:
|
| 24 |
+
n_rays_per_image_sampled_from_mask: 2048
|
| 25 |
+
min_depth: 0.05
|
| 26 |
+
max_depth: 0.05
|
| 27 |
+
n_pts_per_ray_training: 1
|
| 28 |
+
n_pts_per_ray_evaluation: 1
|
| 29 |
+
stratified_point_sampling_training: false
|
| 30 |
+
stratified_point_sampling_evaluation: false
|
| 31 |
+
renderer_class_type: LSTMRenderer
|
| 32 |
+
implicit_function_class_type: SRNHyperNetImplicitFunction
|
| 33 |
+
optimizer_factory_ImplicitronOptimizerFactory_args:
|
| 34 |
+
breed: Adam
|
| 35 |
+
lr: 5.0e-05
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce_noharm.yaml
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_srn_wce.yaml
|
| 3 |
+
- _self_
|
| 4 |
+
model_factory_ImplicitronModelFactory_args:
|
| 5 |
+
model_GenericModel_args:
|
| 6 |
+
num_passes: 1
|
| 7 |
+
implicit_function_SRNImplicitFunction_args:
|
| 8 |
+
pixel_generator_args:
|
| 9 |
+
n_harmonic_functions: 0
|
| 10 |
+
raymarch_function_args:
|
| 11 |
+
n_harmonic_functions: 0
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_base.yaml
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_base
|
| 3 |
+
- _self_
|
| 4 |
+
data_source_ImplicitronDataSource_args:
|
| 5 |
+
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
| 6 |
+
batch_size: 1
|
| 7 |
+
dataset_length_train: 1000
|
| 8 |
+
dataset_length_val: 1
|
| 9 |
+
num_workers: 8
|
| 10 |
+
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
| 11 |
+
assert_single_seq: true
|
| 12 |
+
n_frames_per_sequence: -1
|
| 13 |
+
test_restrict_sequence_id: 0
|
| 14 |
+
test_on_train: false
|
| 15 |
+
model_factory_ImplicitronModelFactory_args:
|
| 16 |
+
model_GenericModel_args:
|
| 17 |
+
render_image_height: 800
|
| 18 |
+
render_image_width: 800
|
| 19 |
+
log_vars:
|
| 20 |
+
- loss_rgb_psnr_fg
|
| 21 |
+
- loss_rgb_psnr
|
| 22 |
+
- loss_eikonal
|
| 23 |
+
- loss_prev_stage_rgb_psnr
|
| 24 |
+
- loss_mask_bce
|
| 25 |
+
- loss_prev_stage_mask_bce
|
| 26 |
+
- loss_rgb_mse
|
| 27 |
+
- loss_prev_stage_rgb_mse
|
| 28 |
+
- loss_depth_abs
|
| 29 |
+
- loss_depth_abs_fg
|
| 30 |
+
- loss_kl
|
| 31 |
+
- loss_mask_neg_iou
|
| 32 |
+
- objective
|
| 33 |
+
- epoch
|
| 34 |
+
- sec/it
|
| 35 |
+
optimizer_factory_ImplicitronOptimizerFactory_args:
|
| 36 |
+
lr: 0.0005
|
| 37 |
+
multistep_lr_milestones:
|
| 38 |
+
- 200
|
| 39 |
+
- 300
|
| 40 |
+
training_loop_ImplicitronTrainingLoop_args:
|
| 41 |
+
max_epochs: 400
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_base
|
| 3 |
+
- _self_
|
| 4 |
+
exp_dir: "./data/nerf_blender_repro/${oc.env:BLENDER_SINGLESEQ_CLASS}"
|
| 5 |
+
data_source_ImplicitronDataSource_args:
|
| 6 |
+
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
| 7 |
+
dataset_length_train: 100
|
| 8 |
+
dataset_map_provider_class_type: BlenderDatasetMapProvider
|
| 9 |
+
dataset_map_provider_BlenderDatasetMapProvider_args:
|
| 10 |
+
base_dir: ${oc.env:BLENDER_DATASET_ROOT}/${oc.env:BLENDER_SINGLESEQ_CLASS}
|
| 11 |
+
n_known_frames_for_test: null
|
| 12 |
+
object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS}
|
| 13 |
+
path_manager_factory_class_type: PathManagerFactory
|
| 14 |
+
path_manager_factory_PathManagerFactory_args:
|
| 15 |
+
silence_logs: true
|
| 16 |
+
|
| 17 |
+
model_factory_ImplicitronModelFactory_args:
|
| 18 |
+
model_GenericModel_args:
|
| 19 |
+
mask_images: false
|
| 20 |
+
raysampler_class_type: AdaptiveRaySampler
|
| 21 |
+
raysampler_AdaptiveRaySampler_args:
|
| 22 |
+
n_pts_per_ray_training: 64
|
| 23 |
+
n_pts_per_ray_evaluation: 64
|
| 24 |
+
n_rays_per_image_sampled_from_mask: 4096
|
| 25 |
+
stratified_point_sampling_training: true
|
| 26 |
+
stratified_point_sampling_evaluation: false
|
| 27 |
+
scene_extent: 2.0
|
| 28 |
+
scene_center:
|
| 29 |
+
- 0.0
|
| 30 |
+
- 0.0
|
| 31 |
+
- 0.0
|
| 32 |
+
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
| 33 |
+
density_noise_std_train: 0.0
|
| 34 |
+
n_pts_per_ray_fine_training: 128
|
| 35 |
+
n_pts_per_ray_fine_evaluation: 128
|
| 36 |
+
raymarcher_EmissionAbsorptionRaymarcher_args:
|
| 37 |
+
blend_output: false
|
| 38 |
+
loss_weights:
|
| 39 |
+
loss_rgb_mse: 1.0
|
| 40 |
+
loss_prev_stage_rgb_mse: 1.0
|
| 41 |
+
loss_mask_bce: 0.0
|
| 42 |
+
loss_prev_stage_mask_bce: 0.0
|
| 43 |
+
loss_autodecoder_norm: 0.00
|
| 44 |
+
|
| 45 |
+
optimizer_factory_ImplicitronOptimizerFactory_args:
|
| 46 |
+
exponential_lr_step_size: 3001
|
| 47 |
+
lr_policy: LinearExponential
|
| 48 |
+
linear_exponential_lr_milestone: 200
|
| 49 |
+
|
| 50 |
+
training_loop_ImplicitronTrainingLoop_args:
|
| 51 |
+
max_epochs: 6000
|
| 52 |
+
metric_print_interval: 10
|
| 53 |
+
store_checkpoints_purge: 3
|
| 54 |
+
test_when_finished: true
|
| 55 |
+
validation_interval: 100
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_wce.yaml
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_wce_base.yaml
|
| 3 |
+
- repro_feat_extractor_unnormed.yaml
|
| 4 |
+
- _self_
|
| 5 |
+
model_factory_ImplicitronModelFactory_args:
|
| 6 |
+
model_GenericModel_args:
|
| 7 |
+
chunk_size_grid: 16000
|
| 8 |
+
view_pooler_enabled: true
|
| 9 |
+
raysampler_AdaptiveRaySampler_args:
|
| 10 |
+
n_rays_per_image_sampled_from_mask: 850
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerformer.yaml
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_wce_base.yaml
|
| 3 |
+
- repro_feat_extractor_transformer.yaml
|
| 4 |
+
- _self_
|
| 5 |
+
model_factory_ImplicitronModelFactory_args:
|
| 6 |
+
model_GenericModel_args:
|
| 7 |
+
chunk_size_grid: 16000
|
| 8 |
+
view_pooler_enabled: true
|
| 9 |
+
implicit_function_class_type: NeRFormerImplicitFunction
|
| 10 |
+
raysampler_AdaptiveRaySampler_args:
|
| 11 |
+
n_rays_per_image_sampled_from_mask: 800
|
| 12 |
+
n_pts_per_ray_training: 32
|
| 13 |
+
n_pts_per_ray_evaluation: 32
|
| 14 |
+
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
| 15 |
+
n_pts_per_ray_fine_training: 16
|
| 16 |
+
n_pts_per_ray_fine_evaluation: 16
|
| 17 |
+
view_pooler_args:
|
| 18 |
+
feature_aggregator_class_type: IdentityFeatureAggregator
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_noharm.yaml
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_srn.yaml
|
| 3 |
+
- _self_
|
| 4 |
+
model_factory_ImplicitronModelFactory_args:
|
| 5 |
+
model_GenericModel_args:
|
| 6 |
+
num_passes: 1
|
| 7 |
+
implicit_function_SRNImplicitFunction_args:
|
| 8 |
+
pixel_generator_args:
|
| 9 |
+
n_harmonic_functions: 0
|
| 10 |
+
raymarch_function_args:
|
| 11 |
+
n_harmonic_functions: 0
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_idr.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_idr.yaml
|
| 3 |
+
- repro_singleseq_co3dv2_base.yaml
|
| 4 |
+
- _self_
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerformer.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_nerformer.yaml
|
| 3 |
+
- repro_singleseq_co3dv2_base.yaml
|
| 4 |
+
- _self_
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_srn_noharm.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_srn_noharm.yaml
|
| 3 |
+
- repro_singleseq_co3dv2_base.yaml
|
| 4 |
+
- _self_
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_wce_base.yaml
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_base
|
| 3 |
+
- _self_
|
| 4 |
+
data_source_ImplicitronDataSource_args:
|
| 5 |
+
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
| 6 |
+
batch_size: 10
|
| 7 |
+
dataset_length_train: 1000
|
| 8 |
+
dataset_length_val: 1
|
| 9 |
+
num_workers: 8
|
| 10 |
+
train_conditioning_type: SAME
|
| 11 |
+
val_conditioning_type: SAME
|
| 12 |
+
test_conditioning_type: SAME
|
| 13 |
+
images_per_seq_options:
|
| 14 |
+
- 2
|
| 15 |
+
- 3
|
| 16 |
+
- 4
|
| 17 |
+
- 5
|
| 18 |
+
- 6
|
| 19 |
+
- 7
|
| 20 |
+
- 8
|
| 21 |
+
- 9
|
| 22 |
+
- 10
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
# pyre-unsafe
|
| 8 |
+
|
| 9 |
+
from .shapenet_core import ShapeNetCore
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_core.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
# pyre-unsafe
|
| 8 |
+
|
| 9 |
+
import json
|
| 10 |
+
import os
|
| 11 |
+
import warnings
|
| 12 |
+
from os import path
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
from typing import Dict
|
| 15 |
+
|
| 16 |
+
from pytorch3d.datasets.shapenet_base import ShapeNetBase
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
SYNSET_DICT_DIR = Path(__file__).resolve().parent
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class ShapeNetCore(ShapeNetBase): # pragma: no cover
|
| 23 |
+
"""
|
| 24 |
+
This class loads ShapeNetCore from a given directory into a Dataset object.
|
| 25 |
+
ShapeNetCore is a subset of the ShapeNet dataset and can be downloaded from
|
| 26 |
+
https://www.shapenet.org/.
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
def __init__(
|
| 30 |
+
self,
|
| 31 |
+
data_dir,
|
| 32 |
+
synsets=None,
|
| 33 |
+
version: int = 1,
|
| 34 |
+
load_textures: bool = True,
|
| 35 |
+
texture_resolution: int = 4,
|
| 36 |
+
) -> None:
|
| 37 |
+
"""
|
| 38 |
+
Store each object's synset id and models id from data_dir.
|
| 39 |
+
|
| 40 |
+
Args:
|
| 41 |
+
data_dir: Path to ShapeNetCore data.
|
| 42 |
+
synsets: List of synset categories to load from ShapeNetCore in the form of
|
| 43 |
+
synset offsets or labels. A combination of both is also accepted.
|
| 44 |
+
When no category is specified, all categories in data_dir are loaded.
|
| 45 |
+
version: (int) version of ShapeNetCore data in data_dir, 1 or 2.
|
| 46 |
+
Default is set to be 1. Version 1 has 57 categories and version 2 has 55
|
| 47 |
+
categories.
|
| 48 |
+
Note: version 1 has two categories 02858304(boat) and 02992529(cellphone)
|
| 49 |
+
that are hyponyms of categories 04530566(watercraft) and 04401088(telephone)
|
| 50 |
+
respectively. You can combine the categories manually if needed.
|
| 51 |
+
Version 2 doesn't have 02858304(boat) or 02834778(bicycle) compared to
|
| 52 |
+
version 1.
|
| 53 |
+
load_textures: Boolean indicating whether textures should loaded for the model.
|
| 54 |
+
Textures will be of type TexturesAtlas i.e. a texture map per face.
|
| 55 |
+
texture_resolution: Int specifying the resolution of the texture map per face
|
| 56 |
+
created using the textures in the obj file. A
|
| 57 |
+
(texture_resolution, texture_resolution, 3) map is created per face.
|
| 58 |
+
"""
|
| 59 |
+
super().__init__()
|
| 60 |
+
self.shapenet_dir = data_dir
|
| 61 |
+
self.load_textures = load_textures
|
| 62 |
+
self.texture_resolution = texture_resolution
|
| 63 |
+
|
| 64 |
+
if version not in [1, 2]:
|
| 65 |
+
raise ValueError("Version number must be either 1 or 2.")
|
| 66 |
+
self.model_dir = "model.obj" if version == 1 else "models/model_normalized.obj"
|
| 67 |
+
|
| 68 |
+
# Synset dictionary mapping synset offsets to corresponding labels.
|
| 69 |
+
dict_file = "shapenet_synset_dict_v%d.json" % version
|
| 70 |
+
with open(path.join(SYNSET_DICT_DIR, dict_file), "r") as read_dict:
|
| 71 |
+
self.synset_dict = json.load(read_dict)
|
| 72 |
+
# Inverse dictionary mapping synset labels to corresponding offsets.
|
| 73 |
+
self.synset_inv = {label: offset for offset, label in self.synset_dict.items()}
|
| 74 |
+
|
| 75 |
+
# If categories are specified, check if each category is in the form of either
|
| 76 |
+
# synset offset or synset label, and if the category exists in the given directory.
|
| 77 |
+
if synsets is not None:
|
| 78 |
+
# Set of categories to load in the form of synset offsets.
|
| 79 |
+
synset_set = set()
|
| 80 |
+
for synset in synsets:
|
| 81 |
+
if (synset in self.synset_dict.keys()) and (
|
| 82 |
+
path.isdir(path.join(data_dir, synset))
|
| 83 |
+
):
|
| 84 |
+
synset_set.add(synset)
|
| 85 |
+
elif (synset in self.synset_inv.keys()) and (
|
| 86 |
+
(path.isdir(path.join(data_dir, self.synset_inv[synset])))
|
| 87 |
+
):
|
| 88 |
+
synset_set.add(self.synset_inv[synset])
|
| 89 |
+
else:
|
| 90 |
+
msg = (
|
| 91 |
+
"Synset category %s either not part of ShapeNetCore dataset "
|
| 92 |
+
"or cannot be found in %s."
|
| 93 |
+
) % (synset, data_dir)
|
| 94 |
+
warnings.warn(msg)
|
| 95 |
+
# If no category is given, load every category in the given directory.
|
| 96 |
+
# Ignore synset folders not included in the official mapping.
|
| 97 |
+
else:
|
| 98 |
+
synset_set = {
|
| 99 |
+
synset
|
| 100 |
+
for synset in os.listdir(data_dir)
|
| 101 |
+
if path.isdir(path.join(data_dir, synset))
|
| 102 |
+
and synset in self.synset_dict
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
# Check if there are any categories in the official mapping that are not loaded.
|
| 106 |
+
# Update self.synset_inv so that it only includes the loaded categories.
|
| 107 |
+
synset_not_present = set(self.synset_dict.keys()).difference(synset_set)
|
| 108 |
+
[self.synset_inv.pop(self.synset_dict[synset]) for synset in synset_not_present]
|
| 109 |
+
|
| 110 |
+
if len(synset_not_present) > 0:
|
| 111 |
+
msg = (
|
| 112 |
+
"The following categories are included in ShapeNetCore ver.%d's "
|
| 113 |
+
"official mapping but not found in the dataset location %s: %s"
|
| 114 |
+
""
|
| 115 |
+
) % (version, data_dir, ", ".join(synset_not_present))
|
| 116 |
+
warnings.warn(msg)
|
| 117 |
+
|
| 118 |
+
# Extract model_id of each object from directory names.
|
| 119 |
+
# Each grandchildren directory of data_dir contains an object, and the name
|
| 120 |
+
# of the directory is the object's model_id.
|
| 121 |
+
for synset in synset_set:
|
| 122 |
+
self.synset_start_idxs[synset] = len(self.synset_ids)
|
| 123 |
+
for model in os.listdir(path.join(data_dir, synset)):
|
| 124 |
+
if not path.exists(path.join(data_dir, synset, model, self.model_dir)):
|
| 125 |
+
msg = (
|
| 126 |
+
"Object file not found in the model directory %s "
|
| 127 |
+
"under synset directory %s."
|
| 128 |
+
) % (model, synset)
|
| 129 |
+
warnings.warn(msg)
|
| 130 |
+
continue
|
| 131 |
+
self.synset_ids.append(synset)
|
| 132 |
+
self.model_ids.append(model)
|
| 133 |
+
model_count = len(self.synset_ids) - self.synset_start_idxs[synset]
|
| 134 |
+
self.synset_num_models[synset] = model_count
|
| 135 |
+
|
| 136 |
+
def __getitem__(self, idx: int) -> Dict:
|
| 137 |
+
"""
|
| 138 |
+
Read a model by the given index.
|
| 139 |
+
|
| 140 |
+
Args:
|
| 141 |
+
idx: The idx of the model to be retrieved in the dataset.
|
| 142 |
+
|
| 143 |
+
Returns:
|
| 144 |
+
dictionary with following keys:
|
| 145 |
+
- verts: FloatTensor of shape (V, 3).
|
| 146 |
+
- faces: LongTensor of shape (F, 3) which indexes into the verts tensor.
|
| 147 |
+
- synset_id (str): synset id
|
| 148 |
+
- model_id (str): model id
|
| 149 |
+
- label (str): synset label.
|
| 150 |
+
"""
|
| 151 |
+
model = self._get_item_ids(idx)
|
| 152 |
+
model_path = path.join(
|
| 153 |
+
self.shapenet_dir, model["synset_id"], model["model_id"], self.model_dir
|
| 154 |
+
)
|
| 155 |
+
verts, faces, textures = self._load_mesh(model_path)
|
| 156 |
+
model["verts"] = verts
|
| 157 |
+
model["faces"] = faces
|
| 158 |
+
model["textures"] = textures
|
| 159 |
+
model["label"] = self.synset_dict[model["synset_id"]]
|
| 160 |
+
return model
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v1.json
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"04379243": "table",
|
| 3 |
+
"02958343": "car",
|
| 4 |
+
"03001627": "chair",
|
| 5 |
+
"02691156": "airplane",
|
| 6 |
+
"04256520": "sofa",
|
| 7 |
+
"04090263": "rifle",
|
| 8 |
+
"03636649": "lamp",
|
| 9 |
+
"04530566": "watercraft",
|
| 10 |
+
"02828884": "bench",
|
| 11 |
+
"03691459": "loudspeaker",
|
| 12 |
+
"02933112": "cabinet",
|
| 13 |
+
"03211117": "display",
|
| 14 |
+
"04401088": "telephone",
|
| 15 |
+
"02924116": "bus",
|
| 16 |
+
"02808440": "bathtub",
|
| 17 |
+
"03467517": "guitar",
|
| 18 |
+
"03325088": "faucet",
|
| 19 |
+
"03046257": "clock",
|
| 20 |
+
"03991062": "flowerpot",
|
| 21 |
+
"03593526": "jar",
|
| 22 |
+
"02876657": "bottle",
|
| 23 |
+
"02871439": "bookshelf",
|
| 24 |
+
"03642806": "laptop",
|
| 25 |
+
"03624134": "knife",
|
| 26 |
+
"04468005": "train",
|
| 27 |
+
"02747177": "trash bin",
|
| 28 |
+
"03790512": "motorbike",
|
| 29 |
+
"03948459": "pistol",
|
| 30 |
+
"03337140": "file cabinet",
|
| 31 |
+
"02818832": "bed",
|
| 32 |
+
"03928116": "piano",
|
| 33 |
+
"04330267": "stove",
|
| 34 |
+
"03797390": "mug",
|
| 35 |
+
"02880940": "bowl",
|
| 36 |
+
"04554684": "washer",
|
| 37 |
+
"04004475": "printer",
|
| 38 |
+
"03513137": "helmet",
|
| 39 |
+
"03761084": "microwaves",
|
| 40 |
+
"04225987": "skateboard",
|
| 41 |
+
"04460130": "tower",
|
| 42 |
+
"02942699": "camera",
|
| 43 |
+
"02801938": "basket",
|
| 44 |
+
"02946921": "can",
|
| 45 |
+
"03938244": "pillow",
|
| 46 |
+
"03710193": "mailbox",
|
| 47 |
+
"03207941": "dishwasher",
|
| 48 |
+
"04099429": "rocket",
|
| 49 |
+
"02773838": "bag",
|
| 50 |
+
"02843684": "birdhouse",
|
| 51 |
+
"03261776": "earphone",
|
| 52 |
+
"03759954": "microphone",
|
| 53 |
+
"04074963": "remote",
|
| 54 |
+
"03085013": "keyboard",
|
| 55 |
+
"02834778": "bicycle",
|
| 56 |
+
"02954340": "cap",
|
| 57 |
+
"02858304": "boat",
|
| 58 |
+
"02992529": "mobile phone"
|
| 59 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v2.json
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"02691156": "airplane",
|
| 3 |
+
"02747177": "trash bin",
|
| 4 |
+
"02773838": "bag",
|
| 5 |
+
"02801938": "basket",
|
| 6 |
+
"02808440": "bathtub",
|
| 7 |
+
"02818832": "bed",
|
| 8 |
+
"02828884": "bench",
|
| 9 |
+
"02843684": "birdhouse",
|
| 10 |
+
"02871439": "bookshelf",
|
| 11 |
+
"02876657": "bottle",
|
| 12 |
+
"02880940": "bowl",
|
| 13 |
+
"02924116": "bus",
|
| 14 |
+
"02933112": "cabinet",
|
| 15 |
+
"02942699": "camera",
|
| 16 |
+
"02946921": "can",
|
| 17 |
+
"02954340": "cap",
|
| 18 |
+
"02958343": "car",
|
| 19 |
+
"02992529": "cellphone",
|
| 20 |
+
"03001627": "chair",
|
| 21 |
+
"03046257": "clock",
|
| 22 |
+
"03085013": "keyboard",
|
| 23 |
+
"03207941": "dishwasher",
|
| 24 |
+
"03211117": "display",
|
| 25 |
+
"03261776": "earphone",
|
| 26 |
+
"03325088": "faucet",
|
| 27 |
+
"03337140": "file cabinet",
|
| 28 |
+
"03467517": "guitar",
|
| 29 |
+
"03513137": "helmet",
|
| 30 |
+
"03593526": "jar",
|
| 31 |
+
"03624134": "knife",
|
| 32 |
+
"03636649": "lamp",
|
| 33 |
+
"03642806": "laptop",
|
| 34 |
+
"03691459": "loudspeaker",
|
| 35 |
+
"03710193": "mailbox",
|
| 36 |
+
"03759954": "microphone",
|
| 37 |
+
"03761084": "microwaves",
|
| 38 |
+
"03790512": "motorbike",
|
| 39 |
+
"03797390": "mug",
|
| 40 |
+
"03928116": "piano",
|
| 41 |
+
"03938244": "pillow",
|
| 42 |
+
"03948459": "pistol",
|
| 43 |
+
"03991062": "flowerpot",
|
| 44 |
+
"04004475": "printer",
|
| 45 |
+
"04074963": "remote",
|
| 46 |
+
"04090263": "rifle",
|
| 47 |
+
"04099429": "rocket",
|
| 48 |
+
"04225987": "skateboard",
|
| 49 |
+
"04256520": "sofa",
|
| 50 |
+
"04330267": "stove",
|
| 51 |
+
"04379243": "table",
|
| 52 |
+
"04401088": "telephone",
|
| 53 |
+
"04460130": "tower",
|
| 54 |
+
"04468005": "train",
|
| 55 |
+
"04530566": "watercraft",
|
| 56 |
+
"04554684": "washer"
|
| 57 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
# pyre-unsafe
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/blender_dataset_map_provider.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
# pyre-unsafe
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
from pytorch3d.implicitron.tools.config import registry
|
| 12 |
+
|
| 13 |
+
from .load_blender import load_blender_data
|
| 14 |
+
from .single_sequence_dataset import (
|
| 15 |
+
_interpret_blender_cameras,
|
| 16 |
+
SingleSceneDatasetMapProviderBase,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@registry.register
|
| 21 |
+
class BlenderDatasetMapProvider(SingleSceneDatasetMapProviderBase):
|
| 22 |
+
"""
|
| 23 |
+
Provides data for one scene from Blender synthetic dataset.
|
| 24 |
+
Uses the code in load_blender.py
|
| 25 |
+
|
| 26 |
+
Members:
|
| 27 |
+
base_dir: directory holding the data for the scene.
|
| 28 |
+
object_name: The name of the scene (e.g. "lego"). This is just used as a label.
|
| 29 |
+
It will typically be equal to the name of the directory self.base_dir.
|
| 30 |
+
path_manager_factory: Creates path manager which may be used for
|
| 31 |
+
interpreting paths.
|
| 32 |
+
n_known_frames_for_test: If set, training frames are included in the val
|
| 33 |
+
and test datasets, and this many random training frames are added to
|
| 34 |
+
each test batch. If not set, test batches each contain just a single
|
| 35 |
+
testing frame.
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
def _load_data(self) -> None:
|
| 39 |
+
path_manager = self.path_manager_factory.get()
|
| 40 |
+
images, poses, _, hwf, i_split = load_blender_data(
|
| 41 |
+
self.base_dir,
|
| 42 |
+
testskip=1,
|
| 43 |
+
path_manager=path_manager,
|
| 44 |
+
)
|
| 45 |
+
H, W, focal = hwf
|
| 46 |
+
images_masks = torch.from_numpy(images).permute(0, 3, 1, 2)
|
| 47 |
+
|
| 48 |
+
# pyre-ignore[16]
|
| 49 |
+
self.poses = _interpret_blender_cameras(poses, focal)
|
| 50 |
+
# pyre-ignore[16]
|
| 51 |
+
self.images = images_masks[:, :3]
|
| 52 |
+
# pyre-ignore[16]
|
| 53 |
+
self.fg_probabilities = images_masks[:, 3:4]
|
| 54 |
+
# pyre-ignore[16]
|
| 55 |
+
self.i_split = i_split
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/dataset_base.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
# pyre-unsafe
|
| 8 |
+
|
| 9 |
+
from collections import defaultdict
|
| 10 |
+
from dataclasses import dataclass
|
| 11 |
+
from typing import (
|
| 12 |
+
ClassVar,
|
| 13 |
+
Dict,
|
| 14 |
+
Iterable,
|
| 15 |
+
Iterator,
|
| 16 |
+
List,
|
| 17 |
+
Optional,
|
| 18 |
+
Sequence,
|
| 19 |
+
Tuple,
|
| 20 |
+
Type,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
import torch
|
| 24 |
+
|
| 25 |
+
from pytorch3d.implicitron.dataset.frame_data import FrameData
|
| 26 |
+
from pytorch3d.implicitron.dataset.utils import GenericWorkaround
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass(eq=False)
class DatasetBase(GenericWorkaround, torch.utils.data.Dataset[FrameData]):
    """
    Base class describing a dataset to be used with Implicitron.

    A dataset consists of frames grouped into sequences; every sequence has
    a string name. (A sequence could be a video, or a set of images of one
    scene.) Indexing the dataset via __getitem__ yields a FrameData
    describing one frame in one sequence.
    """

    # _seq_to_idx maps a sequence name to the sequence's global frame
    # indices. Subclasses may define it; the default implementations of
    # several methods below rely on it, but implementations which override
    # those methods are free to ignore it.
    # _seq_to_idx: Dict[str, List[int]] = field(init=False)

    def __len__(self) -> int:
        raise NotImplementedError()

    def get_frame_numbers_and_timestamps(
        self, idxs: Sequence[int], subset_filter: Optional[Sequence[str]] = None
    ) -> List[Tuple[int, float]]:
        """
        Datasets whose sequences are videos (rather than unordered view
        collections) should override this to report, for each dataset index
        in `idxs`, the frame's index and timestamp within its video. In that
        case the values in _seq_to_idx should be in ascending order, and
        absent timestamps should be replaced with a constant.

        SceneBatchSampler uses this to identify consecutive frames.

        Args:
            idxs: frame index in self
            subset_filter: If given, an index in idxs is ignored if the
                corresponding frame is not in any of the named subsets.

        Returns:
            tuple of
            - frame index in video
            - timestamp of frame in video
        """
        raise ValueError("This dataset does not contain videos.")

    def join(self, other_datasets: Iterable["DatasetBase"]) -> None:
        """
        Joins the current dataset with a list of other datasets of the same type.
        """
        raise NotImplementedError()

    def get_eval_batches(self) -> Optional[List[List[int]]]:
        # No evaluation batches by default; subclasses may provide them.
        return None

    def sequence_names(self) -> Iterable[str]:
        """Returns an iterator over sequence names in the dataset."""
        # pyre-ignore[16]
        return self._seq_to_idx.keys()

    def category_to_sequence_names(self) -> Dict[str, List[str]]:
        """
        Returns a dict mapping from each dataset category to a list of its
        sequence names.

        Returns:
            category_to_sequence_names: Dict {category_i: [..., sequence_name_j, ...]}
        """
        mapping: Dict[str, List[str]] = defaultdict(list)
        for seq_name in self.sequence_names():
            first_idx = next(self.sequence_indices_in_order(seq_name))
            # Requires __getitem__ to be overridden; crashes otherwise.
            category = self[first_idx].sequence_category
            mapping[category].append(seq_name)
        return dict(mapping)

    def sequence_frames_in_order(
        self, seq_name: str, subset_filter: Optional[Sequence[str]] = None
    ) -> Iterator[Tuple[float, int, int]]:
        """Returns an iterator over the frame indices in a given sequence.
        Sorting is primarily by timestamp (when available), secondarily by
        frame number.

        Args:
            seq_name: the name of the sequence.

        Returns:
            an iterator over triplets `(timestamp, frame_no, dataset_idx)`,
            where `frame_no` is the index within the sequence, and
            `dataset_idx` is the index within the dataset.
            `None` timestamps are replaced with 0s.
        """
        # pyre-ignore[16]
        frame_indices = self._seq_to_idx[seq_name]
        numbers_and_stamps = self.get_frame_numbers_and_timestamps(
            frame_indices, subset_filter
        )

        triplets = [
            (stamp, number, dataset_idx)
            for dataset_idx, (number, stamp) in zip(frame_indices, numbers_and_stamps)
        ]
        triplets.sort()
        yield from triplets

    def sequence_indices_in_order(
        self, seq_name: str, subset_filter: Optional[Sequence[str]] = None
    ) -> Iterator[int]:
        """Same as `sequence_frames_in_order` but returns the iterator over
        only dataset indices.
        """
        yield from (
            dataset_idx
            for _, _, dataset_idx in self.sequence_frames_in_order(
                seq_name, subset_filter
            )
        )

    # frame_data_type is the actual type of frames returned by the dataset.
    # Collation uses its classmethod `collate`
    frame_data_type: ClassVar[Type[FrameData]] = FrameData
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/frame_data.py
ADDED
|
@@ -0,0 +1,780 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
# pyre-unsafe
|
| 8 |
+
|
| 9 |
+
import os
|
| 10 |
+
from abc import ABC, abstractmethod
|
| 11 |
+
from collections import defaultdict
|
| 12 |
+
from dataclasses import dataclass, field, fields
|
| 13 |
+
from typing import (
|
| 14 |
+
Any,
|
| 15 |
+
ClassVar,
|
| 16 |
+
Generic,
|
| 17 |
+
List,
|
| 18 |
+
Mapping,
|
| 19 |
+
Optional,
|
| 20 |
+
Tuple,
|
| 21 |
+
Type,
|
| 22 |
+
TypeVar,
|
| 23 |
+
Union,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
import numpy as np
|
| 27 |
+
import torch
|
| 28 |
+
|
| 29 |
+
from pytorch3d.implicitron.dataset import types
|
| 30 |
+
from pytorch3d.implicitron.dataset.utils import (
|
| 31 |
+
adjust_camera_to_bbox_crop_,
|
| 32 |
+
adjust_camera_to_image_scale_,
|
| 33 |
+
bbox_xyxy_to_xywh,
|
| 34 |
+
clamp_box_to_image_bounds_and_round,
|
| 35 |
+
crop_around_box,
|
| 36 |
+
GenericWorkaround,
|
| 37 |
+
get_bbox_from_mask,
|
| 38 |
+
get_clamp_bbox,
|
| 39 |
+
load_depth,
|
| 40 |
+
load_depth_mask,
|
| 41 |
+
load_image,
|
| 42 |
+
load_mask,
|
| 43 |
+
load_pointcloud,
|
| 44 |
+
rescale_bbox,
|
| 45 |
+
resize_image,
|
| 46 |
+
safe_as_tensor,
|
| 47 |
+
)
|
| 48 |
+
from pytorch3d.implicitron.tools.config import registry, ReplaceableBase
|
| 49 |
+
from pytorch3d.renderer.camera_utils import join_cameras_as_batch
|
| 50 |
+
from pytorch3d.renderer.cameras import CamerasBase, PerspectiveCameras
|
| 51 |
+
from pytorch3d.structures.pointclouds import join_pointclouds_as_batch, Pointclouds
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@dataclass
class FrameData(Mapping[str, Any]):
    """
    A type of the elements returned by indexing the dataset object.
    It can represent both individual frames and batches thereof;
    in this documentation, the sizes of tensors refer to single frames;
    add the first batch dimension for the collation result.

    Args:
        frame_number: The number of the frame within its sequence.
            0-based continuous integers.
        sequence_name: The unique name of the frame's sequence.
        sequence_category: The object category of the sequence.
        frame_timestamp: The time elapsed since the start of a sequence in sec.
        image_size_hw: The size of the original image in pixels; (height, width)
            tensor of shape (2,). Note that it is optional, e.g. it can be `None`
            if the frame annotation has no size and image_rgb has not [yet] been
            loaded. Image-less FrameData is valid but mutators like crop/resize
            may fail if the original image size cannot be deduced.
        effective_image_size_hw: The size of the image after mutations such as
            crop/resize in pixels; (height, width). if the image has not been mutated,
            it is equal to `image_size_hw`. Note that it is also optional, for the
            same reason as `image_size_hw`.
        image_path: The qualified path to the loaded image (with dataset_root).
        image_rgb: A Tensor of shape `(3, H, W)` holding the RGB image
            of the frame; elements are floats in [0, 1].
        mask_crop: A binary mask of shape `(1, H, W)` denoting the valid image
            regions. Regions can be invalid (mask_crop[i,j]=0) in case they
            are a result of zero-padding of the image after cropping around
            the object bounding box; elements are floats in {0.0, 1.0}.
        depth_path: The qualified path to the frame's depth map.
        depth_map: A float Tensor of shape `(1, H, W)` holding the depth map
            of the frame; values correspond to distances from the camera;
            use `depth_mask` and `mask_crop` to filter for valid pixels.
        depth_mask: A binary mask of shape `(1, H, W)` denoting pixels of the
            depth map that are valid for evaluation, they have been checked for
            consistency across views; elements are floats in {0.0, 1.0}.
        mask_path: A qualified path to the foreground probability mask.
        fg_probability: A Tensor of `(1, H, W)` denoting the probability of the
            pixels belonging to the captured object; elements are floats
            in [0, 1].
        bbox_xywh: The bounding box tightly enclosing the foreground object in the
            format (x0, y0, width, height). The convention assumes that
            `x0+width` and `y0+height` includes the boundary of the box.
            I.e., to slice out the corresponding crop from an image tensor `I`
            we execute `crop = I[..., y0:y0+height, x0:x0+width]`
        crop_bbox_xywh: The bounding box denoting the boundaries of `image_rgb`
            in the original image coordinates in the format (x0, y0, width, height).
            The convention is the same as for `bbox_xywh`. `crop_bbox_xywh` differs
            from `bbox_xywh` due to padding (which can happen e.g. due to
            setting `JsonIndexDataset.box_crop_context > 0`)
        camera: A PyTorch3D camera object corresponding to the frame's viewpoint,
            corrected for cropping if it happened.
        camera_quality_score: The score proportional to the confidence of the
            frame's camera estimation (the higher the more accurate).
        point_cloud_quality_score: The score proportional to the accuracy of the
            frame's sequence point cloud (the higher the more accurate).
        sequence_point_cloud_path: The path to the sequence's point cloud.
        sequence_point_cloud: A PyTorch3D Pointclouds object holding the
            point cloud corresponding to the frame's sequence. When the object
            represents a batch of frames, point clouds may be deduplicated;
            see `sequence_point_cloud_idx`.
        sequence_point_cloud_idx: Integer indices mapping frame indices to the
            corresponding point clouds in `sequence_point_cloud`; to get the
            corresponding point cloud to `image_rgb[i]`, use
            `sequence_point_cloud[sequence_point_cloud_idx[i]]`.
        frame_type: The type of the loaded frame specified in
            `subset_lists_file`, if provided.
        meta: A dict for storing additional frame information.
    """

    frame_number: Optional[torch.LongTensor]
    sequence_name: Union[str, List[str]]
    sequence_category: Union[str, List[str]]
    frame_timestamp: Optional[torch.Tensor] = None
    image_size_hw: Optional[torch.LongTensor] = None
    effective_image_size_hw: Optional[torch.LongTensor] = None
    image_path: Union[str, List[str], None] = None
    image_rgb: Optional[torch.Tensor] = None
    # masks out padding added due to cropping the square bit
    mask_crop: Optional[torch.Tensor] = None
    depth_path: Union[str, List[str], None] = None
    depth_map: Optional[torch.Tensor] = None
    depth_mask: Optional[torch.Tensor] = None
    mask_path: Union[str, List[str], None] = None
    fg_probability: Optional[torch.Tensor] = None
    bbox_xywh: Optional[torch.Tensor] = None
    crop_bbox_xywh: Optional[torch.Tensor] = None
    camera: Optional[PerspectiveCameras] = None
    camera_quality_score: Optional[torch.Tensor] = None
    point_cloud_quality_score: Optional[torch.Tensor] = None
    sequence_point_cloud_path: Union[str, List[str], None] = None
    sequence_point_cloud: Optional[Pointclouds] = None
    sequence_point_cloud_idx: Optional[torch.Tensor] = None
    frame_type: Union[str, List[str], None] = None  # known | unseen
    meta: dict = field(default_factory=lambda: {})

    # NOTE that batching resets this attribute
    _uncropped: bool = field(init=False, default=True)

    def to(self, *args, **kwargs):
        """Return a copy with all tensor-like members moved via `.to(*args, **kwargs)`.

        Tensors, Pointclouds and cameras are converted; other members are
        copied by reference. The `_uncropped` flag is carried over because
        the dataclass constructor would otherwise reset it to True.
        """
        new_params = {}
        for field_name in iter(self):
            value = getattr(self, field_name)
            if isinstance(value, (torch.Tensor, Pointclouds, CamerasBase)):
                new_params[field_name] = value.to(*args, **kwargs)
            else:
                new_params[field_name] = value
        frame_data = type(self)(**new_params)
        frame_data._uncropped = self._uncropped
        return frame_data

    def cpu(self):
        """Return a copy with tensor members on the CPU."""
        return self.to(device=torch.device("cpu"))

    def cuda(self):
        """Return a copy with tensor members on the default CUDA device."""
        return self.to(device=torch.device("cuda"))

    # the following functions make sure **frame_data can be passed to functions
    def __iter__(self):
        # Iterate over public field names only; private fields such as
        # _uncropped are excluded so **frame_data stays a clean kwargs dict.
        for f in fields(self):
            if f.name.startswith("_"):
                continue

            yield f.name

    def __getitem__(self, key):
        return getattr(self, key)

    def __len__(self):
        # Number of public fields (matches what __iter__ yields).
        return sum(1 for f in iter(self))

    def crop_by_metadata_bbox_(
        self,
        box_crop_context: float,
    ) -> None:
        """Crops the frame data in-place by (possibly expanded) bounding box.
        The bounding box is taken from the object state (usually taken from
        the frame annotation or estimated from the foreground mask).
        If the expanded bounding box does not fit the image, it is clamped,
        i.e. the image is *not* padded.

        Args:
            box_crop_context: rate of expansion for bbox; 0 means no expansion,

        Raises:
            ValueError: If the object does not contain a bounding box (usually when no
                mask annotation is provided)
            ValueError: If the frame data have been cropped or resized, thus the intrinsic
                bounding box is not valid for the current image size.
            ValueError: If the frame does not have an image size (usually a corner case
                when no image has been loaded)
        """
        if self.bbox_xywh is None:
            raise ValueError(
                "Attempted cropping by metadata with empty bounding box. Consider either"
                " to remove_empty_masks or turn off box_crop in the dataset config."
            )

        # The stored bbox is expressed in original-image coordinates, so it
        # is only valid while the frame is still uncropped/unresized.
        if not self._uncropped:
            raise ValueError(
                "Trying to apply the metadata bounding box to already cropped "
                "or resized image; coordinates have changed."
            )

        self._crop_by_bbox_(
            box_crop_context,
            self.bbox_xywh,
        )

    def crop_by_given_bbox_(
        self,
        box_crop_context: float,
        bbox_xywh: torch.Tensor,
    ) -> None:
        """Crops the frame data in-place by (possibly expanded) bounding box.
        If the expanded bounding box does not fit the image, it is clamped,
        i.e. the image is *not* padded.

        Args:
            box_crop_context: rate of expansion for bbox; 0 means no expansion,
            bbox_xywh: bounding box in [x0, y0, width, height] format. If float
                tensor, values are floored (after converting to [x0, y0, x1, y1]).

        Raises:
            ValueError: If the frame does not have an image size (usually a corner case
                when no image has been loaded)
        """
        self._crop_by_bbox_(
            box_crop_context,
            bbox_xywh,
        )

    def _crop_by_bbox_(
        self,
        box_crop_context: float,
        bbox_xywh: torch.Tensor,
    ) -> None:
        """Crops the frame data in-place by (possibly expanded) bounding box.
        If the expanded bounding box does not fit the image, it is clamped,
        i.e. the image is *not* padded.

        Args:
            box_crop_context: rate of expansion for bbox; 0 means no expansion,
            bbox_xywh: bounding box in [x0, y0, width, height] format. If float
                tensor, values are floored (after converting to [x0, y0, x1, y1]).

        Raises:
            ValueError: If the frame does not have an image size (usually a corner case
                when no image has been loaded)
        """
        effective_image_size_hw = self.effective_image_size_hw
        if effective_image_size_hw is None:
            raise ValueError("Calling crop on image-less FrameData")

        # Expand the box by box_crop_context, then clamp it to image bounds
        # and round to integer pixel coordinates.
        bbox_xyxy = get_clamp_bbox(
            bbox_xywh,
            image_path=self.image_path,  # pyre-ignore
            box_crop_context=box_crop_context,
        )
        clamp_bbox_xyxy = clamp_box_to_image_bounds_and_round(
            bbox_xyxy,
            image_size_hw=tuple(self.effective_image_size_hw),  # pyre-ignore
        )
        crop_bbox_xywh = bbox_xyxy_to_xywh(clamp_bbox_xyxy)
        self.crop_bbox_xywh = crop_bbox_xywh

        if self.fg_probability is not None:
            self.fg_probability = crop_around_box(
                self.fg_probability,
                clamp_bbox_xyxy,
                self.mask_path,  # pyre-ignore
            )
        if self.image_rgb is not None:
            self.image_rgb = crop_around_box(
                self.image_rgb,
                clamp_bbox_xyxy,
                self.image_path,  # pyre-ignore
            )

        # Depth maps/masks may have a different resolution than the RGB
        # image, so the crop box is rescaled to their resolution first.
        depth_map = self.depth_map
        if depth_map is not None:
            clamp_bbox_xyxy_depth = rescale_bbox(
                clamp_bbox_xyxy, tuple(depth_map.shape[-2:]), effective_image_size_hw
            ).long()
            self.depth_map = crop_around_box(
                depth_map,
                clamp_bbox_xyxy_depth,
                self.depth_path,  # pyre-ignore
            )

        depth_mask = self.depth_mask
        if depth_mask is not None:
            clamp_bbox_xyxy_depth = rescale_bbox(
                clamp_bbox_xyxy, tuple(depth_mask.shape[-2:]), effective_image_size_hw
            ).long()
            self.depth_mask = crop_around_box(
                depth_mask,
                clamp_bbox_xyxy_depth,
                self.mask_path,  # pyre-ignore
            )

        # changing principal_point according to bbox_crop
        if self.camera is not None:
            adjust_camera_to_bbox_crop_(
                camera=self.camera,
                image_size_wh=effective_image_size_hw.flip(dims=[-1]),
                clamp_bbox_xywh=crop_bbox_xywh,
            )

        # The new effective size is the (height, width) of the crop box.
        # pyre-ignore
        self.effective_image_size_hw = crop_bbox_xywh[..., 2:].flip(dims=[-1])
        self._uncropped = False

    def resize_frame_(self, new_size_hw: torch.LongTensor) -> None:
        """Resizes frame data in-place according to given dimensions.

        Args:
            new_size_hw: target image size [height, width], a LongTensor of shape (2,)

        Raises:
            ValueError: If the frame does not have an image size (usually a corner case
                when no image has been loaded)
        """

        effective_image_size_hw = self.effective_image_size_hw
        if effective_image_size_hw is None:
            raise ValueError("Calling resize on image-less FrameData")

        image_height, image_width = new_size_hw.tolist()

        # Masks and depth use nearest-neighbour resizing to avoid
        # interpolating across label/validity boundaries.
        if self.fg_probability is not None:
            self.fg_probability, _, _ = resize_image(
                self.fg_probability,
                image_height=image_height,
                image_width=image_width,
                mode="nearest",
            )

        if self.image_rgb is not None:
            self.image_rgb, _, self.mask_crop = resize_image(
                self.image_rgb, image_height=image_height, image_width=image_width
            )

        if self.depth_map is not None:
            self.depth_map, _, _ = resize_image(
                self.depth_map,
                image_height=image_height,
                image_width=image_width,
                mode="nearest",
            )

        if self.depth_mask is not None:
            self.depth_mask, _, _ = resize_image(
                self.depth_mask,
                image_height=image_height,
                image_width=image_width,
                mode="nearest",
            )

        if self.camera is not None:
            if self.image_size_hw is None:
                raise ValueError(
                    "image_size_hw has to be defined for resizing FrameData with cameras."
                )
            adjust_camera_to_image_scale_(
                camera=self.camera,
                original_size_wh=effective_image_size_hw.flip(dims=[-1]),
                new_size_wh=new_size_hw.flip(dims=[-1]),  # pyre-ignore
            )

        self.effective_image_size_hw = new_size_hw
        self._uncropped = False

    @classmethod
    def collate(cls, batch):
        """
        Given a list of objects `batch` of class `cls`, collates them into a batched
        representation suitable for processing with deep networks.
        """

        elem = batch[0]

        if isinstance(elem, cls):
            # Deduplicate sequence point clouds shared (by object identity)
            # between frames of the same sequence, so each distinct cloud is
            # stored once and referenced via sequence_point_cloud_idx.
            pointcloud_ids = [id(el.sequence_point_cloud) for el in batch]
            id_to_idx = defaultdict(list)
            for i, pc_id in enumerate(pointcloud_ids):
                id_to_idx[pc_id].append(i)

            sequence_point_cloud = []
            sequence_point_cloud_idx = -np.ones((len(batch),))
            for i, ind in enumerate(id_to_idx.values()):
                sequence_point_cloud_idx[ind] = i
                sequence_point_cloud.append(batch[ind[0]].sequence_point_cloud)
            assert (sequence_point_cloud_idx >= 0).all()

            override_fields = {
                "sequence_point_cloud": sequence_point_cloud,
                "sequence_point_cloud_idx": sequence_point_cloud_idx.tolist(),
            }
            # note that the pre-collate value of sequence_point_cloud_idx is unused

            # Recursively collate each field; a field collates to None if
            # any element in the batch is missing it.
            collated = {}
            for f in fields(elem):
                if not f.init:
                    continue

                list_values = override_fields.get(
                    f.name, [getattr(d, f.name) for d in batch]
                )
                collated[f.name] = (
                    cls.collate(list_values)
                    if all(list_value is not None for list_value in list_values)
                    else None
                )
            return cls(**collated)

        elif isinstance(elem, Pointclouds):
            return join_pointclouds_as_batch(batch)

        elif isinstance(elem, CamerasBase):
            # TODO: don't store K; enforce working in NDC space
            return join_cameras_as_batch(batch)
        else:
            # Plain tensors / scalars / strings: defer to the default
            # PyTorch collate function.
            return torch.utils.data.dataloader.default_collate(batch)
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
# Type variable bound to FrameData: lets builder classes be generic over the
# concrete FrameData subtype they produce.
FrameDataSubtype = TypeVar("FrameDataSubtype", bound=FrameData)
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
class FrameDataBuilderBase(ReplaceableBase, Generic[FrameDataSubtype], ABC):
    """A base class for FrameDataBuilders that build a FrameData object, load and
    process the binary data (crop and resize). Implementations should parametrize
    the class with a subtype of FrameData and set frame_data_type class variable to
    that type. They have to also implement `build` method.
    """

    # To be initialised to FrameDataSubtype (the concrete FrameData subclass
    # this builder produces).
    frame_data_type: ClassVar[Type[FrameDataSubtype]]

    @abstractmethod
    def build(
        self,
        frame_annotation: types.FrameAnnotation,
        sequence_annotation: types.SequenceAnnotation,
        *,
        load_blobs: bool = True,
        **kwargs,
    ) -> FrameDataSubtype:
        """An abstract method to build the frame data based on raw frame/sequence
        annotations, load the binary data and adjust them according to the metadata.

        Args:
            frame_annotation: the raw annotation of the frame to build.
            sequence_annotation: the raw annotation of the frame's sequence.
            load_blobs: whether to also load the binary data (images, depth,
                masks, point clouds) referenced by the annotations.

        Returns:
            An instance of `frame_data_type` describing the frame.
        """
        raise NotImplementedError()
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
class GenericFrameDataBuilder(FrameDataBuilderBase[FrameDataSubtype], ABC):
    """
    A class to build a FrameData object, load and process the binary data (crop and
    resize). This is an abstract class for extending to build FrameData subtypes. Most
    users need to use concrete `FrameDataBuilder` class instead.
    Beware that modifications of frame data are done in-place.

    Args:
        dataset_root: The root folder of the dataset; all paths in frame / sequence
            annotations are defined w.r.t. this root. Has to be set if any of the
            load_* flags below is true.
        load_images: Enable loading the frame RGB data.
        load_depths: Enable loading the frame depth maps.
        load_depth_masks: Enable loading the frame depth map masks denoting the
            depth values used for evaluation (the points consistent across views).
        load_masks: Enable loading frame foreground masks.
        load_point_clouds: Enable loading sequence-level point clouds.
        max_points: Cap on the number of loaded points in the point cloud;
            if reached, they are randomly sampled without replacement.
        mask_images: Whether to mask the images with the loaded foreground masks;
            0 value is used for background.
        mask_depths: Whether to mask the depth maps with the loaded foreground
            masks; 0 value is used for background.
        image_height: The height of the returned images, masks, and depth maps;
            aspect ratio is preserved during cropping/resizing.
        image_width: The width of the returned images, masks, and depth maps;
            aspect ratio is preserved during cropping/resizing.
        box_crop: Enable cropping of the image around the bounding box inferred
            from the foreground region of the loaded segmentation mask; masks
            and depth maps are cropped accordingly; cameras are corrected.
        box_crop_mask_thr: The threshold used to separate pixels into foreground
            and background based on the foreground_probability mask; if no value
            is greater than this threshold, the loader lowers it and repeats.
        box_crop_context: The amount of additional padding added to each
            dimension of the cropping bounding box, relative to box size.
        path_manager: Optionally a PathManager for interpreting paths in a special way.
    """

    dataset_root: Optional[str] = None
    load_images: bool = True
    load_depths: bool = True
    load_depth_masks: bool = True
    load_masks: bool = True
    load_point_clouds: bool = False
    max_points: int = 0
    mask_images: bool = False
    mask_depths: bool = False
    image_height: Optional[int] = 800
    image_width: Optional[int] = 800
    box_crop: bool = True
    box_crop_mask_thr: float = 0.4
    box_crop_context: float = 0.3
    path_manager: Any = None

    def __post_init__(self) -> None:
        # Validate the configuration eagerly: loading any blob (image/depth/
        # mask/point cloud) requires a dataset_root that actually exists.
        load_any_blob = (
            self.load_images
            or self.load_depths
            or self.load_depth_masks
            or self.load_masks
            or self.load_point_clouds
        )
        if load_any_blob and self.dataset_root is None:
            raise ValueError(
                "dataset_root must be set to load any blob data. "
                "Make sure it is set in either FrameDataBuilder or Dataset params."
            )

        # Empty relpath checks the existence of dataset_root itself.
        if load_any_blob and not self._exists_in_dataset_root(""):
            raise ValueError(
                f"dataset_root is passed but {self.dataset_root} does not exist."
            )

    def build(
        self,
        frame_annotation: types.FrameAnnotation,
        sequence_annotation: types.SequenceAnnotation,
        *,
        load_blobs: bool = True,
        **kwargs,
    ) -> FrameDataSubtype:
        """Builds the frame data based on raw frame/sequence annotations, loads the
        binary data and adjust them according to the metadata. The processing includes:
            * if box_crop is set, the image/mask/depth are cropped with the bounding
                box provided or estimated from MaskAnnotation,
            * if image_height/image_width are set, the image/mask/depth are resized to
                fit that resolution. Note that the aspect ratio is preserved, and the
                (possibly cropped) image is pasted into the top-left corner. In the
                resulting frame_data, mask_crop field corresponds to the mask of the
                pasted image.

        Args:
            frame_annotation: frame annotation
            sequence_annotation: sequence annotation
            load_blobs: if the function should attempt loading the image, depth map
                and mask, and foreground mask

        Returns:
            The constructed FrameData object.
        """

        point_cloud = sequence_annotation.point_cloud

        # Start from the scalar metadata; blob fields are filled in below.
        frame_data = self.frame_data_type(
            frame_number=safe_as_tensor(frame_annotation.frame_number, torch.long),
            frame_timestamp=safe_as_tensor(
                frame_annotation.frame_timestamp, torch.float
            ),
            sequence_name=frame_annotation.sequence_name,
            sequence_category=sequence_annotation.category,
            camera_quality_score=safe_as_tensor(
                sequence_annotation.viewpoint_quality_score, torch.float
            ),
            point_cloud_quality_score=(
                safe_as_tensor(point_cloud.quality_score, torch.float)
                if point_cloud is not None
                else None
            ),
        )

        # Foreground mask: loaded first because it can also supply the crop
        # bounding box and is reused for masking the image/depth below.
        fg_mask_np: Optional[np.ndarray] = None
        mask_annotation = frame_annotation.mask
        if mask_annotation is not None:
            if load_blobs and self.load_masks:
                fg_mask_np, mask_path = self._load_fg_probability(frame_annotation)
                frame_data.mask_path = mask_path
                frame_data.fg_probability = safe_as_tensor(fg_mask_np, torch.float)

            # Fall back to estimating the bbox from the mask if the annotation
            # does not provide one explicitly.
            bbox_xywh = mask_annotation.bounding_box_xywh
            if bbox_xywh is None and fg_mask_np is not None:
                bbox_xywh = get_bbox_from_mask(fg_mask_np, self.box_crop_mask_thr)

            frame_data.bbox_xywh = safe_as_tensor(bbox_xywh, torch.float)

        if frame_annotation.image is not None:
            image_size_hw = safe_as_tensor(frame_annotation.image.size, torch.long)
            frame_data.image_size_hw = image_size_hw  # original image size
            # image size after crop/resize
            frame_data.effective_image_size_hw = image_size_hw
            image_path = None
            dataset_root = self.dataset_root
            if frame_annotation.image.path is not None and dataset_root is not None:
                image_path = os.path.join(dataset_root, frame_annotation.image.path)
                frame_data.image_path = image_path

            if load_blobs and self.load_images:
                if image_path is None:
                    raise ValueError("Image path is required to load images.")

                image_np = load_image(self._local_path(image_path))
                frame_data.image_rgb = self._postprocess_image(
                    image_np, frame_annotation.image.size, frame_data.fg_probability
                )

        if (
            load_blobs
            and self.load_depths
            and frame_annotation.depth is not None
            and frame_annotation.depth.path is not None
        ):
            (
                frame_data.depth_map,
                frame_data.depth_path,
                frame_data.depth_mask,
            ) = self._load_mask_depth(frame_annotation, fg_mask_np)

        if load_blobs and self.load_point_clouds and point_cloud is not None:
            pcl_path = self._fix_point_cloud_path(point_cloud.path)
            frame_data.sequence_point_cloud = load_pointcloud(
                self._local_path(pcl_path), max_points=self.max_points
            )
            frame_data.sequence_point_cloud_path = pcl_path

        if frame_annotation.viewpoint is not None:
            frame_data.camera = self._get_pytorch3d_camera(frame_annotation)

        # In-place post-processing: crop first, then resize; both also adjust
        # the camera stored on frame_data.
        if self.box_crop:
            frame_data.crop_by_metadata_bbox_(self.box_crop_context)

        if self.image_height is not None and self.image_width is not None:
            new_size = (self.image_height, self.image_width)
            frame_data.resize_frame_(
                new_size_hw=torch.tensor(new_size, dtype=torch.long),  # pyre-ignore
            )

        return frame_data

    def _load_fg_probability(
        self, entry: types.FrameAnnotation
    ) -> Tuple[np.ndarray, str]:
        # Loads the foreground-probability mask and returns it together with
        # the full path it was read from.
        assert self.dataset_root is not None and entry.mask is not None
        full_path = os.path.join(self.dataset_root, entry.mask.path)
        fg_probability = load_mask(self._local_path(full_path))
        # The mask must match the annotated image resolution exactly.
        if fg_probability.shape[-2:] != entry.image.size:
            raise ValueError(
                f"bad mask size: {fg_probability.shape[-2:]} vs {entry.image.size}!"
            )

        return fg_probability, full_path

    def _postprocess_image(
        self,
        image_np: np.ndarray,
        image_size: Tuple[int, int],
        fg_probability: Optional[torch.Tensor],
    ) -> torch.Tensor:
        # Converts the loaded image to a float tensor, validates its size and
        # optionally zeroes out the background using the foreground mask.
        image_rgb = safe_as_tensor(image_np, torch.float)

        if image_rgb.shape[-2:] != image_size:
            raise ValueError(f"bad image size: {image_rgb.shape[-2:]} vs {image_size}!")

        if self.mask_images:
            # mask_images requires load_masks to have produced fg_probability.
            assert fg_probability is not None
            image_rgb *= fg_probability

        return image_rgb

    def _load_mask_depth(
        self,
        entry: types.FrameAnnotation,
        fg_mask: Optional[np.ndarray],
    ) -> Tuple[torch.Tensor, str, torch.Tensor]:
        # Loads the depth map (with its scale adjustment), optionally masks it
        # by the foreground, and returns (depth_map, depth_path, depth_mask).
        entry_depth = entry.depth
        dataset_root = self.dataset_root
        assert dataset_root is not None
        assert entry_depth is not None and entry_depth.path is not None
        path = os.path.join(dataset_root, entry_depth.path)
        depth_map = load_depth(self._local_path(path), entry_depth.scale_adjustment)

        if self.mask_depths:
            # mask_depths requires load_masks to have produced fg_mask.
            assert fg_mask is not None
            depth_map *= fg_mask

        mask_path = entry_depth.mask_path
        if self.load_depth_masks and mask_path is not None:
            mask_path = os.path.join(dataset_root, mask_path)
            depth_mask = load_depth_mask(self._local_path(mask_path))
        else:
            # Without an explicit mask, treat all positive depths as valid.
            depth_mask = (depth_map > 0.0).astype(np.float32)

        return torch.tensor(depth_map), path, torch.tensor(depth_mask)

    def _get_pytorch3d_camera(
        self,
        entry: types.FrameAnnotation,
    ) -> PerspectiveCameras:
        # Builds a single-batch PerspectiveCameras object from the viewpoint
        # annotation, converting legacy intrinsics to the ndc_isotropic format.
        entry_viewpoint = entry.viewpoint
        assert entry_viewpoint is not None
        # principal point and focal length
        principal_point = torch.tensor(
            entry_viewpoint.principal_point, dtype=torch.float
        )
        focal_length = torch.tensor(entry_viewpoint.focal_length, dtype=torch.float)

        format = entry_viewpoint.intrinsics_format
        if entry_viewpoint.intrinsics_format == "ndc_norm_image_bounds":
            # legacy PyTorch3D NDC format
            # convert to pixels unequally and convert to ndc equally
            image_size_as_list = list(reversed(entry.image.size))
            image_size_wh = torch.tensor(image_size_as_list, dtype=torch.float)
            per_axis_scale = image_size_wh / image_size_wh.min()
            focal_length = focal_length * per_axis_scale
            principal_point = principal_point * per_axis_scale
        elif entry_viewpoint.intrinsics_format != "ndc_isotropic":
            raise ValueError(f"Unknown intrinsics format: {format}")

        return PerspectiveCameras(
            focal_length=focal_length[None],
            principal_point=principal_point[None],
            R=torch.tensor(entry_viewpoint.R, dtype=torch.float)[None],
            T=torch.tensor(entry_viewpoint.T, dtype=torch.float)[None],
        )

    def _fix_point_cloud_path(self, path: str) -> str:
        """
        Fix up a point cloud path from the dataset.
        Some files in Co3Dv2 have an accidental absolute path stored.
        """
        unwanted_prefix = (
            "/large_experiments/p3/replay/datasets/co3d/co3d45k_220512/export_v23/"
        )
        if path.startswith(unwanted_prefix):
            path = path[len(unwanted_prefix) :]
        assert self.dataset_root is not None
        return os.path.join(self.dataset_root, path)

    def _local_path(self, path: str) -> str:
        # Resolves a path through the optional path_manager (e.g. to fetch a
        # remote blob to a local cache); identity when no manager is set.
        if self.path_manager is None:
            return path
        return self.path_manager.get_local_path(path)

    def _exists_in_dataset_root(self, relpath) -> bool:
        # Checks whether dataset_root/relpath exists, honoring path_manager.
        # Returns False when dataset_root itself is unset.
        if not self.dataset_root:
            return False

        full_path = os.path.join(self.dataset_root, relpath)
        if self.path_manager is None:
            return os.path.exists(full_path)
        else:
            return self.path_manager.exists(full_path)
|
| 769 |
+
|
| 770 |
+
|
| 771 |
+
@registry.register
class FrameDataBuilder(GenericWorkaround, GenericFrameDataBuilder[FrameData]):
    """The default, concrete FrameData builder.

    Builds a FrameData object and loads/processes its binary data (cropping
    and resizing). Note that frame data is modified in-place. Parameters and
    methods are documented on `GenericFrameDataBuilder`.
    """

    # This concrete builder produces plain FrameData objects.
    frame_data_type: ClassVar[Type[FrameData]] = FrameData
|