savoji commited on
Commit
3814456
·
verified ·
1 Parent(s): 56b6795

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/README.md +455 -0
  2. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/__init__.py +7 -0
  3. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/experiment.py +303 -0
  4. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/impl/__init__.py +7 -0
  5. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/impl/model_factory.py +136 -0
  6. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/impl/optimizer_factory.py +339 -0
  7. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/impl/training_loop.py +454 -0
  8. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/impl/utils.py +19 -0
  9. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/tests/__init__.py +7 -0
  10. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/tests/experiment.yaml +1243 -0
  11. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/tests/test_experiment.py +282 -0
  12. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/tests/test_optimizer_factory.py +185 -0
  13. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/tests/test_visualize.py +29 -0
  14. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/tests/utils.py +42 -0
  15. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/visualize_reconstruction.py +160 -0
  16. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/.gitignore +5 -0
  17. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/README.md +91 -0
  18. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/__init__.py +5 -0
  19. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/configs/fern.yaml +45 -0
  20. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/configs/lego.yaml +45 -0
  21. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/configs/pt3logo.yaml +45 -0
  22. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/__init__.py +5 -0
  23. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/dataset.py +166 -0
  24. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/eval_video_utils.py +158 -0
  25. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/implicit_function.py +301 -0
  26. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/nerf_renderer.py +436 -0
  27. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/raymarcher.py +73 -0
  28. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/raysampler.py +365 -0
  29. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/stats.py +346 -0
  30. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/utils.py +59 -0
  31. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/test_nerf.py +172 -0
  32. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/tests/__init__.py +5 -0
  33. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/tests/test_raymarcher.py +38 -0
  34. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/tests/test_raysampler.py +126 -0
  35. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/train_nerf.py +273 -0
  36. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/__init__.py +9 -0
  37. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/__init__.py +14 -0
  38. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/r2n2/__init__.py +13 -0
  39. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/r2n2/r2n2.py +427 -0
  40. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/r2n2/r2n2_synset_dict.json +15 -0
  41. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/r2n2/utils.py +504 -0
  42. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet_base.py +291 -0
  43. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/utils.py +50 -0
  44. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/__init__.py +7 -0
  45. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/eval_demo.py +183 -0
  46. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/vis/__init__.py +23 -0
  47. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/vis/__pycache__/__init__.cpython-310.pyc +0 -0
  48. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/vis/__pycache__/plotly_vis.cpython-310.pyc +0 -0
  49. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/vis/__pycache__/texture_vis.cpython-310.pyc +0 -0
  50. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/vis/plotly_vis.py +1057 -0
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/README.md ADDED
@@ -0,0 +1,455 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Implicitron is a PyTorch3D-based framework for new-view synthesis via modeling the neural-network based representations.
4
+
5
+ # License
6
+
7
+ Implicitron is distributed as part of PyTorch3D under the [BSD license](https://github.com/facebookresearch/pytorch3d/blob/main/LICENSE).
8
+ It includes code from the [NeRF](https://github.com/bmild/nerf), [SRN](http://github.com/vsitzmann/scene-representation-networks) and [IDR](http://github.com/lioryariv/idr) repos.
9
+ See [LICENSE-3RD-PARTY](https://github.com/facebookresearch/pytorch3d/blob/main/LICENSE-3RD-PARTY) for their licenses.
10
+
11
+
12
+ # Installation
13
+
14
+ There are three ways to set up Implicitron, depending on the flexibility level required.
15
+ If you only want to train or evaluate models as they are implemented changing only the parameters, you can just install the package.
16
+ Implicitron also provides a flexible API that supports user-defined plug-ins;
17
+ if you want to re-implement some of the components without changing the high-level pipeline, you need to create a custom launcher script.
18
+ The most flexible option, though, is cloning PyTorch3D repo and building it from sources, which allows changing the code in arbitrary ways.
19
+ Below, we describe all three options in more detail.
20
+
21
+
22
+ ## [Option 1] Running an executable from the package
23
+
24
+ This option allows you to use the code as is without changing the implementations.
25
+ Only configuration can be changed (see [Configuration system](#configuration-system)).
26
+
27
+ For this setup, install the dependencies and PyTorch3D from conda following [the guide](https://github.com/facebookresearch/pytorch3d/blob/master/INSTALL.md#1-install-with-cuda-support-from-anaconda-cloud-on-linux-only). Then, install implicitron-specific dependencies:
28
+
29
+ ```shell
30
+ pip install "hydra-core>=1.1" visdom lpips matplotlib accelerate
31
+ ```
32
+
33
+ Runner executable is available as `pytorch3d_implicitron_runner` shell command.
34
+ See [Running](#running) section below for examples of training and evaluation commands.
35
+
36
+
37
+ ## [Option 2] Supporting custom implementations
38
+
39
+ To plug in custom implementations, for example, of renderer or implicit-function protocols, you need to create your own runner script and import the plug-in implementations there.
40
+ First, install PyTorch3D and Implicitron dependencies as described in the previous section.
41
+ Then, implement the custom script; copying `pytorch3d/projects/implicitron_trainer` is a good place to start.
42
+ See [Custom plugins](#custom-plugins) for more information on how to import implementations and enable them in the configs.
43
+
44
+
45
+ ## [Option 3] Cloning PyTorch3D repo
46
+
47
+ This is the most flexible way to set up Implicitron as it allows changing the code directly.
48
+ It allows modifying the high-level rendering pipeline or implementing yet-unsupported loss functions.
49
+ Please follow the instructions to [install PyTorch3D from a local clone](https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md#2-install-from-a-local-clone).
50
+ Then, install Implicitron-specific dependencies:
51
+
52
+ ```shell
53
+ pip install "hydra-core>=1.1" visdom lpips matplotlib accelerate
54
+ ```
55
+
56
+ You are still encouraged to implement custom plugins as above where possible as it makes reusing the code easier.
57
+ The executable is located in `pytorch3d/projects/implicitron_trainer`.
58
+
59
+ > **_NOTE:_** Both `pytorch3d_implicitron_runner` and `pytorch3d_implicitron_visualizer`
60
+ executables (mentioned below) are not available when using local clone.
61
+ Instead, users should use the python scripts `experiment.py` and `visualize_reconstruction.py` (see the [Running](#running) section below).
62
+
63
+
64
+ # Running
65
+
66
+ This section assumes that you use the executable provided by the installed package
67
+ (Option 1 / Option 2 in [Installation](#installation) above),
68
+ i.e. `pytorch3d_implicitron_runner` and `pytorch3d_implicitron_visualizer` are available.
69
+
70
+ > **_NOTE:_** If the executables are not available (e.g. when using a local clone - Option 3 in [Installation](#installation)),
71
+ users should directly use the `experiment.py` and `visualize_reconstruction.py` python scripts
72
+ which correspond to the executables as follows:
73
+ - `pytorch3d_implicitron_runner` corresponds to `<pytorch3d_root>/projects/implicitron_trainer/experiment.py`
74
+ - `pytorch3d_implicitron_visualizer` corresponds to `<pytorch3d_root>/projects/implicitron_trainer/visualize_reconstruction.py`
75
+
76
+ For instance, in order to directly execute training with the python script, users can call:
77
+ ```shell
78
+ cd <pytorch3d_root>/projects/
79
+ python -m implicitron_trainer.experiment <args>
80
+ ```
81
+
82
+ If you have a custom `experiment.py` or `visualize_reconstruction.py` script
83
+ (as in Option 2 [above](#installation)), replace the executable with the path to your script.
84
+
85
+ ## Training
86
+
87
+ To run training, pass a yaml config file, followed by a list of overridden arguments.
88
+ For example, to train NeRF on the first skateboard sequence from CO3D dataset, you can run:
89
+ ```shell
90
+ dataset_args=data_source_ImplicitronDataSource_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
91
+ pytorch3d_implicitron_runner --config-path ./configs/ --config-name repro_singleseq_nerf \
92
+ $dataset_args.dataset_root=<DATASET_ROOT> $dataset_args.category='skateboard' \
93
+ $dataset_args.test_restrict_sequence_id=0 test_when_finished=True exp_dir=<CHECKPOINT_DIR>
94
+ ```
95
+
96
+ Here, `--config-path` points to the config path relative to `pytorch3d_implicitron_runner` location;
97
+ `--config-name` picks the config (in this case, `repro_singleseq_nerf.yaml`);
98
+ `test_when_finished` will launch evaluation script once training is finished.
99
+ Replace `<DATASET_ROOT>` with the location where the dataset in Implicitron format is stored
100
+ and `<CHECKPOINT_DIR>` with a directory where checkpoints will be dumped during training.
101
+ Other configuration parameters can be overridden in the same way.
102
+ See [Configuration system](#configuration-system) section for more information on this.
103
+
104
+ ### Visdom logging
105
+
106
+ Note that the training script logs its progress to Visdom. Make sure to start a visdom server before the training commences:
107
+ ```
108
+ python -m visdom.server
109
+ ```
110
+ > In case a Visdom server is not started, the console will get flooded with `requests.exceptions.ConnectionError` errors signalling that a Visdom server is not available. Note that these errors <b>will NOT interrupt</b> the program and the training will still continue without issues.
111
+
112
+ ## Evaluation
113
+
114
+ To run evaluation on the latest checkpoint after (or during) training, simply add `eval_only=True` to your training command.
115
+
116
+ E.g. for executing the evaluation on the NeRF skateboard sequence, you can run:
117
+ ```shell
118
+ dataset_args=data_source_ImplicitronDataSource_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
119
+ pytorch3d_implicitron_runner --config-path ./configs/ --config-name repro_singleseq_nerf \
120
+ $dataset_args.dataset_root=<CO3D_DATASET_ROOT> $dataset_args.category='skateboard' \
121
+ $dataset_args.test_restrict_sequence_id=0 exp_dir=<CHECKPOINT_DIR> eval_only=True
122
+ ```
123
+ Evaluation prints the metrics to `stdout` and dumps them to a json file in `exp_dir`.
124
+
125
+ ## Visualisation
126
+
127
+ The script produces a video of renders by a trained model assuming a pre-defined camera trajectory.
128
+ In order for it to work, `ffmpeg` needs to be installed:
129
+
130
+ ```shell
131
+ conda install ffmpeg
132
+ ```
133
+
134
+ Here is an example of calling the script:
135
+ ```shell
136
+ pytorch3d_implicitron_visualizer exp_dir=<CHECKPOINT_DIR> \
137
+ visdom_show_preds=True n_eval_cameras=40 render_size="[64,64]" video_size="[256,256]"
138
+ ```
139
+
140
+ The argument `n_eval_cameras` sets the number of rendering viewpoints sampled on a trajectory, which defaults to a circular fly-around;
141
+ `render_size` sets the size of a render passed to the model, which can be resized to `video_size` before writing.
142
+
143
+ Rendered videos of images, masks, and depth maps will be saved to `<CHECKPOINT_DIR>/video`.
144
+
145
+
146
+ # Configuration system
147
+
148
+ We use hydra and OmegaConf to parse the configs.
149
+ The config schema and default values are defined by the dataclasses implementing the modules.
150
+ More specifically, if a class derives from `Configurable`, its fields can be set in config yaml files or overridden in CLI.
151
+ For example, `GenericModel` has a field `render_image_width` with the default value 400.
152
+ If it is specified in the yaml config file or in CLI command, the new value will be used.
153
+
154
+ Configurables can form hierarchies.
155
+ For example, `GenericModel` has a field `raysampler: RaySampler`, which is also Configurable.
156
+ In the config, inner parameters can be propagated using `_args` postfix, e.g. to change `raysampler.n_pts_per_ray_training` (the number of sampled points per ray), the node `raysampler_args.n_pts_per_ray_training` should be specified.
157
+
158
+ ### Top-level configuration class: `Experiment`
159
+
160
+ <b>The root of the hierarchy is defined by `Experiment` Configurable in `<pytorch3d_root>/projects/implicitron_trainer/experiment.py`.</b>
161
+
162
+ It has top-level fields like `seed`, which seeds the random number generator.
163
+ Additionally, it has non-leaf nodes like `model_factory_ImplicitronModelFactory_args.model_GenericModel_args`, which dispatches the config parameters to `GenericModel`.
164
+ Thus, changing the model parameters may be achieved in two ways: either by editing the config file, e.g.
165
+ ```yaml
166
+ model_factory_ImplicitronModelFactory_args:
167
+ model_GenericModel_args:
168
+ render_image_width: 800
169
+ raysampler_args:
170
+ n_pts_per_ray_training: 128
171
+ ```
172
+
173
+ or, equivalently, by adding the following to `pytorch3d_implicitron_runner` arguments:
174
+
175
+ ```shell
176
+ model_args=model_factory_ImplicitronModelFactory_args.model_GenericModel_args
177
+ $model_args.render_image_width=800 $model_args.raysampler_args.n_pts_per_ray_training=128
178
+ ```
179
+
180
+ See the documentation in `pytorch3d/implicitron/tools/config.py` for more details.
181
+
182
+ ## Replaceable implementations
183
+
184
+ Sometimes changing the model parameters does not provide enough flexibility, and you want to provide a new implementation for a building block.
185
+ The configuration system also supports it!
186
+ Abstract classes like `BaseRenderer` derive from `ReplaceableBase` instead of `Configurable`.
187
+ This means that other Configurables can refer to them using the base type, while the specific implementation is chosen in the config using `_class_type`-postfixed node.
188
+ In that case, `_args` node name has to include the implementation type.
189
+ More specifically, to change renderer settings, the config will look like this:
190
+ ```yaml
191
+ model_factory_ImplicitronModelFactory_args:
192
+ model_GenericModel_args:
193
+ renderer_class_type: LSTMRenderer
194
+ renderer_LSTMRenderer_args:
195
+ num_raymarch_steps: 10
196
+ hidden_size: 16
197
+ ```
198
+
199
+ See the documentation in `pytorch3d/implicitron/tools/config.py` for more details on the configuration system.
200
+
201
+ ## Custom plugins
202
+
203
+ If you have an idea for another implementation of a replaceable component, it can be plugged in without changing the core code.
204
+ For that, you need to set up Implicitron through option 2 or 3 above.
205
+ Let's say you want to implement a renderer that accumulates opacities similar to an X-ray machine.
206
+ First, create a module `x_ray_renderer.py` with a class deriving from `BaseRenderer`:
207
+
208
+ ```python
209
+ from pytorch3d.implicitron.tools.config import registry
210
+
211
+ @registry.register
212
+ class XRayRenderer(BaseRenderer, torch.nn.Module):
213
+ n_pts_per_ray: int = 64
214
+
215
+ def __post_init__(self):
216
+ # custom initialization
217
+
218
+ def forward(
219
+ self,
220
+ ray_bundle,
221
+ implicit_functions=[],
222
+ evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
223
+ **kwargs,
224
+ ) -> RendererOutput:
225
+ ...
226
+ ```
227
+
228
+ Please note `@registry.register` decorator that registers the plug-in as an implementation of `Renderer`.
229
+ IMPORTANT: In order for it to run, the class (or its enclosing module) has to be imported in your launch script.
230
+ Additionally, this has to be done before parsing the root configuration class `ExperimentConfig`.
231
+ Simply add `from . import x_ray_renderer` at the beginning of `experiment.py`.
232
+
233
+ After that, you should be able to change the config with:
234
+ ```yaml
235
+ model_factory_ImplicitronModelFactory_args:
236
+ model_GenericModel_args:
237
+ renderer_class_type: XRayRenderer
238
+ renderer_XRayRenderer_args:
239
+ n_pts_per_ray: 128
240
+ ```
241
+
242
+ to replace the implementation and potentially override the parameters.
243
+
244
+ # Code and config structure
245
+
246
+ The main object for this trainer loop is `Experiment`. It has four top-level replaceable components.
247
+
248
+ * `data_source`: This is a `DataSourceBase` which defaults to `ImplicitronDataSource`.
249
+ It constructs the data sets and dataloaders.
250
+ * `model_factory`: This is a `ModelFactoryBase` which defaults to `ImplicitronModelFactory`.
251
+ It constructs the model, which is usually an instance of `OverfitModel` (for NeRF-style training with overfitting to one scene) or `GenericModel` (that is able to generalize to multiple scenes by NeRFormer-style conditioning on other scene views), and can load its weights from a checkpoint.
252
+ * `optimizer_factory`: This is an `OptimizerFactoryBase` which defaults to `ImplicitronOptimizerFactory`.
253
+ It constructs the optimizer and can load its weights from a checkpoint.
254
+ * `training_loop`: This is a `TrainingLoopBase` which defaults to `ImplicitronTrainingLoop` and defines the main training loop.
255
+
256
+ As per above, the config structure is parsed automatically from the module hierarchy.
257
+ In particular, for ImplicitronModelFactory with generic model, model parameters are contained in the `model_factory_ImplicitronModelFactory_args.model_GenericModel_args` node, and dataset parameters in `data_source_ImplicitronDataSource_args` node.
258
+
259
+ Here is the class structure of GenericModel (single-line edges show aggregation, while double lines show available implementations):
260
+ ```
261
+ model_GenericModel_args: GenericModel
262
+ └-- global_encoder_*_args: GlobalEncoderBase
263
+ ╘== SequenceAutodecoder
264
+ └-- autodecoder_args: Autodecoder
265
+ ╘== HarmonicTimeEncoder
266
+ └-- raysampler_*_args: RaySampler
267
+ ╘== AdaptiveRaysampler
268
+ ╘== NearFarRaysampler
269
+ └-- renderer_*_args: BaseRenderer
270
+ ╘== MultiPassEmissionAbsorptionRenderer
271
+ ╘== LSTMRenderer
272
+ ╘== SignedDistanceFunctionRenderer
273
+ └-- ray_tracer_args: RayTracing
274
+ └-- ray_normal_coloring_network_args: RayNormalColoringNetwork
275
+ └-- implicit_function_*_args: ImplicitFunctionBase
276
+ ╘== NeuralRadianceFieldImplicitFunction
277
+ ╘== SRNImplicitFunction
278
+ └-- raymarch_function_args: SRNRaymarchFunction
279
+ └-- pixel_generator_args: SRNPixelGenerator
280
+ ╘== SRNHyperNetImplicitFunction
281
+ └-- hypernet_args: SRNRaymarchHyperNet
282
+ └-- pixel_generator_args: SRNPixelGenerator
283
+ ╘== IdrFeatureField
284
+ └-- image_feature_extractor_*_args: FeatureExtractorBase
285
+ ╘== ResNetFeatureExtractor
286
+ └-- view_pooler_args: ViewPooler
287
+ └-- view_sampler_args: ViewSampler
288
+ └-- feature_aggregator_*_args: FeatureAggregatorBase
289
+ ╘== IdentityFeatureAggregator
290
+ ╘== AngleWeightedIdentityFeatureAggregator
291
+ ╘== AngleWeightedReductionFeatureAggregator
292
+ ╘== ReductionFeatureAggregator
293
+ ```
294
+
295
+ Here is the class structure of OverfitModel:
296
+
297
+ ```
298
+ model_OverfitModel_args: OverfitModel
299
+ └-- raysampler_*_args: RaySampler
300
+ ╘== AdaptiveRaysampler
301
+ ╘== NearFarRaysampler
302
+ └-- renderer_*_args: BaseRenderer
303
+ ╘== MultiPassEmissionAbsorptionRenderer
304
+ ╘== LSTMRenderer
305
+ ╘== SignedDistanceFunctionRenderer
306
+ └-- ray_tracer_args: RayTracing
307
+ └-- ray_normal_coloring_network_args: RayNormalColoringNetwork
308
+ └-- implicit_function_*_args: ImplicitFunctionBase
309
+ ╘== NeuralRadianceFieldImplicitFunction
310
+ ╘== SRNImplicitFunction
311
+ └-- raymarch_function_args: SRNRaymarchFunction
312
+ └-- pixel_generator_args: SRNPixelGenerator
313
+ ╘== SRNHyperNetImplicitFunction
314
+ └-- hypernet_args: SRNRaymarchHyperNet
315
+ └-- pixel_generator_args: SRNPixelGenerator
316
+ ╘== IdrFeatureField
317
+ └-- coarse_implicit_function_*_args: ImplicitFunctionBase
318
+ ╘== NeuralRadianceFieldImplicitFunction
319
+ ╘== SRNImplicitFunction
320
+ └-- raymarch_function_args: SRNRaymarchFunction
321
+ └-- pixel_generator_args: SRNPixelGenerator
322
+ ╘== SRNHyperNetImplicitFunction
323
+ └-- hypernet_args: SRNRaymarchHyperNet
324
+ └-- pixel_generator_args: SRNPixelGenerator
325
+ ╘== IdrFeatureField
326
+ ```
327
+
328
+ OverfitModel has been introduced as a simple class to disentangle NeRFs that follow the overfitting pattern
329
+ from the GenericModel.
330
+
331
+
332
+ Please look at the annotations of the respective classes or functions for the lists of hyperparameters.
333
+ `tests/experiment.yaml` shows every possible option if you have no user-defined classes.
334
+
335
+
336
+ # Implementations of existing methods
337
+
338
+ We provide configuration files that implement several existing works.
339
+
340
+ <b>The configuration files live in `pytorch3d/projects/implicitron_trainer/configs`.</b>
341
+
342
+
343
+ ## NeRF
344
+
345
+ The following config file corresponds to training of a vanilla NeRF on Blender Synthetic dataset
346
+ (see https://arxiv.org/abs/2003.08934 for details of the method):
347
+
348
+ `./configs/repro_singleseq_nerf_blender.yaml`
349
+
350
+
351
+ ### Downloading Blender-Synthetic
352
+ Training requires the Blender Synthetic dataset.
353
+ To download the dataset, visit the [gdrive bucket](https://drive.google.com/file/d/18JxhpWD-4ZmuFKLzKlAw-w5PpzZxXOcG/view?usp=share_link)
354
+ and click Download.
355
+ Then unpack the downloaded .zip file to a folder which we call `<BLENDER_DATASET_ROOT_FOLDER>`.
356
+
357
+
358
+ ### Launching NeRF training
359
+ In order to train NeRF on the "drums" scene, execute the following command:
360
+ ```shell
361
+ export BLENDER_DATASET_ROOT="<BLENDER_DATASET_ROOT_FOLDER>"
362
+ export BLENDER_SINGLESEQ_CLASS="drums"
363
+ pytorch3d_implicitron_runner --config-path ./configs/ --config-name repro_singleseq_nerf_blender
364
+ ```
365
+
366
+ Note that the training scene is selected by setting the environment variable `BLENDER_SINGLESEQ_CLASS`
367
+ appropriately (one of `"chair"`, `"drums"`, `"ficus"`, `"hotdog"`, `"lego"`, `"materials"`, `"mic"`, `"ship"`).
368
+
369
+ By default, the training outputs will be stored to `"./data/nerf_blender_repro/$BLENDER_SINGLESEQ_CLASS/"`
370
+
371
+
372
+ ### Visualizing trained NeRF
373
+ ```shell
374
+ pytorch3d_implicitron_visualizer exp_dir=<CHECKPOINT_DIR> \
375
+ visdom_show_preds=True n_eval_cameras=40 render_size="[64,64]" video_size="[256,256]"
376
+ ```
377
+ where `<CHECKPOINT_DIR>` corresponds to the directory with the training outputs (defaults to `"./data/nerf_blender_repro/$BLENDER_SINGLESEQ_CLASS/"`).
378
+
379
+ The script will output a rendered video of the learned radiance field to `"./data/nerf_blender_repro/$BLENDER_SINGLESEQ_CLASS/"` (requires `ffmpeg`).
380
+
381
+ > **_NOTE:_** Recall that, if `pytorch3d_implicitron_runner`/`pytorch3d_implicitron_visualizer` are not available, replace the calls
382
+ with `cd <pytorch3d_root>/projects/; python -m implicitron_trainer.[experiment|visualize_reconstruction]`
383
+
384
+
385
+ ## CO3D experiments
386
+
387
+ Common Objects in 3D (CO3D) is a large-scale dataset of videos of rigid objects grouped into 50 common categories.
388
+ Implicitron provides implementations and config files to reproduce the results from [the paper](https://arxiv.org/abs/2109.00512).
389
+ Please follow [the link](https://github.com/facebookresearch/co3d#automatic-batch-download) for the instructions to download the dataset.
390
+ In training and evaluation scripts, use the download location as `<DATASET_ROOT>`.
391
+ It is also possible to define environment variable `CO3D_DATASET_ROOT` instead of specifying it.
392
+ To reproduce the experiments from the paper, use the following configs.
393
+
394
+ For single-sequence experiments:
395
+
396
+ | Method | config file |
397
+ |-----------------|-------------------------------------|
398
+ | NeRF | repro_singleseq_nerf.yaml |
399
+ | NeRF + WCE | repro_singleseq_nerf_wce.yaml |
400
+ | NerFormer | repro_singleseq_nerformer.yaml |
401
+ | IDR | repro_singleseq_idr.yaml |
402
+ | SRN | repro_singleseq_srn_noharm.yaml |
403
+ | SRN + γ | repro_singleseq_srn.yaml |
404
+ | SRN + WCE | repro_singleseq_srn_wce_noharm.yaml |
405
+ | SRN + WCE + γ | repro_singleseq_srn_wce.yaml |
406
+
407
+ For multi-sequence autodecoder experiments (without generalization to new sequences):
408
+
409
+ | Method | config file |
410
+ |-----------------|--------------------------------------------|
411
+ | NeRF + AD | repro_multiseq_nerf_ad.yaml |
412
+ | SRN + AD | repro_multiseq_srn_ad_hypernet_noharm.yaml |
413
+ | SRN + γ + AD | repro_multiseq_srn_ad_hypernet.yaml |
414
+
415
+ For multi-sequence experiments (with generalization to new sequences):
416
+
417
+ | Method | config file |
418
+ |-----------------|--------------------------------------|
419
+ | NeRF + WCE | repro_multiseq_nerf_wce.yaml |
420
+ | NerFormer | repro_multiseq_nerformer.yaml |
421
+ | SRN + WCE | repro_multiseq_srn_wce_noharm.yaml |
422
+ | SRN + WCE + γ | repro_multiseq_srn_wce.yaml |
423
+
424
+
425
+ ## CO3Dv2 experiments
426
+
427
+ The following config files implement training on the second version of CO3D, `CO3Dv2`.
428
+
429
+ In order to launch trainings, set the `CO3DV2_DATASET_ROOT` environment variable
430
+ to the root folder of the dataset (note that the name of the env. variable differs from the CO3Dv1 version).
431
+
432
+ Single-sequence experiments:
433
+
434
+ | Method | config file |
435
+ |-----------------|-------------------------------------|
436
+ | NeRF | repro_singleseq_v2_nerf.yaml |
437
+ | NerFormer | repro_singleseq_v2_nerformer.yaml |
438
+ | IDR | repro_singleseq_v2_idr.yaml |
439
+ | SRN | repro_singleseq_v2_srn_noharm.yaml |
440
+
441
+ Multi-sequence autodecoder experiments (without generalization to new sequences):
442
+
443
+ | Method | config file |
444
+ |-----------------|--------------------------------------------|
445
+ | NeRF + AD | repro_multiseq_v2_nerf_ad.yaml |
446
+ | SRN + γ + AD | repro_multiseq_v2_srn_ad_hypernet.yaml |
447
+
448
+ Multi-sequence experiments (with generalization to new sequences):
449
+
450
+ | Method | config file |
451
+ |-----------------|----------------------------------------|
452
+ | NeRF + WCE | repro_multiseq_v2_nerf_wce.yaml |
453
+ | NerFormer | repro_multiseq_v2_nerformer.yaml |
454
+ | SRN + WCE | repro_multiseq_v2_srn_wce_noharm.yaml |
455
+ | SRN + WCE + γ | repro_multiseq_v2_srn_wce.yaml |
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/experiment.py ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ # pyre-unsafe
9
+
10
+ """"
11
+ This file is the entry point for launching experiments with Implicitron.
12
+
13
+ Launch Training
14
+ ---------------
15
+ Experiment config .yaml files are located in the
16
+ `projects/implicitron_trainer/configs` folder. To launch an experiment,
17
+ specify the name of the file. Specific config values can also be overridden
18
+ from the command line, for example:
19
+
20
+ ```
21
+ ./experiment.py --config-name base_config.yaml override.param.one=42 override.param.two=84
22
+ ```
23
+
24
+ Main functions
25
+ ---------------
26
+ - The Experiment class defines `run` which creates the model, optimizer, and other
27
+ objects used in training, then starts TrainingLoop's `run` function.
28
+ - TrainingLoop takes care of the actual training logic: forward and backward passes,
29
+ evaluation and testing, as well as model checkpointing, visualization, and metric
30
+ printing.
31
+
32
+ Outputs
33
+ --------
34
+ The outputs of the experiment are saved and logged in multiple ways:
35
+ - Checkpoints:
36
+ Model, optimizer and stats are stored in the directory
37
+ named by the `exp_dir` key from the config file / CLI parameters.
38
+ - Stats
39
+ Stats are logged and plotted to the file "train_stats.pdf" in the
40
+ same directory. The stats are also saved as part of the checkpoint file.
41
+ - Visualizations
42
+ Predictions are plotted to a visdom server running at the
43
+ port specified by the `visdom_server` and `visdom_port` keys in the
44
+ config file.
45
+
46
+ """
47
+ import logging
48
+ import os
49
+ import warnings
50
+
51
+ from dataclasses import field
52
+
53
+ import hydra
54
+
55
+ import torch
56
+ from accelerate import Accelerator
57
+ from omegaconf import DictConfig, OmegaConf
58
+ from packaging import version
59
+
60
+ from pytorch3d.implicitron.dataset.data_source import (
61
+ DataSourceBase,
62
+ ImplicitronDataSource,
63
+ )
64
+ from pytorch3d.implicitron.models.base_model import ImplicitronModelBase
65
+
66
+ from pytorch3d.implicitron.models.renderer.multipass_ea import (
67
+ MultiPassEmissionAbsorptionRenderer,
68
+ )
69
+ from pytorch3d.implicitron.models.renderer.ray_sampler import AdaptiveRaySampler
70
+ from pytorch3d.implicitron.tools.config import (
71
+ Configurable,
72
+ expand_args_fields,
73
+ remove_unused_components,
74
+ run_auto_creation,
75
+ )
76
+
77
+ from .impl.model_factory import ModelFactoryBase
78
+ from .impl.optimizer_factory import OptimizerFactoryBase
79
+ from .impl.training_loop import TrainingLoopBase
80
+ from .impl.utils import seed_all_random_engines
81
+
82
+ logger = logging.getLogger(__name__)
83
+
84
+ # workaround for https://github.com/facebookresearch/hydra/issues/2262
85
+ _RUN = hydra.types.RunMode.RUN
86
+
87
+ if version.parse(hydra.__version__) < version.Version("1.1"):
88
+ raise ValueError(
89
+ f"Hydra version {hydra.__version__} is too old."
90
+ " (Implicitron requires version 1.1 or later.)"
91
+ )
92
+
93
+ try:
94
+ # only makes sense in FAIR cluster
95
+ import pytorch3d.implicitron.fair_cluster.slurm # noqa: F401
96
+ except ModuleNotFoundError:
97
+ pass
98
+
99
+ no_accelerate = os.environ.get("PYTORCH3D_NO_ACCELERATE") is not None
100
+
101
+
102
+ class Experiment(Configurable):
103
+ """
104
+ This class is at the top level of Implicitron's config hierarchy. Its
105
+ members are high-level components necessary for training an implicit rende-
106
+ ring network.
107
+
108
+ Members:
109
+ data_source: An object that produces datasets and dataloaders.
110
+ model_factory: An object that produces an implicit rendering model as
111
+ well as its corresponding Stats object.
112
+ optimizer_factory: An object that produces the optimizer and lr
113
+ scheduler.
114
+ training_loop: An object that runs training given the outputs produced
115
+ by the data_source, model_factory and optimizer_factory.
116
+ seed: A random seed to ensure reproducibility.
117
+ detect_anomaly: Whether torch.autograd should detect anomalies. Useful
118
+ for debugging, but might slow down the training.
119
+ exp_dir: Root experimentation directory. Checkpoints and training stats
120
+ will be saved here.
121
+ """
122
+
123
+ # pyre-fixme[13]: Attribute `data_source` is never initialized.
124
+ data_source: DataSourceBase
125
+ data_source_class_type: str = "ImplicitronDataSource"
126
+ # pyre-fixme[13]: Attribute `model_factory` is never initialized.
127
+ model_factory: ModelFactoryBase
128
+ model_factory_class_type: str = "ImplicitronModelFactory"
129
+ # pyre-fixme[13]: Attribute `optimizer_factory` is never initialized.
130
+ optimizer_factory: OptimizerFactoryBase
131
+ optimizer_factory_class_type: str = "ImplicitronOptimizerFactory"
132
+ # pyre-fixme[13]: Attribute `training_loop` is never initialized.
133
+ training_loop: TrainingLoopBase
134
+ training_loop_class_type: str = "ImplicitronTrainingLoop"
135
+
136
+ seed: int = 42
137
+ detect_anomaly: bool = False
138
+ exp_dir: str = "./data/default_experiment/"
139
+
140
+ hydra: dict = field(
141
+ default_factory=lambda: {
142
+ "run": {"dir": "."}, # Make hydra not change the working dir.
143
+ "output_subdir": None, # disable storing the .hydra logs
144
+ "mode": _RUN,
145
+ }
146
+ )
147
+
148
+ def __post_init__(self):
149
+ seed_all_random_engines(
150
+ self.seed
151
+ ) # Set all random engine seeds for reproducibility
152
+
153
+ run_auto_creation(self)
154
+
155
+ def run(self) -> None:
156
+ # Initialize the accelerator if desired.
157
+ if no_accelerate:
158
+ accelerator = None
159
+ device = torch.device("cuda:0")
160
+ else:
161
+ accelerator = Accelerator(device_placement=False)
162
+ logger.info(accelerator.state)
163
+ device = accelerator.device
164
+
165
+ logger.info(f"Running experiment on device: {device}")
166
+ os.makedirs(self.exp_dir, exist_ok=True)
167
+
168
+ # set the debug mode
169
+ if self.detect_anomaly:
170
+ logger.info("Anomaly detection!")
171
+ torch.autograd.set_detect_anomaly(self.detect_anomaly)
172
+
173
+ # Initialize the datasets and dataloaders.
174
+ datasets, dataloaders = self.data_source.get_datasets_and_dataloaders()
175
+
176
+ # Init the model and the corresponding Stats object.
177
+ model = self.model_factory(
178
+ accelerator=accelerator,
179
+ exp_dir=self.exp_dir,
180
+ )
181
+
182
+ stats = self.training_loop.load_stats(
183
+ log_vars=model.log_vars,
184
+ exp_dir=self.exp_dir,
185
+ resume=self.model_factory.resume,
186
+ resume_epoch=self.model_factory.resume_epoch, # pyre-ignore [16]
187
+ )
188
+ start_epoch = stats.epoch + 1
189
+
190
+ model.to(device)
191
+
192
+ # Init the optimizer and LR scheduler.
193
+ optimizer, scheduler = self.optimizer_factory(
194
+ accelerator=accelerator,
195
+ exp_dir=self.exp_dir,
196
+ last_epoch=start_epoch,
197
+ model=model,
198
+ resume=self.model_factory.resume,
199
+ resume_epoch=self.model_factory.resume_epoch,
200
+ )
201
+
202
+ # Wrap all modules in the distributed library
203
+ # Note: we don't pass the scheduler to prepare as it
204
+ # doesn't need to be stepped at each optimizer step
205
+ train_loader = dataloaders.train
206
+ val_loader = dataloaders.val
207
+ test_loader = dataloaders.test
208
+ if accelerator is not None:
209
+ (
210
+ model,
211
+ optimizer,
212
+ train_loader,
213
+ val_loader,
214
+ ) = accelerator.prepare(model, optimizer, train_loader, val_loader)
215
+
216
+ # Enter the main training loop.
217
+ self.training_loop.run(
218
+ train_loader=train_loader,
219
+ val_loader=val_loader,
220
+ test_loader=test_loader,
221
+ # pyre-ignore[6]
222
+ train_dataset=datasets.train,
223
+ model=model,
224
+ optimizer=optimizer,
225
+ scheduler=scheduler,
226
+ accelerator=accelerator,
227
+ device=device,
228
+ exp_dir=self.exp_dir,
229
+ stats=stats,
230
+ seed=self.seed,
231
+ )
232
+
233
+
234
+ def _setup_envvars_for_cluster() -> bool:
235
+ """
236
+ Prepares to run on cluster if relevant.
237
+ Returns whether FAIR cluster in use.
238
+ """
239
+ # TODO: How much of this is needed in general?
240
+
241
+ try:
242
+ import submitit
243
+ except ImportError:
244
+ return False
245
+
246
+ try:
247
+ # Only needed when launching on cluster with slurm and submitit
248
+ job_env = submitit.JobEnvironment()
249
+ except RuntimeError:
250
+ return False
251
+
252
+ os.environ["LOCAL_RANK"] = str(job_env.local_rank)
253
+ os.environ["RANK"] = str(job_env.global_rank)
254
+ os.environ["WORLD_SIZE"] = str(job_env.num_tasks)
255
+ os.environ["MASTER_ADDR"] = "localhost"
256
+ os.environ["MASTER_PORT"] = "42918"
257
+ logger.info(
258
+ "Num tasks %s, global_rank %s"
259
+ % (str(job_env.num_tasks), str(job_env.global_rank))
260
+ )
261
+
262
+ return True
263
+
264
+
265
+ def dump_cfg(cfg: DictConfig) -> None:
266
+ remove_unused_components(cfg)
267
+ # dump the exp config to the exp dir
268
+ os.makedirs(cfg.exp_dir, exist_ok=True)
269
+ try:
270
+ cfg_filename = os.path.join(cfg.exp_dir, "expconfig.yaml")
271
+ OmegaConf.save(config=cfg, f=cfg_filename)
272
+ except PermissionError:
273
+ warnings.warn("Can't dump config due to insufficient permissions!")
274
+
275
+
276
+ expand_args_fields(Experiment)
277
+ cs = hydra.core.config_store.ConfigStore.instance()
278
+ cs.store(name="default_config", node=Experiment)
279
+
280
+
281
+ @hydra.main(config_path="./configs/", config_name="default_config")
282
+ def experiment(cfg: DictConfig) -> None:
283
+ # CUDA_VISIBLE_DEVICES must have been set.
284
+
285
+ if "CUDA_DEVICE_ORDER" not in os.environ:
286
+ os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
287
+
288
+ if not _setup_envvars_for_cluster():
289
+ logger.info("Running locally")
290
+
291
+ # TODO: The following may be needed for hydra/submitit it to work
292
+ expand_args_fields(ImplicitronModelBase)
293
+ expand_args_fields(AdaptiveRaySampler)
294
+ expand_args_fields(MultiPassEmissionAbsorptionRenderer)
295
+ expand_args_fields(ImplicitronDataSource)
296
+
297
+ experiment = Experiment(**cfg)
298
+ dump_cfg(cfg)
299
+ experiment.run()
300
+
301
+
302
+ if __name__ == "__main__":
303
+ experiment()
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/impl/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/impl/model_factory.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import logging
10
+ import os
11
+ from typing import Optional
12
+
13
+ import torch.optim
14
+
15
+ from accelerate import Accelerator
16
+ from pytorch3d.implicitron.models.base_model import ImplicitronModelBase
17
+ from pytorch3d.implicitron.tools import model_io
18
+ from pytorch3d.implicitron.tools.config import (
19
+ registry,
20
+ ReplaceableBase,
21
+ run_auto_creation,
22
+ )
23
+ from pytorch3d.implicitron.tools.stats import Stats
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+
28
+ class ModelFactoryBase(ReplaceableBase):
29
+
30
+ resume: bool = True # resume from the last checkpoint
31
+
32
+ def __call__(self, **kwargs) -> ImplicitronModelBase:
33
+ """
34
+ Initialize the model (possibly from a previously saved state).
35
+
36
+ Returns: An instance of ImplicitronModelBase.
37
+ """
38
+ raise NotImplementedError()
39
+
40
+ def load_stats(self, **kwargs) -> Stats:
41
+ """
42
+ Initialize or load a Stats object.
43
+ """
44
+ raise NotImplementedError()
45
+
46
+
47
+ @registry.register
48
+ class ImplicitronModelFactory(ModelFactoryBase):
49
+ """
50
+ A factory class that initializes an implicit rendering model.
51
+
52
+ Members:
53
+ model: An ImplicitronModelBase object.
54
+ resume: If True, attempt to load the last checkpoint from `exp_dir`
55
+ passed to __call__. Failure to do so will return a model with ini-
56
+ tial weights unless `force_resume` is True.
57
+ resume_epoch: If `resume` is True: Resume a model at this epoch, or if
58
+ `resume_epoch` <= 0, then resume from the latest checkpoint.
59
+ force_resume: If True, throw a FileNotFoundError if `resume` is True but
60
+ a model checkpoint cannot be found.
61
+
62
+ """
63
+
64
+ # pyre-fixme[13]: Attribute `model` is never initialized.
65
+ model: ImplicitronModelBase
66
+ model_class_type: str = "GenericModel"
67
+ resume: bool = True
68
+ resume_epoch: int = -1
69
+ force_resume: bool = False
70
+
71
+ def __post_init__(self):
72
+ run_auto_creation(self)
73
+
74
+ def __call__(
75
+ self,
76
+ exp_dir: str,
77
+ accelerator: Optional[Accelerator] = None,
78
+ ) -> ImplicitronModelBase:
79
+ """
80
+ Returns an instance of `ImplicitronModelBase`, possibly loaded from a
81
+ checkpoint (if self.resume, self.resume_epoch specify so).
82
+
83
+ Args:
84
+ exp_dir: Root experiment directory.
85
+ accelerator: An Accelerator object.
86
+
87
+ Returns:
88
+ model: The model with optionally loaded weights from checkpoint
89
+
90
+ Raise:
91
+ FileNotFoundError if `force_resume` is True but checkpoint not found.
92
+ """
93
+ # Determine the network outputs that should be logged
94
+ if hasattr(self.model, "log_vars"):
95
+ log_vars = list(self.model.log_vars)
96
+ else:
97
+ log_vars = ["objective"]
98
+
99
+ if self.resume_epoch > 0:
100
+ # Resume from a certain epoch
101
+ model_path = model_io.get_checkpoint(exp_dir, self.resume_epoch)
102
+ if not os.path.isfile(model_path):
103
+ raise ValueError(f"Cannot find model from epoch {self.resume_epoch}.")
104
+ else:
105
+ # Retrieve the last checkpoint
106
+ model_path = model_io.find_last_checkpoint(exp_dir)
107
+
108
+ if model_path is not None:
109
+ logger.info(f"Found previous model {model_path}")
110
+ if self.force_resume or self.resume:
111
+ logger.info("Resuming.")
112
+
113
+ map_location = None
114
+ if accelerator is not None and not accelerator.is_local_main_process:
115
+ map_location = {
116
+ "cuda:%d" % 0: "cuda:%d" % accelerator.local_process_index
117
+ }
118
+ model_state_dict = torch.load(
119
+ model_io.get_model_path(model_path), map_location=map_location
120
+ )
121
+
122
+ try:
123
+ self.model.load_state_dict(model_state_dict, strict=True)
124
+ except RuntimeError as e:
125
+ logger.error(e)
126
+ logger.info(
127
+ "Cannot load state dict in strict mode! -> trying non-strict"
128
+ )
129
+ self.model.load_state_dict(model_state_dict, strict=False)
130
+ self.model.log_vars = log_vars
131
+ else:
132
+ logger.info("Not resuming -> starting from scratch.")
133
+ elif self.force_resume:
134
+ raise FileNotFoundError(f"Cannot find a checkpoint in {exp_dir}!")
135
+
136
+ return self.model
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/impl/optimizer_factory.py ADDED
@@ -0,0 +1,339 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import inspect
10
+ import logging
11
+ import os
12
+ from collections import defaultdict
13
+ from dataclasses import field
14
+ from typing import Any, Dict, List, Optional, Tuple
15
+
16
+ import torch.optim
17
+
18
+ from accelerate import Accelerator
19
+
20
+ from pytorch3d.implicitron.models.base_model import ImplicitronModelBase
21
+ from pytorch3d.implicitron.tools import model_io
22
+ from pytorch3d.implicitron.tools.config import (
23
+ registry,
24
+ ReplaceableBase,
25
+ run_auto_creation,
26
+ )
27
+
28
+ logger = logging.getLogger(__name__)
29
+
30
+
31
+ class OptimizerFactoryBase(ReplaceableBase):
32
+ def __call__(
33
+ self, model: ImplicitronModelBase, **kwargs
34
+ ) -> Tuple[torch.optim.Optimizer, Any]:
35
+ """
36
+ Initialize the optimizer and lr scheduler.
37
+
38
+ Args:
39
+ model: The model with optionally loaded weights.
40
+
41
+ Returns:
42
+ An optimizer module (optionally loaded from a checkpoint) and
43
+ a learning rate scheduler module (should be a subclass of torch.optim's
44
+ lr_scheduler._LRScheduler).
45
+ """
46
+ raise NotImplementedError()
47
+
48
+
49
+ @registry.register
50
+ class ImplicitronOptimizerFactory(OptimizerFactoryBase):
51
+ """
52
+ A factory that initializes the optimizer and lr scheduler.
53
+
54
+ Members:
55
+ betas: Beta parameters for the Adam optimizer.
56
+ breed: The type of optimizer to use. We currently support SGD, Adagrad
57
+ and Adam.
58
+ exponential_lr_step_size: With Exponential policy only,
59
+ lr = lr * gamma ** (epoch/step_size)
60
+ gamma: Multiplicative factor of learning rate decay.
61
+ lr: The value for the initial learning rate.
62
+ lr_policy: The policy to use for learning rate. We currently support
63
+ MultiStepLR and Exponential policies.
64
+ momentum: A momentum value (for SGD only).
65
+ multistep_lr_milestones: With MultiStepLR policy only: list of
66
+ increasing epoch indices at which the learning rate is modified.
67
+ momentum: Momentum factor for SGD optimizer.
68
+ weight_decay: The optimizer weight_decay (L2 penalty on model weights).
69
+ foreach: Whether to use new "foreach" implementation of optimizer where
70
+ available (e.g. requires PyTorch 1.12.0 for Adam)
71
+ group_learning_rates: Parameters or modules can be assigned to parameter
72
+ groups. This dictionary has names of those parameter groups as keys
73
+ and learning rates as values. All parameter group names have to be
74
+ defined in this dictionary. Parameters which do not have predefined
75
+ parameter group are put into "default" parameter group which has
76
+ `lr` as its learning rate.
77
+ """
78
+
79
+ betas: Tuple[float, ...] = (0.9, 0.999)
80
+ breed: str = "Adam"
81
+ exponential_lr_step_size: int = 250
82
+ gamma: float = 0.1
83
+ lr: float = 0.0005
84
+ lr_policy: str = "MultiStepLR"
85
+ momentum: float = 0.9
86
+ multistep_lr_milestones: tuple = ()
87
+ weight_decay: float = 0.0
88
+ linear_exponential_lr_milestone: int = 200
89
+ linear_exponential_start_gamma: float = 0.1
90
+ foreach: Optional[bool] = True
91
+ group_learning_rates: Dict[str, float] = field(default_factory=lambda: {})
92
+
93
+ def __post_init__(self):
94
+ run_auto_creation(self)
95
+
96
+ def __call__(
97
+ self,
98
+ last_epoch: int,
99
+ model: ImplicitronModelBase,
100
+ accelerator: Optional[Accelerator] = None,
101
+ exp_dir: Optional[str] = None,
102
+ resume: bool = True,
103
+ resume_epoch: int = -1,
104
+ **kwargs,
105
+ ) -> Tuple[torch.optim.Optimizer, Any]:
106
+ """
107
+ Initialize the optimizer (optionally from a checkpoint) and the lr scheduluer.
108
+
109
+ Args:
110
+ last_epoch: If the model was loaded from checkpoint this will be the
111
+ number of the last epoch that was saved.
112
+ model: The model with optionally loaded weights.
113
+ accelerator: An optional Accelerator instance.
114
+ exp_dir: Root experiment directory.
115
+ resume: If True, attempt to load optimizer checkpoint from exp_dir.
116
+ Failure to do so will return a newly initialized optimizer.
117
+ resume_epoch: If `resume` is True: Resume optimizer at this epoch. If
118
+ `resume_epoch` <= 0, then resume from the latest checkpoint.
119
+ Returns:
120
+ An optimizer module (optionally loaded from a checkpoint) and
121
+ a learning rate scheduler module (should be a subclass of torch.optim's
122
+ lr_scheduler._LRScheduler).
123
+ """
124
+ # Get the parameters to optimize
125
+ if hasattr(model, "_get_param_groups"): # use the model function
126
+ p_groups = model._get_param_groups(self.lr, wd=self.weight_decay)
127
+ else:
128
+ p_groups = [
129
+ {"params": params, "lr": self._get_group_learning_rate(group)}
130
+ for group, params in self._get_param_groups(model).items()
131
+ ]
132
+
133
+ # Intialize the optimizer
134
+ optimizer_kwargs: Dict[str, Any] = {
135
+ "lr": self.lr,
136
+ "weight_decay": self.weight_decay,
137
+ }
138
+ if self.breed == "SGD":
139
+ optimizer_class = torch.optim.SGD
140
+ optimizer_kwargs["momentum"] = self.momentum
141
+ elif self.breed == "Adagrad":
142
+ optimizer_class = torch.optim.Adagrad
143
+ elif self.breed == "Adam":
144
+ optimizer_class = torch.optim.Adam
145
+ optimizer_kwargs["betas"] = self.betas
146
+ else:
147
+ raise ValueError(f"No such solver type {self.breed}")
148
+
149
+ if "foreach" in inspect.signature(optimizer_class.__init__).parameters:
150
+ optimizer_kwargs["foreach"] = self.foreach
151
+ optimizer = optimizer_class(p_groups, **optimizer_kwargs)
152
+ logger.info(f"Solver type = {self.breed}")
153
+
154
+ # Load state from checkpoint
155
+ optimizer_state = self._get_optimizer_state(
156
+ exp_dir,
157
+ accelerator,
158
+ resume_epoch=resume_epoch,
159
+ resume=resume,
160
+ )
161
+ if optimizer_state is not None:
162
+ logger.info("Setting loaded optimizer state.")
163
+ optimizer.load_state_dict(optimizer_state)
164
+
165
+ # Initialize the learning rate scheduler
166
+ if self.lr_policy.casefold() == "MultiStepLR".casefold():
167
+ scheduler = torch.optim.lr_scheduler.MultiStepLR(
168
+ optimizer,
169
+ milestones=self.multistep_lr_milestones,
170
+ gamma=self.gamma,
171
+ )
172
+ elif self.lr_policy.casefold() == "Exponential".casefold():
173
+ scheduler = torch.optim.lr_scheduler.LambdaLR(
174
+ optimizer,
175
+ lambda epoch: self.gamma ** (epoch / self.exponential_lr_step_size),
176
+ verbose=False,
177
+ )
178
+ elif self.lr_policy.casefold() == "LinearExponential".casefold():
179
+ # linear learning rate progression between epochs 0 to
180
+ # self.linear_exponential_lr_milestone, followed by exponential
181
+ # lr decay for the rest of the epochs
182
+ def _get_lr(epoch: int):
183
+ m = self.linear_exponential_lr_milestone
184
+ if epoch < m:
185
+ w = (m - epoch) / m
186
+ gamma = w * self.linear_exponential_start_gamma + (1 - w)
187
+ else:
188
+ epoch_rest = epoch - m
189
+ gamma = self.gamma ** (epoch_rest / self.exponential_lr_step_size)
190
+ return gamma
191
+
192
+ scheduler = torch.optim.lr_scheduler.LambdaLR(
193
+ optimizer, _get_lr, verbose=False
194
+ )
195
+ else:
196
+ raise ValueError("no such lr policy %s" % self.lr_policy)
197
+
198
+ # When loading from checkpoint, this will make sure that the
199
+ # lr is correctly set even after returning.
200
+ for _ in range(last_epoch):
201
+ scheduler.step()
202
+
203
+ optimizer.zero_grad()
204
+
205
+ return optimizer, scheduler
206
+
207
+ def _get_optimizer_state(
208
+ self,
209
+ exp_dir: Optional[str],
210
+ accelerator: Optional[Accelerator] = None,
211
+ resume: bool = True,
212
+ resume_epoch: int = -1,
213
+ ) -> Optional[Dict[str, Any]]:
214
+ """
215
+ Load an optimizer state from a checkpoint.
216
+
217
+ resume: If True, attempt to load the last checkpoint from `exp_dir`
218
+ passed to __call__. Failure to do so will return a newly initialized
219
+ optimizer.
220
+ resume_epoch: If `resume` is True: Resume optimizer at this epoch. If
221
+ `resume_epoch` <= 0, then resume from the latest checkpoint.
222
+ """
223
+ if exp_dir is None or not resume:
224
+ return None
225
+ if resume_epoch > 0:
226
+ save_path = model_io.get_checkpoint(exp_dir, resume_epoch)
227
+ if not os.path.isfile(save_path):
228
+ raise FileNotFoundError(
229
+ f"Cannot find optimizer from epoch {resume_epoch}."
230
+ )
231
+ else:
232
+ save_path = model_io.find_last_checkpoint(exp_dir)
233
+ optimizer_state = None
234
+ if save_path is not None:
235
+ logger.info(f"Found previous optimizer state {save_path} -> resuming.")
236
+ opt_path = model_io.get_optimizer_path(save_path)
237
+
238
+ if os.path.isfile(opt_path):
239
+ map_location = None
240
+ if accelerator is not None and not accelerator.is_local_main_process:
241
+ map_location = {
242
+ "cuda:%d" % 0: "cuda:%d" % accelerator.local_process_index
243
+ }
244
+ optimizer_state = torch.load(opt_path, map_location)
245
+ else:
246
+ raise FileNotFoundError(f"Optimizer state {opt_path} does not exist.")
247
+ return optimizer_state
248
+
249
+ def _get_param_groups(
250
+ self, module: torch.nn.Module
251
+ ) -> Dict[str, List[torch.nn.Parameter]]:
252
+ """
253
+ Recursively visits all the modules inside the `module` and sorts all the
254
+ parameters in parameter groups.
255
+
256
+ Uses `param_groups` dictionary member, where keys are names of individual
257
+ parameters or module members and values are the names of the parameter groups
258
+ for those parameters or members. "self" key is used to denote the parameter groups
259
+ at the module level. Possible keys, including the "self" key do not have to
260
+ be defined. By default all parameters have the learning rate defined in the
261
+ optimizer. This can be overridden by setting the parameter group in `param_groups`
262
+ member of a specific module. Values are a parameter group name. The keys
263
+ specify what parameters will be affected as follows:
264
+ - “self”: All the parameters of the module and its child modules
265
+ - name of a parameter: A parameter with that name.
266
+ - name of a module member: All the parameters of the module and its
267
+ child modules.
268
+ This is useful if members do not have `param_groups`, for
269
+ example torch.nn.Linear.
270
+ - <name of module member>.<something>: recursive. Same as if <something>
271
+ was used in param_groups of that submodule/member.
272
+
273
+ Args:
274
+ module: module from which to extract the parameters and their parameter
275
+ groups
276
+ Returns:
277
+ dictionary with parameter groups as keys and lists of parameters as values
278
+ """
279
+
280
+ param_groups = defaultdict(list)
281
+
282
+ def traverse(module, default_group: str, mapping: Dict[str, str]) -> None:
283
+ """
284
+ Visitor for module to assign its parameters to the relevant member of
285
+ param_groups.
286
+
287
+ Args:
288
+ module: the module being visited in a depth-first search
289
+ default_group: the param group to assign parameters to unless
290
+ otherwise overriden.
291
+ mapping: known mappings of parameters to groups for this module,
292
+ destructively modified by this function.
293
+ """
294
+ # If key self is defined in param_groups then chenge the default param
295
+ # group for all parameters and children in the module.
296
+ if hasattr(module, "param_groups") and "self" in module.param_groups:
297
+ default_group = module.param_groups["self"]
298
+
299
+ # Collect all the parameters that are directly inside the `module`,
300
+ # they will be in the default param group if they don't have
301
+ # defined group.
302
+ if hasattr(module, "param_groups"):
303
+ mapping.update(module.param_groups)
304
+
305
+ for name, param in module.named_parameters(recurse=False):
306
+ if param.requires_grad:
307
+ group_name = mapping.get(name, default_group)
308
+ logger.debug(f"Assigning {name} to param_group {group_name}")
309
+ param_groups[group_name].append(param)
310
+
311
+ # If children have defined default param group then use it else pass
312
+ # own default.
313
+ for child_name, child in module.named_children():
314
+ mapping_to_add = {
315
+ name[len(child_name) + 1 :]: group
316
+ for name, group in mapping.items()
317
+ if name.startswith(child_name + ".")
318
+ }
319
+ traverse(child, mapping.get(child_name, default_group), mapping_to_add)
320
+
321
+ traverse(module, "default", {})
322
+ return param_groups
323
+
324
+ def _get_group_learning_rate(self, group_name: str) -> float:
325
+ """
326
+ Wraps the `group_learning_rates` dictionary providing errors and returns
327
+ `self.lr` for "default" group_name.
328
+
329
+ Args:
330
+ group_name: a string representing the name of the group
331
+ Returns:
332
+ learning rate for a specific group
333
+ """
334
+ if group_name == "default":
335
+ return self.lr
336
+ lr = self.group_learning_rates.get(group_name, None)
337
+ if lr is None:
338
+ raise ValueError(f"no learning rate given for group {group_name}")
339
+ return lr
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/impl/training_loop.py ADDED
@@ -0,0 +1,454 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import logging
10
+ import os
11
+ import time
12
+ from typing import Any, List, Optional
13
+
14
+ import torch
15
+ from accelerate import Accelerator
16
+ from pytorch3d.implicitron.evaluation.evaluator import EvaluatorBase
17
+ from pytorch3d.implicitron.models.base_model import ImplicitronModelBase
18
+ from pytorch3d.implicitron.models.generic_model import EvaluationMode
19
+ from pytorch3d.implicitron.tools import model_io, vis_utils
20
+ from pytorch3d.implicitron.tools.config import (
21
+ registry,
22
+ ReplaceableBase,
23
+ run_auto_creation,
24
+ )
25
+ from pytorch3d.implicitron.tools.stats import Stats
26
+ from torch.utils.data import DataLoader, Dataset
27
+
28
+ from .utils import seed_all_random_engines
29
+
30
+ logger = logging.getLogger(__name__)
31
+
32
+
33
class TrainingLoopBase(ReplaceableBase):
    """
    Pluggable base class for a training loop implementation.

    Members:
        evaluator: An EvaluatorBase instance, used to evaluate training results.
    """

    # pyre-fixme[13]: Attribute `evaluator` is never initialized.
    evaluator: Optional[EvaluatorBase]
    evaluator_class_type: Optional[str] = "ImplicitronEvaluator"

    def run(
        self,
        train_loader: DataLoader,
        val_loader: Optional[DataLoader],
        test_loader: Optional[DataLoader],
        train_dataset: Dataset,
        model: ImplicitronModelBase,
        optimizer: torch.optim.Optimizer,
        scheduler: Any,
        **kwargs,
    ) -> None:
        # Concrete subclasses implement the actual optimization loop.
        raise NotImplementedError()

    def load_stats(
        self,
        log_vars: List[str],
        exp_dir: str,
        resume: bool = True,
        resume_epoch: int = -1,
        **kwargs,
    ) -> Stats:
        # Concrete subclasses implement checkpoint/stats restoration.
        raise NotImplementedError()
65
+
66
+
67
@registry.register
class ImplicitronTrainingLoop(TrainingLoopBase):
    """
    The default Implicitron training loop: epochs of training, periodic
    validation/testing, checkpointing and visdom visualization.

    Members:
        eval_only: If True, only run evaluation using the test dataloader.
        max_epochs: Train for this many epochs. Note that if the model was
            loaded from a checkpoint, we will restart training at the appropriate
            epoch and run for (max_epochs - checkpoint_epoch) epochs.
        store_checkpoints: If True, store model and optimizer state checkpoints.
        store_checkpoints_purge: If >= 0, remove any checkpoints older or equal
            to this many epochs.
        test_interval: Evaluate on a test dataloader each `test_interval` epochs.
        test_when_finished: If True, evaluate on a test dataloader when training
            completes.
        validation_interval: Validate each `validation_interval` epochs.
        clip_grad: Optionally clip the gradient norms.
            If set to a value <=0.0, no clipping
        metric_print_interval: The batch interval at which the stats should be
            logged.
        visualize_interval: The batch interval at which the visualizations
            should be plotted
        visdom_env: The name of the Visdom environment to use for plotting.
        visdom_port: The Visdom port.
        visdom_server: Address of the Visdom server.
    """

    # Parameters of the outer training loop.
    eval_only: bool = False
    max_epochs: int = 1000
    store_checkpoints: bool = True
    store_checkpoints_purge: int = 1
    test_interval: int = -1
    test_when_finished: bool = False
    validation_interval: int = 1

    # Gradient clipping.
    clip_grad: float = 0.0

    # Visualization/logging parameters.
    metric_print_interval: int = 5
    visualize_interval: int = 1000
    visdom_env: str = ""
    visdom_port: int = int(os.environ.get("VISDOM_PORT", 8097))
    visdom_server: str = "http://127.0.0.1"

    def __post_init__(self):
        run_auto_creation(self)

    # pyre-fixme[14]: `run` overrides method defined in `TrainingLoopBase`
    # inconsistently.
    def run(
        self,
        *,
        train_loader: DataLoader,
        val_loader: Optional[DataLoader],
        test_loader: Optional[DataLoader],
        train_dataset: Dataset,
        model: ImplicitronModelBase,
        optimizer: torch.optim.Optimizer,
        scheduler: Any,
        accelerator: Optional[Accelerator],
        device: torch.device,
        exp_dir: str,
        stats: Stats,
        seed: int,
        **kwargs,
    ):
        """
        Entry point to run the training and validation loops
        based on the specified config file.
        """
        start_epoch = stats.epoch + 1
        # The scheduler must be in sync with the (possibly resumed) stats.
        assert scheduler.last_epoch == start_epoch

        # only run evaluation on the test dataloader
        if self.eval_only:
            if test_loader is not None:
                # pyre-fixme[16]: `Optional` has no attribute `run`.
                self.evaluator.run(
                    dataloader=test_loader,
                    device=device,
                    dump_to_json=True,
                    epoch=stats.epoch,
                    exp_dir=exp_dir,
                    model=model,
                )
                return
            else:
                raise ValueError(
                    "Cannot evaluate and dump results to json, no test data provided."
                )

        # loop through epochs
        for epoch in range(start_epoch, self.max_epochs):
            # automatic new_epoch and plotting of stats at every epoch start
            with stats:

                # Make sure to re-seed random generators to ensure reproducibility
                # even after restart.
                seed_all_random_engines(seed + epoch)

                cur_lr = float(scheduler.get_last_lr()[-1])
                logger.debug(f"scheduler lr = {cur_lr:1.2e}")

                # train loop
                self._training_or_validation_epoch(
                    accelerator=accelerator,
                    device=device,
                    epoch=epoch,
                    loader=train_loader,
                    model=model,
                    optimizer=optimizer,
                    stats=stats,
                    validation=False,
                )

                # val loop (optional)
                if val_loader is not None and epoch % self.validation_interval == 0:
                    self._training_or_validation_epoch(
                        accelerator=accelerator,
                        device=device,
                        epoch=epoch,
                        loader=val_loader,
                        model=model,
                        optimizer=optimizer,
                        stats=stats,
                        validation=True,
                    )

                # eval loop (optional)
                if (
                    test_loader is not None
                    and self.test_interval > 0
                    and epoch % self.test_interval == 0
                ):
                    self.evaluator.run(
                        device=device,
                        dataloader=test_loader,
                        model=model,
                    )

                assert stats.epoch == epoch, "inconsistent stats!"
                self._checkpoint(accelerator, epoch, exp_dir, model, optimizer, stats)

                scheduler.step()
                new_lr = float(scheduler.get_last_lr()[-1])
                if new_lr != cur_lr:
                    logger.info(f"LR change! {cur_lr} -> {new_lr}")

        if self.test_when_finished:
            if test_loader is not None:
                self.evaluator.run(
                    device=device,
                    dump_to_json=True,
                    epoch=stats.epoch,
                    exp_dir=exp_dir,
                    dataloader=test_loader,
                    model=model,
                )
            else:
                raise ValueError(
                    "Cannot evaluate and dump results to json, no test data provided."
                )

    def load_stats(
        self,
        log_vars: List[str],
        exp_dir: str,
        resume: bool = True,
        resume_epoch: int = -1,
        **kwargs,
    ) -> Stats:
        """
        Load Stats that correspond to the model's log_vars and resume_epoch.

        Args:
            log_vars: A list of variable names to log. Should be a subset of the
                `preds` returned by the forward function of the corresponding
                ImplicitronModelBase instance.
            exp_dir: Root experiment directory.
            resume: If False, do not load stats from the checkpoint speci-
                fied by resume and resume_epoch; instead, create a fresh stats object.
            resume_epoch: If > 0, load stats from this specific epoch's
                checkpoint; otherwise use the last checkpoint found.

        Returns:
            stats: The stats structure (optionally loaded from checkpoint)
        """
        # Init the stats struct
        visdom_env_charts = (
            vis_utils.get_visdom_env(self.visdom_env, exp_dir) + "_charts"
        )
        stats = Stats(
            # log_vars should be a list, but OmegaConf might load them as ListConfig
            list(log_vars),
            plot_file=os.path.join(exp_dir, "train_stats.pdf"),
            visdom_env=visdom_env_charts,
            visdom_server=self.visdom_server,
            visdom_port=self.visdom_port,
        )

        model_path = None
        if resume:
            if resume_epoch > 0:
                model_path = model_io.get_checkpoint(exp_dir, resume_epoch)
                if not os.path.isfile(model_path):
                    raise FileNotFoundError(
                        f"Cannot find stats from epoch {resume_epoch}."
                    )
            else:
                model_path = model_io.find_last_checkpoint(exp_dir)

        if model_path is not None:
            stats_path = model_io.get_stats_path(model_path)
            stats_load = model_io.load_stats(stats_path)

            # Determine if stats should be reset
            if resume:
                if stats_load is None:
                    logger.warning("\n\n\n\nCORRUPT STATS -> clearing stats\n\n\n\n")
                    last_epoch = model_io.parse_epoch_from_model_path(model_path)
                    logger.info(f"Estimated resume epoch = {last_epoch}")

                    # Reset the stats struct
                    for _ in range(last_epoch + 1):
                        stats.new_epoch()
                    assert last_epoch == stats.epoch
                else:
                    logger.info(f"Found previous stats in {stats_path} -> resuming.")
                    stats = stats_load

                # Update stats properties incase it was reset on load
                stats.visdom_env = visdom_env_charts
                stats.visdom_server = self.visdom_server
                stats.visdom_port = self.visdom_port
                stats.plot_file = os.path.join(exp_dir, "train_stats.pdf")
                stats.synchronize_logged_vars(log_vars)
            else:
                logger.info("Clearing stats")

        return stats

    def _training_or_validation_epoch(
        self,
        epoch: int,
        loader: DataLoader,
        model: ImplicitronModelBase,
        optimizer: torch.optim.Optimizer,
        stats: Stats,
        validation: bool,
        *,
        accelerator: Optional[Accelerator],
        bp_var: str = "objective",
        device: torch.device,
        **kwargs,
    ) -> None:
        """
        This is the main loop for training and evaluation including:
        model forward pass, loss computation, backward pass and visualization.

        Args:
            epoch: The index of the current epoch
            loader: The dataloader to use for the loop
            model: The model module optionally loaded from checkpoint
            optimizer: The optimizer module optionally loaded from checkpoint
            stats: The stats struct, also optionally loaded from checkpoint
            validation: If true, run the loop with the model in eval mode
                and skip the backward pass
            accelerator: An optional Accelerator instance.
            bp_var: The name of the key in the model output `preds` dict which
                should be used as the loss for the backward pass.
            device: The device on which to run the model.
        """

        if validation:
            model.eval()
            trainmode = "val"
        else:
            model.train()
            trainmode = "train"

        t_start = time.time()

        # get the visdom env name
        visdom_env_imgs = stats.visdom_env + "_images_" + trainmode
        viz = vis_utils.get_visdom_connection(
            server=stats.visdom_server,
            port=stats.visdom_port,
        )

        # Iterate through the batches
        n_batches = len(loader)
        for it, net_input in enumerate(loader):
            last_iter = it == n_batches - 1

            # move to gpu where possible (in place)
            net_input = net_input.to(device)

            # run the forward pass
            if not validation:
                optimizer.zero_grad()
                preds = model(
                    **{**net_input, "evaluation_mode": EvaluationMode.TRAINING}
                )
            else:
                with torch.no_grad():
                    preds = model(
                        **{**net_input, "evaluation_mode": EvaluationMode.EVALUATION}
                    )

            # make sure we dont overwrite something
            assert all(k not in preds for k in net_input.keys())
            # merge everything into one big dict
            preds.update(net_input)

            # update the stats logger
            stats.update(preds, time_start=t_start, stat_set=trainmode)
            # pyre-ignore [16]
            assert stats.it[trainmode] == it, "inconsistent stat iteration number!"

            # print textual status update
            if it % self.metric_print_interval == 0 or last_iter:
                std_out = stats.get_status_string(stat_set=trainmode, max_it=n_batches)
                logger.info(std_out)

            # visualize results
            if (
                (accelerator is None or accelerator.is_local_main_process)
                and self.visualize_interval > 0
                and it % self.visualize_interval == 0
            ):
                prefix = f"e{stats.epoch}_it{stats.it[trainmode]}"
                if hasattr(model, "visualize"):
                    model.visualize(
                        viz,
                        visdom_env_imgs,
                        preds,
                        prefix,
                    )

            # optimizer step
            if not validation:
                loss = preds[bp_var]
                assert torch.isfinite(loss).all(), "Non-finite loss!"
                # backprop
                if accelerator is None:
                    loss.backward()
                else:
                    accelerator.backward(loss)
                if self.clip_grad > 0.0:
                    # Optionally clip the gradient norms.
                    # NOTE: `clip_grad_norm` was deprecated and removed from
                    # PyTorch; the supported API is the in-place
                    # `clip_grad_norm_`, which also returns the total norm.
                    total_norm = torch.nn.utils.clip_grad_norm_(
                        model.parameters(), self.clip_grad
                    )
                    if total_norm > self.clip_grad:
                        logger.debug(
                            f"Clipping gradient: {total_norm}"
                            + f" with coef {self.clip_grad / float(total_norm)}."
                        )

                optimizer.step()

    def _checkpoint(
        self,
        accelerator: Optional[Accelerator],
        epoch: int,
        exp_dir: str,
        model: ImplicitronModelBase,
        optimizer: torch.optim.Optimizer,
        stats: Stats,
    ):
        """
        Save a model and its corresponding Stats object to a file, if
        `self.store_checkpoints` is True. In addition, if
        `self.store_checkpoints_purge` is True, remove any checkpoints older
        than `self.store_checkpoints_purge` epochs old.
        """
        # Only the main process writes checkpoints in a distributed run.
        if self.store_checkpoints and (
            accelerator is None or accelerator.is_local_main_process
        ):
            if self.store_checkpoints_purge > 0:
                for prev_epoch in range(epoch - self.store_checkpoints_purge):
                    model_io.purge_epoch(exp_dir, prev_epoch)
            outfile = model_io.get_checkpoint(exp_dir, epoch)
            unwrapped_model = (
                model if accelerator is None else accelerator.unwrap_model(model)
            )
            model_io.safe_save_model(
                unwrapped_model, stats, outfile, optimizer=optimizer
            )
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/impl/utils.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+
10
+ import random
11
+
12
+ import numpy as np
13
+ import torch
14
+
15
+
16
def seed_all_random_engines(seed: int) -> None:
    """
    Seed every random number generator the trainer relies on.

    Seeds Python's built-in `random` module, NumPy's global RNG and
    PyTorch's default generator with the same value so that runs are
    reproducible.

    Args:
        seed: the seed value applied to all three engines.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/tests/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/tests/experiment.yaml ADDED
@@ -0,0 +1,1243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ data_source_class_type: ImplicitronDataSource
2
+ model_factory_class_type: ImplicitronModelFactory
3
+ optimizer_factory_class_type: ImplicitronOptimizerFactory
4
+ training_loop_class_type: ImplicitronTrainingLoop
5
+ seed: 42
6
+ detect_anomaly: false
7
+ exp_dir: ./data/default_experiment/
8
+ hydra:
9
+ run:
10
+ dir: .
11
+ output_subdir: null
12
+ mode: RUN
13
+ data_source_ImplicitronDataSource_args:
14
+ dataset_map_provider_class_type: ???
15
+ data_loader_map_provider_class_type: SequenceDataLoaderMapProvider
16
+ dataset_map_provider_BlenderDatasetMapProvider_args:
17
+ base_dir: ???
18
+ object_name: ???
19
+ path_manager_factory_class_type: PathManagerFactory
20
+ n_known_frames_for_test: null
21
+ path_manager_factory_PathManagerFactory_args:
22
+ silence_logs: true
23
+ dataset_map_provider_JsonIndexDatasetMapProvider_args:
24
+ category: ???
25
+ task_str: singlesequence
26
+ dataset_root: ''
27
+ n_frames_per_sequence: -1
28
+ test_on_train: false
29
+ restrict_sequence_name: []
30
+ test_restrict_sequence_id: -1
31
+ assert_single_seq: false
32
+ only_test_set: false
33
+ dataset_class_type: JsonIndexDataset
34
+ path_manager_factory_class_type: PathManagerFactory
35
+ dataset_JsonIndexDataset_args:
36
+ limit_to: 0
37
+ limit_sequences_to: 0
38
+ exclude_sequence: []
39
+ limit_category_to: []
40
+ load_images: true
41
+ load_depths: true
42
+ load_depth_masks: true
43
+ load_masks: true
44
+ load_point_clouds: false
45
+ max_points: 0
46
+ mask_images: false
47
+ mask_depths: false
48
+ image_height: 800
49
+ image_width: 800
50
+ box_crop: true
51
+ box_crop_mask_thr: 0.4
52
+ box_crop_context: 0.3
53
+ remove_empty_masks: true
54
+ seed: 0
55
+ sort_frames: false
56
+ path_manager_factory_PathManagerFactory_args:
57
+ silence_logs: true
58
+ dataset_map_provider_JsonIndexDatasetMapProviderV2_args:
59
+ category: ???
60
+ subset_name: ???
61
+ dataset_root: ''
62
+ test_on_train: false
63
+ only_test_set: false
64
+ load_eval_batches: true
65
+ num_load_workers: 4
66
+ n_known_frames_for_test: 0
67
+ dataset_class_type: JsonIndexDataset
68
+ path_manager_factory_class_type: PathManagerFactory
69
+ dataset_JsonIndexDataset_args:
70
+ limit_to: 0
71
+ limit_sequences_to: 0
72
+ pick_sequence: []
73
+ exclude_sequence: []
74
+ limit_category_to: []
75
+ load_images: true
76
+ load_depths: true
77
+ load_depth_masks: true
78
+ load_masks: true
79
+ load_point_clouds: false
80
+ max_points: 0
81
+ mask_images: false
82
+ mask_depths: false
83
+ image_height: 800
84
+ image_width: 800
85
+ box_crop: true
86
+ box_crop_mask_thr: 0.4
87
+ box_crop_context: 0.3
88
+ remove_empty_masks: true
89
+ n_frames_per_sequence: -1
90
+ seed: 0
91
+ sort_frames: false
92
+ path_manager_factory_PathManagerFactory_args:
93
+ silence_logs: true
94
+ dataset_map_provider_LlffDatasetMapProvider_args:
95
+ base_dir: ???
96
+ object_name: ???
97
+ path_manager_factory_class_type: PathManagerFactory
98
+ n_known_frames_for_test: null
99
+ path_manager_factory_PathManagerFactory_args:
100
+ silence_logs: true
101
+ downscale_factor: 4
102
+ dataset_map_provider_RenderedMeshDatasetMapProvider_args:
103
+ num_views: 40
104
+ data_file: null
105
+ azimuth_range: 180.0
106
+ distance: 2.7
107
+ resolution: 128
108
+ use_point_light: true
109
+ gpu_idx: 0
110
+ path_manager_factory_class_type: PathManagerFactory
111
+ path_manager_factory_PathManagerFactory_args:
112
+ silence_logs: true
113
+ data_loader_map_provider_SequenceDataLoaderMapProvider_args:
114
+ batch_size: 1
115
+ num_workers: 0
116
+ dataset_length_train: 0
117
+ dataset_length_val: 0
118
+ dataset_length_test: 0
119
+ train_conditioning_type: SAME
120
+ val_conditioning_type: SAME
121
+ test_conditioning_type: KNOWN
122
+ images_per_seq_options: []
123
+ sample_consecutive_frames: false
124
+ consecutive_frames_max_gap: 0
125
+ consecutive_frames_max_gap_seconds: 0.1
126
+ data_loader_map_provider_SimpleDataLoaderMapProvider_args:
127
+ batch_size: 1
128
+ num_workers: 0
129
+ dataset_length_train: 0
130
+ dataset_length_val: 0
131
+ dataset_length_test: 0
132
+ data_loader_map_provider_TrainEvalDataLoaderMapProvider_args:
133
+ batch_size: 1
134
+ num_workers: 0
135
+ dataset_length_train: 0
136
+ dataset_length_val: 0
137
+ dataset_length_test: 0
138
+ train_conditioning_type: SAME
139
+ val_conditioning_type: SAME
140
+ test_conditioning_type: KNOWN
141
+ images_per_seq_options: []
142
+ sample_consecutive_frames: false
143
+ consecutive_frames_max_gap: 0
144
+ consecutive_frames_max_gap_seconds: 0.1
145
+ model_factory_ImplicitronModelFactory_args:
146
+ resume: true
147
+ model_class_type: GenericModel
148
+ resume_epoch: -1
149
+ force_resume: false
150
+ model_GenericModel_args:
151
+ log_vars:
152
+ - loss_rgb_psnr_fg
153
+ - loss_rgb_psnr
154
+ - loss_rgb_mse
155
+ - loss_rgb_huber
156
+ - loss_depth_abs
157
+ - loss_depth_abs_fg
158
+ - loss_mask_neg_iou
159
+ - loss_mask_bce
160
+ - loss_mask_beta_prior
161
+ - loss_eikonal
162
+ - loss_density_tv
163
+ - loss_depth_neg_penalty
164
+ - loss_autodecoder_norm
165
+ - loss_prev_stage_rgb_mse
166
+ - loss_prev_stage_rgb_psnr_fg
167
+ - loss_prev_stage_rgb_psnr
168
+ - loss_prev_stage_mask_bce
169
+ - objective
170
+ - epoch
171
+ - sec/it
172
+ mask_images: true
173
+ mask_depths: true
174
+ render_image_width: 400
175
+ render_image_height: 400
176
+ mask_threshold: 0.5
177
+ output_rasterized_mc: false
178
+ bg_color:
179
+ - 0.0
180
+ - 0.0
181
+ - 0.0
182
+ num_passes: 1
183
+ chunk_size_grid: 4096
184
+ render_features_dimensions: 3
185
+ tqdm_trigger_threshold: 16
186
+ n_train_target_views: 1
187
+ sampling_mode_training: mask_sample
188
+ sampling_mode_evaluation: full_grid
189
+ global_encoder_class_type: null
190
+ raysampler_class_type: AdaptiveRaySampler
191
+ renderer_class_type: MultiPassEmissionAbsorptionRenderer
192
+ image_feature_extractor_class_type: null
193
+ view_pooler_enabled: false
194
+ implicit_function_class_type: NeuralRadianceFieldImplicitFunction
195
+ view_metrics_class_type: ViewMetrics
196
+ regularization_metrics_class_type: RegularizationMetrics
197
+ loss_weights:
198
+ loss_rgb_mse: 1.0
199
+ loss_prev_stage_rgb_mse: 1.0
200
+ loss_mask_bce: 0.0
201
+ loss_prev_stage_mask_bce: 0.0
202
+ global_encoder_HarmonicTimeEncoder_args:
203
+ n_harmonic_functions: 10
204
+ append_input: true
205
+ time_divisor: 1.0
206
+ global_encoder_SequenceAutodecoder_args:
207
+ autodecoder_args:
208
+ encoding_dim: 0
209
+ n_instances: 1
210
+ init_scale: 1.0
211
+ ignore_input: false
212
+ raysampler_AdaptiveRaySampler_args:
213
+ n_pts_per_ray_training: 64
214
+ n_pts_per_ray_evaluation: 64
215
+ n_rays_per_image_sampled_from_mask: 1024
216
+ n_rays_total_training: null
217
+ stratified_point_sampling_training: true
218
+ stratified_point_sampling_evaluation: false
219
+ cast_ray_bundle_as_cone: false
220
+ scene_extent: 8.0
221
+ scene_center:
222
+ - 0.0
223
+ - 0.0
224
+ - 0.0
225
+ raysampler_NearFarRaySampler_args:
226
+ n_pts_per_ray_training: 64
227
+ n_pts_per_ray_evaluation: 64
228
+ n_rays_per_image_sampled_from_mask: 1024
229
+ n_rays_total_training: null
230
+ stratified_point_sampling_training: true
231
+ stratified_point_sampling_evaluation: false
232
+ cast_ray_bundle_as_cone: false
233
+ min_depth: 0.1
234
+ max_depth: 8.0
235
+ renderer_LSTMRenderer_args:
236
+ num_raymarch_steps: 10
237
+ init_depth: 17.0
238
+ init_depth_noise_std: 0.0005
239
+ hidden_size: 16
240
+ n_feature_channels: 256
241
+ bg_color: null
242
+ verbose: false
243
+ renderer_MultiPassEmissionAbsorptionRenderer_args:
244
+ raymarcher_class_type: EmissionAbsorptionRaymarcher
245
+ n_pts_per_ray_fine_training: 64
246
+ n_pts_per_ray_fine_evaluation: 64
247
+ stratified_sampling_coarse_training: true
248
+ stratified_sampling_coarse_evaluation: false
249
+ append_coarse_samples_to_fine: true
250
+ density_noise_std_train: 0.0
251
+ return_weights: false
252
+ blurpool_weights: false
253
+ sample_pdf_eps: 1.0e-05
254
+ raymarcher_CumsumRaymarcher_args:
255
+ surface_thickness: 1
256
+ bg_color:
257
+ - 0.0
258
+ replicate_last_interval: false
259
+ background_opacity: 0.0
260
+ density_relu: true
261
+ blend_output: false
262
+ raymarcher_EmissionAbsorptionRaymarcher_args:
263
+ surface_thickness: 1
264
+ bg_color:
265
+ - 0.0
266
+ replicate_last_interval: false
267
+ background_opacity: 10000000000.0
268
+ density_relu: true
269
+ blend_output: false
270
+ renderer_SignedDistanceFunctionRenderer_args:
271
+ ray_normal_coloring_network_args:
272
+ feature_vector_size: 3
273
+ mode: idr
274
+ d_in: 9
275
+ d_out: 3
276
+ dims:
277
+ - 512
278
+ - 512
279
+ - 512
280
+ - 512
281
+ weight_norm: true
282
+ n_harmonic_functions_dir: 0
283
+ pooled_feature_dim: 0
284
+ bg_color:
285
+ - 0.0
286
+ soft_mask_alpha: 50.0
287
+ ray_tracer_args:
288
+ sdf_threshold: 5.0e-05
289
+ line_search_step: 0.5
290
+ line_step_iters: 1
291
+ sphere_tracing_iters: 10
292
+ n_steps: 100
293
+ n_secant_steps: 8
294
+ image_feature_extractor_ResNetFeatureExtractor_args:
295
+ name: resnet34
296
+ pretrained: true
297
+ stages:
298
+ - 1
299
+ - 2
300
+ - 3
301
+ - 4
302
+ normalize_image: true
303
+ image_rescale: 0.16
304
+ first_max_pool: true
305
+ proj_dim: 32
306
+ l2_norm: true
307
+ add_masks: true
308
+ add_images: true
309
+ global_average_pool: false
310
+ feature_rescale: 1.0
311
+ view_pooler_args:
312
+ feature_aggregator_class_type: AngleWeightedReductionFeatureAggregator
313
+ view_sampler_args:
314
+ masked_sampling: false
315
+ sampling_mode: bilinear
316
+ feature_aggregator_AngleWeightedIdentityFeatureAggregator_args:
317
+ exclude_target_view: true
318
+ exclude_target_view_mask_features: true
319
+ concatenate_output: true
320
+ weight_by_ray_angle_gamma: 1.0
321
+ min_ray_angle_weight: 0.1
322
+ feature_aggregator_AngleWeightedReductionFeatureAggregator_args:
323
+ exclude_target_view: true
324
+ exclude_target_view_mask_features: true
325
+ concatenate_output: true
326
+ reduction_functions:
327
+ - AVG
328
+ - STD
329
+ weight_by_ray_angle_gamma: 1.0
330
+ min_ray_angle_weight: 0.1
331
+ feature_aggregator_IdentityFeatureAggregator_args:
332
+ exclude_target_view: true
333
+ exclude_target_view_mask_features: true
334
+ concatenate_output: true
335
+ feature_aggregator_ReductionFeatureAggregator_args:
336
+ exclude_target_view: true
337
+ exclude_target_view_mask_features: true
338
+ concatenate_output: true
339
+ reduction_functions:
340
+ - AVG
341
+ - STD
342
+ implicit_function_IdrFeatureField_args:
343
+ d_in: 3
344
+ d_out: 1
345
+ dims:
346
+ - 512
347
+ - 512
348
+ - 512
349
+ - 512
350
+ - 512
351
+ - 512
352
+ - 512
353
+ - 512
354
+ geometric_init: true
355
+ bias: 1.0
356
+ skip_in: []
357
+ weight_norm: true
358
+ n_harmonic_functions_xyz: 0
359
+ pooled_feature_dim: 0
360
+ implicit_function_NeRFormerImplicitFunction_args:
361
+ n_harmonic_functions_xyz: 10
362
+ n_harmonic_functions_dir: 4
363
+ n_hidden_neurons_dir: 128
364
+ input_xyz: true
365
+ xyz_ray_dir_in_camera_coords: false
366
+ use_integrated_positional_encoding: false
367
+ transformer_dim_down_factor: 2.0
368
+ n_hidden_neurons_xyz: 80
369
+ n_layers_xyz: 2
370
+ append_xyz:
371
+ - 1
372
+ implicit_function_NeuralRadianceFieldImplicitFunction_args:
373
+ n_harmonic_functions_xyz: 10
374
+ n_harmonic_functions_dir: 4
375
+ n_hidden_neurons_dir: 128
376
+ input_xyz: true
377
+ xyz_ray_dir_in_camera_coords: false
378
+ use_integrated_positional_encoding: false
379
+ transformer_dim_down_factor: 1.0
380
+ n_hidden_neurons_xyz: 256
381
+ n_layers_xyz: 8
382
+ append_xyz:
383
+ - 5
384
+ implicit_function_SRNHyperNetImplicitFunction_args:
385
+ hypernet_args:
386
+ n_harmonic_functions: 3
387
+ n_hidden_units: 256
388
+ n_layers: 2
389
+ n_hidden_units_hypernet: 256
390
+ n_layers_hypernet: 1
391
+ in_features: 3
392
+ out_features: 256
393
+ xyz_in_camera_coords: false
394
+ pixel_generator_args:
395
+ n_harmonic_functions: 4
396
+ n_hidden_units: 256
397
+ n_hidden_units_color: 128
398
+ n_layers: 2
399
+ in_features: 256
400
+ out_features: 3
401
+ ray_dir_in_camera_coords: false
402
+ implicit_function_SRNImplicitFunction_args:
403
+ raymarch_function_args:
404
+ n_harmonic_functions: 3
405
+ n_hidden_units: 256
406
+ n_layers: 2
407
+ in_features: 3
408
+ out_features: 256
409
+ xyz_in_camera_coords: false
410
+ raymarch_function: null
411
+ pixel_generator_args:
412
+ n_harmonic_functions: 4
413
+ n_hidden_units: 256
414
+ n_hidden_units_color: 128
415
+ n_layers: 2
416
+ in_features: 256
417
+ out_features: 3
418
+ ray_dir_in_camera_coords: false
419
+ implicit_function_VoxelGridImplicitFunction_args:
420
+ harmonic_embedder_xyz_density_args:
421
+ n_harmonic_functions: 6
422
+ omega_0: 1.0
423
+ logspace: true
424
+ append_input: true
425
+ harmonic_embedder_xyz_color_args:
426
+ n_harmonic_functions: 6
427
+ omega_0: 1.0
428
+ logspace: true
429
+ append_input: true
430
+ harmonic_embedder_dir_color_args:
431
+ n_harmonic_functions: 6
432
+ omega_0: 1.0
433
+ logspace: true
434
+ append_input: true
435
+ decoder_density_class_type: MLPDecoder
436
+ decoder_color_class_type: MLPDecoder
437
+ use_multiple_streams: true
438
+ xyz_ray_dir_in_camera_coords: false
439
+ scaffold_calculating_epochs: []
440
+ scaffold_resolution:
441
+ - 128
442
+ - 128
443
+ - 128
444
+ scaffold_empty_space_threshold: 0.001
445
+ scaffold_occupancy_chunk_size: -1
446
+ scaffold_max_pool_kernel_size: 3
447
+ scaffold_filter_points: true
448
+ volume_cropping_epochs: []
449
+ voxel_grid_density_args:
450
+ voxel_grid_class_type: FullResolutionVoxelGrid
451
+ extents:
452
+ - 2.0
453
+ - 2.0
454
+ - 2.0
455
+ translation:
456
+ - 0.0
457
+ - 0.0
458
+ - 0.0
459
+ init_std: 0.1
460
+ init_mean: 0.0
461
+ hold_voxel_grid_as_parameters: true
462
+ param_groups: {}
463
+ voxel_grid_CPFactorizedVoxelGrid_args:
464
+ align_corners: true
465
+ padding: zeros
466
+ mode: bilinear
467
+ n_features: 1
468
+ resolution_changes:
469
+ 0:
470
+ - 128
471
+ - 128
472
+ - 128
473
+ n_components: 24
474
+ basis_matrix: true
475
+ voxel_grid_FullResolutionVoxelGrid_args:
476
+ align_corners: true
477
+ padding: zeros
478
+ mode: bilinear
479
+ n_features: 1
480
+ resolution_changes:
481
+ 0:
482
+ - 128
483
+ - 128
484
+ - 128
485
+ voxel_grid_VMFactorizedVoxelGrid_args:
486
+ align_corners: true
487
+ padding: zeros
488
+ mode: bilinear
489
+ n_features: 1
490
+ resolution_changes:
491
+ 0:
492
+ - 128
493
+ - 128
494
+ - 128
495
+ n_components: null
496
+ distribution_of_components: null
497
+ basis_matrix: true
498
+ voxel_grid_color_args:
499
+ voxel_grid_class_type: FullResolutionVoxelGrid
500
+ extents:
501
+ - 2.0
502
+ - 2.0
503
+ - 2.0
504
+ translation:
505
+ - 0.0
506
+ - 0.0
507
+ - 0.0
508
+ init_std: 0.1
509
+ init_mean: 0.0
510
+ hold_voxel_grid_as_parameters: true
511
+ param_groups: {}
512
+ voxel_grid_CPFactorizedVoxelGrid_args:
513
+ align_corners: true
514
+ padding: zeros
515
+ mode: bilinear
516
+ n_features: 1
517
+ resolution_changes:
518
+ 0:
519
+ - 128
520
+ - 128
521
+ - 128
522
+ n_components: 24
523
+ basis_matrix: true
524
+ voxel_grid_FullResolutionVoxelGrid_args:
525
+ align_corners: true
526
+ padding: zeros
527
+ mode: bilinear
528
+ n_features: 1
529
+ resolution_changes:
530
+ 0:
531
+ - 128
532
+ - 128
533
+ - 128
534
+ voxel_grid_VMFactorizedVoxelGrid_args:
535
+ align_corners: true
536
+ padding: zeros
537
+ mode: bilinear
538
+ n_features: 1
539
+ resolution_changes:
540
+ 0:
541
+ - 128
542
+ - 128
543
+ - 128
544
+ n_components: null
545
+ distribution_of_components: null
546
+ basis_matrix: true
547
+ decoder_density_ElementwiseDecoder_args:
548
+ scale: 1.0
549
+ shift: 0.0
550
+ operation: IDENTITY
551
+ decoder_density_MLPDecoder_args:
552
+ param_groups: {}
553
+ network_args:
554
+ n_layers: 8
555
+ output_dim: 256
556
+ skip_dim: 39
557
+ hidden_dim: 256
558
+ input_skips:
559
+ - 5
560
+ skip_affine_trans: false
561
+ last_layer_bias_init: null
562
+ last_activation: RELU
563
+ use_xavier_init: true
564
+ decoder_color_ElementwiseDecoder_args:
565
+ scale: 1.0
566
+ shift: 0.0
567
+ operation: IDENTITY
568
+ decoder_color_MLPDecoder_args:
569
+ param_groups: {}
570
+ network_args:
571
+ n_layers: 8
572
+ output_dim: 256
573
+ skip_dim: 39
574
+ hidden_dim: 256
575
+ input_skips:
576
+ - 5
577
+ skip_affine_trans: false
578
+ last_layer_bias_init: null
579
+ last_activation: RELU
580
+ use_xavier_init: true
581
+ view_metrics_ViewMetrics_args: {}
582
+ regularization_metrics_RegularizationMetrics_args: {}
583
+ model_OverfitModel_args:
584
+ log_vars:
585
+ - loss_rgb_psnr_fg
586
+ - loss_rgb_psnr
587
+ - loss_rgb_mse
588
+ - loss_rgb_huber
589
+ - loss_depth_abs
590
+ - loss_depth_abs_fg
591
+ - loss_mask_neg_iou
592
+ - loss_mask_bce
593
+ - loss_mask_beta_prior
594
+ - loss_eikonal
595
+ - loss_density_tv
596
+ - loss_depth_neg_penalty
597
+ - loss_autodecoder_norm
598
+ - loss_prev_stage_rgb_mse
599
+ - loss_prev_stage_rgb_psnr_fg
600
+ - loss_prev_stage_rgb_psnr
601
+ - loss_prev_stage_mask_bce
602
+ - objective
603
+ - epoch
604
+ - sec/it
605
+ mask_images: true
606
+ mask_depths: true
607
+ render_image_width: 400
608
+ render_image_height: 400
609
+ mask_threshold: 0.5
610
+ output_rasterized_mc: false
611
+ bg_color:
612
+ - 0.0
613
+ - 0.0
614
+ - 0.0
615
+ chunk_size_grid: 4096
616
+ render_features_dimensions: 3
617
+ tqdm_trigger_threshold: 16
618
+ n_train_target_views: 1
619
+ sampling_mode_training: mask_sample
620
+ sampling_mode_evaluation: full_grid
621
+ global_encoder_class_type: null
622
+ raysampler_class_type: AdaptiveRaySampler
623
+ renderer_class_type: MultiPassEmissionAbsorptionRenderer
624
+ share_implicit_function_across_passes: false
625
+ implicit_function_class_type: NeuralRadianceFieldImplicitFunction
626
+ coarse_implicit_function_class_type: null
627
+ view_metrics_class_type: ViewMetrics
628
+ regularization_metrics_class_type: RegularizationMetrics
629
+ loss_weights:
630
+ loss_rgb_mse: 1.0
631
+ loss_prev_stage_rgb_mse: 1.0
632
+ loss_mask_bce: 0.0
633
+ loss_prev_stage_mask_bce: 0.0
634
+ global_encoder_HarmonicTimeEncoder_args:
635
+ n_harmonic_functions: 10
636
+ append_input: true
637
+ time_divisor: 1.0
638
+ global_encoder_SequenceAutodecoder_args:
639
+ autodecoder_args:
640
+ encoding_dim: 0
641
+ n_instances: 1
642
+ init_scale: 1.0
643
+ ignore_input: false
644
+ raysampler_AdaptiveRaySampler_args:
645
+ n_pts_per_ray_training: 64
646
+ n_pts_per_ray_evaluation: 64
647
+ n_rays_per_image_sampled_from_mask: 1024
648
+ n_rays_total_training: null
649
+ stratified_point_sampling_training: true
650
+ stratified_point_sampling_evaluation: false
651
+ cast_ray_bundle_as_cone: false
652
+ scene_extent: 8.0
653
+ scene_center:
654
+ - 0.0
655
+ - 0.0
656
+ - 0.0
657
+ raysampler_NearFarRaySampler_args:
658
+ n_pts_per_ray_training: 64
659
+ n_pts_per_ray_evaluation: 64
660
+ n_rays_per_image_sampled_from_mask: 1024
661
+ n_rays_total_training: null
662
+ stratified_point_sampling_training: true
663
+ stratified_point_sampling_evaluation: false
664
+ cast_ray_bundle_as_cone: false
665
+ min_depth: 0.1
666
+ max_depth: 8.0
667
+ renderer_LSTMRenderer_args:
668
+ num_raymarch_steps: 10
669
+ init_depth: 17.0
670
+ init_depth_noise_std: 0.0005
671
+ hidden_size: 16
672
+ n_feature_channels: 256
673
+ bg_color: null
674
+ verbose: false
675
+ renderer_MultiPassEmissionAbsorptionRenderer_args:
676
+ raymarcher_class_type: EmissionAbsorptionRaymarcher
677
+ n_pts_per_ray_fine_training: 64
678
+ n_pts_per_ray_fine_evaluation: 64
679
+ stratified_sampling_coarse_training: true
680
+ stratified_sampling_coarse_evaluation: false
681
+ append_coarse_samples_to_fine: true
682
+ density_noise_std_train: 0.0
683
+ return_weights: false
684
+ blurpool_weights: false
685
+ sample_pdf_eps: 1.0e-05
686
+ raymarcher_CumsumRaymarcher_args:
687
+ surface_thickness: 1
688
+ bg_color:
689
+ - 0.0
690
+ replicate_last_interval: false
691
+ background_opacity: 0.0
692
+ density_relu: true
693
+ blend_output: false
694
+ raymarcher_EmissionAbsorptionRaymarcher_args:
695
+ surface_thickness: 1
696
+ bg_color:
697
+ - 0.0
698
+ replicate_last_interval: false
699
+ background_opacity: 10000000000.0
700
+ density_relu: true
701
+ blend_output: false
702
+ renderer_SignedDistanceFunctionRenderer_args:
703
+ ray_normal_coloring_network_args:
704
+ feature_vector_size: 3
705
+ mode: idr
706
+ d_in: 9
707
+ d_out: 3
708
+ dims:
709
+ - 512
710
+ - 512
711
+ - 512
712
+ - 512
713
+ weight_norm: true
714
+ n_harmonic_functions_dir: 0
715
+ pooled_feature_dim: 0
716
+ bg_color:
717
+ - 0.0
718
+ soft_mask_alpha: 50.0
719
+ ray_tracer_args:
720
+ sdf_threshold: 5.0e-05
721
+ line_search_step: 0.5
722
+ line_step_iters: 1
723
+ sphere_tracing_iters: 10
724
+ n_steps: 100
725
+ n_secant_steps: 8
726
+ implicit_function_IdrFeatureField_args:
727
+ d_in: 3
728
+ d_out: 1
729
+ dims:
730
+ - 512
731
+ - 512
732
+ - 512
733
+ - 512
734
+ - 512
735
+ - 512
736
+ - 512
737
+ - 512
738
+ geometric_init: true
739
+ bias: 1.0
740
+ skip_in: []
741
+ weight_norm: true
742
+ n_harmonic_functions_xyz: 0
743
+ pooled_feature_dim: 0
744
+ implicit_function_NeRFormerImplicitFunction_args:
745
+ n_harmonic_functions_xyz: 10
746
+ n_harmonic_functions_dir: 4
747
+ n_hidden_neurons_dir: 128
748
+ input_xyz: true
749
+ xyz_ray_dir_in_camera_coords: false
750
+ use_integrated_positional_encoding: false
751
+ transformer_dim_down_factor: 2.0
752
+ n_hidden_neurons_xyz: 80
753
+ n_layers_xyz: 2
754
+ append_xyz:
755
+ - 1
756
+ implicit_function_NeuralRadianceFieldImplicitFunction_args:
757
+ n_harmonic_functions_xyz: 10
758
+ n_harmonic_functions_dir: 4
759
+ n_hidden_neurons_dir: 128
760
+ input_xyz: true
761
+ xyz_ray_dir_in_camera_coords: false
762
+ use_integrated_positional_encoding: false
763
+ transformer_dim_down_factor: 1.0
764
+ n_hidden_neurons_xyz: 256
765
+ n_layers_xyz: 8
766
+ append_xyz:
767
+ - 5
768
+ implicit_function_SRNHyperNetImplicitFunction_args:
769
+ latent_dim_hypernet: 0
770
+ hypernet_args:
771
+ n_harmonic_functions: 3
772
+ n_hidden_units: 256
773
+ n_layers: 2
774
+ n_hidden_units_hypernet: 256
775
+ n_layers_hypernet: 1
776
+ in_features: 3
777
+ out_features: 256
778
+ xyz_in_camera_coords: false
779
+ pixel_generator_args:
780
+ n_harmonic_functions: 4
781
+ n_hidden_units: 256
782
+ n_hidden_units_color: 128
783
+ n_layers: 2
784
+ in_features: 256
785
+ out_features: 3
786
+ ray_dir_in_camera_coords: false
787
+ implicit_function_SRNImplicitFunction_args:
788
+ raymarch_function_args:
789
+ n_harmonic_functions: 3
790
+ n_hidden_units: 256
791
+ n_layers: 2
792
+ in_features: 3
793
+ out_features: 256
794
+ xyz_in_camera_coords: false
795
+ raymarch_function: null
796
+ pixel_generator_args:
797
+ n_harmonic_functions: 4
798
+ n_hidden_units: 256
799
+ n_hidden_units_color: 128
800
+ n_layers: 2
801
+ in_features: 256
802
+ out_features: 3
803
+ ray_dir_in_camera_coords: false
804
+ implicit_function_VoxelGridImplicitFunction_args:
805
+ harmonic_embedder_xyz_density_args:
806
+ n_harmonic_functions: 6
807
+ omega_0: 1.0
808
+ logspace: true
809
+ append_input: true
810
+ harmonic_embedder_xyz_color_args:
811
+ n_harmonic_functions: 6
812
+ omega_0: 1.0
813
+ logspace: true
814
+ append_input: true
815
+ harmonic_embedder_dir_color_args:
816
+ n_harmonic_functions: 6
817
+ omega_0: 1.0
818
+ logspace: true
819
+ append_input: true
820
+ decoder_density_class_type: MLPDecoder
821
+ decoder_color_class_type: MLPDecoder
822
+ use_multiple_streams: true
823
+ xyz_ray_dir_in_camera_coords: false
824
+ scaffold_calculating_epochs: []
825
+ scaffold_resolution:
826
+ - 128
827
+ - 128
828
+ - 128
829
+ scaffold_empty_space_threshold: 0.001
830
+ scaffold_occupancy_chunk_size: -1
831
+ scaffold_max_pool_kernel_size: 3
832
+ scaffold_filter_points: true
833
+ volume_cropping_epochs: []
834
+ voxel_grid_density_args:
835
+ voxel_grid_class_type: FullResolutionVoxelGrid
836
+ extents:
837
+ - 2.0
838
+ - 2.0
839
+ - 2.0
840
+ translation:
841
+ - 0.0
842
+ - 0.0
843
+ - 0.0
844
+ init_std: 0.1
845
+ init_mean: 0.0
846
+ hold_voxel_grid_as_parameters: true
847
+ param_groups: {}
848
+ voxel_grid_CPFactorizedVoxelGrid_args:
849
+ align_corners: true
850
+ padding: zeros
851
+ mode: bilinear
852
+ n_features: 1
853
+ resolution_changes:
854
+ 0:
855
+ - 128
856
+ - 128
857
+ - 128
858
+ n_components: 24
859
+ basis_matrix: true
860
+ voxel_grid_FullResolutionVoxelGrid_args:
861
+ align_corners: true
862
+ padding: zeros
863
+ mode: bilinear
864
+ n_features: 1
865
+ resolution_changes:
866
+ 0:
867
+ - 128
868
+ - 128
869
+ - 128
870
+ voxel_grid_VMFactorizedVoxelGrid_args:
871
+ align_corners: true
872
+ padding: zeros
873
+ mode: bilinear
874
+ n_features: 1
875
+ resolution_changes:
876
+ 0:
877
+ - 128
878
+ - 128
879
+ - 128
880
+ n_components: null
881
+ distribution_of_components: null
882
+ basis_matrix: true
883
+ voxel_grid_color_args:
884
+ voxel_grid_class_type: FullResolutionVoxelGrid
885
+ extents:
886
+ - 2.0
887
+ - 2.0
888
+ - 2.0
889
+ translation:
890
+ - 0.0
891
+ - 0.0
892
+ - 0.0
893
+ init_std: 0.1
894
+ init_mean: 0.0
895
+ hold_voxel_grid_as_parameters: true
896
+ param_groups: {}
897
+ voxel_grid_CPFactorizedVoxelGrid_args:
898
+ align_corners: true
899
+ padding: zeros
900
+ mode: bilinear
901
+ n_features: 1
902
+ resolution_changes:
903
+ 0:
904
+ - 128
905
+ - 128
906
+ - 128
907
+ n_components: 24
908
+ basis_matrix: true
909
+ voxel_grid_FullResolutionVoxelGrid_args:
910
+ align_corners: true
911
+ padding: zeros
912
+ mode: bilinear
913
+ n_features: 1
914
+ resolution_changes:
915
+ 0:
916
+ - 128
917
+ - 128
918
+ - 128
919
+ voxel_grid_VMFactorizedVoxelGrid_args:
920
+ align_corners: true
921
+ padding: zeros
922
+ mode: bilinear
923
+ n_features: 1
924
+ resolution_changes:
925
+ 0:
926
+ - 128
927
+ - 128
928
+ - 128
929
+ n_components: null
930
+ distribution_of_components: null
931
+ basis_matrix: true
932
+ decoder_density_ElementwiseDecoder_args:
933
+ scale: 1.0
934
+ shift: 0.0
935
+ operation: IDENTITY
936
+ decoder_density_MLPDecoder_args:
937
+ param_groups: {}
938
+ network_args:
939
+ n_layers: 8
940
+ output_dim: 256
941
+ skip_dim: 39
942
+ hidden_dim: 256
943
+ input_skips:
944
+ - 5
945
+ skip_affine_trans: false
946
+ last_layer_bias_init: null
947
+ last_activation: RELU
948
+ use_xavier_init: true
949
+ decoder_color_ElementwiseDecoder_args:
950
+ scale: 1.0
951
+ shift: 0.0
952
+ operation: IDENTITY
953
+ decoder_color_MLPDecoder_args:
954
+ param_groups: {}
955
+ network_args:
956
+ n_layers: 8
957
+ output_dim: 256
958
+ skip_dim: 39
959
+ hidden_dim: 256
960
+ input_skips:
961
+ - 5
962
+ skip_affine_trans: false
963
+ last_layer_bias_init: null
964
+ last_activation: RELU
965
+ use_xavier_init: true
966
+ coarse_implicit_function_IdrFeatureField_args:
967
+ d_in: 3
968
+ d_out: 1
969
+ dims:
970
+ - 512
971
+ - 512
972
+ - 512
973
+ - 512
974
+ - 512
975
+ - 512
976
+ - 512
977
+ - 512
978
+ geometric_init: true
979
+ bias: 1.0
980
+ skip_in: []
981
+ weight_norm: true
982
+ n_harmonic_functions_xyz: 0
983
+ pooled_feature_dim: 0
984
+ coarse_implicit_function_NeRFormerImplicitFunction_args:
985
+ n_harmonic_functions_xyz: 10
986
+ n_harmonic_functions_dir: 4
987
+ n_hidden_neurons_dir: 128
988
+ input_xyz: true
989
+ xyz_ray_dir_in_camera_coords: false
990
+ use_integrated_positional_encoding: false
991
+ transformer_dim_down_factor: 2.0
992
+ n_hidden_neurons_xyz: 80
993
+ n_layers_xyz: 2
994
+ append_xyz:
995
+ - 1
996
+ coarse_implicit_function_NeuralRadianceFieldImplicitFunction_args:
997
+ n_harmonic_functions_xyz: 10
998
+ n_harmonic_functions_dir: 4
999
+ n_hidden_neurons_dir: 128
1000
+ input_xyz: true
1001
+ xyz_ray_dir_in_camera_coords: false
1002
+ use_integrated_positional_encoding: false
1003
+ transformer_dim_down_factor: 1.0
1004
+ n_hidden_neurons_xyz: 256
1005
+ n_layers_xyz: 8
1006
+ append_xyz:
1007
+ - 5
1008
+ coarse_implicit_function_SRNHyperNetImplicitFunction_args:
1009
+ latent_dim_hypernet: 0
1010
+ hypernet_args:
1011
+ n_harmonic_functions: 3
1012
+ n_hidden_units: 256
1013
+ n_layers: 2
1014
+ n_hidden_units_hypernet: 256
1015
+ n_layers_hypernet: 1
1016
+ in_features: 3
1017
+ out_features: 256
1018
+ xyz_in_camera_coords: false
1019
+ pixel_generator_args:
1020
+ n_harmonic_functions: 4
1021
+ n_hidden_units: 256
1022
+ n_hidden_units_color: 128
1023
+ n_layers: 2
1024
+ in_features: 256
1025
+ out_features: 3
1026
+ ray_dir_in_camera_coords: false
1027
+ coarse_implicit_function_SRNImplicitFunction_args:
1028
+ raymarch_function_args:
1029
+ n_harmonic_functions: 3
1030
+ n_hidden_units: 256
1031
+ n_layers: 2
1032
+ in_features: 3
1033
+ out_features: 256
1034
+ xyz_in_camera_coords: false
1035
+ raymarch_function: null
1036
+ pixel_generator_args:
1037
+ n_harmonic_functions: 4
1038
+ n_hidden_units: 256
1039
+ n_hidden_units_color: 128
1040
+ n_layers: 2
1041
+ in_features: 256
1042
+ out_features: 3
1043
+ ray_dir_in_camera_coords: false
1044
+ coarse_implicit_function_VoxelGridImplicitFunction_args:
1045
+ harmonic_embedder_xyz_density_args:
1046
+ n_harmonic_functions: 6
1047
+ omega_0: 1.0
1048
+ logspace: true
1049
+ append_input: true
1050
+ harmonic_embedder_xyz_color_args:
1051
+ n_harmonic_functions: 6
1052
+ omega_0: 1.0
1053
+ logspace: true
1054
+ append_input: true
1055
+ harmonic_embedder_dir_color_args:
1056
+ n_harmonic_functions: 6
1057
+ omega_0: 1.0
1058
+ logspace: true
1059
+ append_input: true
1060
+ decoder_density_class_type: MLPDecoder
1061
+ decoder_color_class_type: MLPDecoder
1062
+ use_multiple_streams: true
1063
+ xyz_ray_dir_in_camera_coords: false
1064
+ scaffold_calculating_epochs: []
1065
+ scaffold_resolution:
1066
+ - 128
1067
+ - 128
1068
+ - 128
1069
+ scaffold_empty_space_threshold: 0.001
1070
+ scaffold_occupancy_chunk_size: -1
1071
+ scaffold_max_pool_kernel_size: 3
1072
+ scaffold_filter_points: true
1073
+ volume_cropping_epochs: []
1074
+ voxel_grid_density_args:
1075
+ voxel_grid_class_type: FullResolutionVoxelGrid
1076
+ extents:
1077
+ - 2.0
1078
+ - 2.0
1079
+ - 2.0
1080
+ translation:
1081
+ - 0.0
1082
+ - 0.0
1083
+ - 0.0
1084
+ init_std: 0.1
1085
+ init_mean: 0.0
1086
+ hold_voxel_grid_as_parameters: true
1087
+ param_groups: {}
1088
+ voxel_grid_CPFactorizedVoxelGrid_args:
1089
+ align_corners: true
1090
+ padding: zeros
1091
+ mode: bilinear
1092
+ n_features: 1
1093
+ resolution_changes:
1094
+ 0:
1095
+ - 128
1096
+ - 128
1097
+ - 128
1098
+ n_components: 24
1099
+ basis_matrix: true
1100
+ voxel_grid_FullResolutionVoxelGrid_args:
1101
+ align_corners: true
1102
+ padding: zeros
1103
+ mode: bilinear
1104
+ n_features: 1
1105
+ resolution_changes:
1106
+ 0:
1107
+ - 128
1108
+ - 128
1109
+ - 128
1110
+ voxel_grid_VMFactorizedVoxelGrid_args:
1111
+ align_corners: true
1112
+ padding: zeros
1113
+ mode: bilinear
1114
+ n_features: 1
1115
+ resolution_changes:
1116
+ 0:
1117
+ - 128
1118
+ - 128
1119
+ - 128
1120
+ n_components: null
1121
+ distribution_of_components: null
1122
+ basis_matrix: true
1123
+ voxel_grid_color_args:
1124
+ voxel_grid_class_type: FullResolutionVoxelGrid
1125
+ extents:
1126
+ - 2.0
1127
+ - 2.0
1128
+ - 2.0
1129
+ translation:
1130
+ - 0.0
1131
+ - 0.0
1132
+ - 0.0
1133
+ init_std: 0.1
1134
+ init_mean: 0.0
1135
+ hold_voxel_grid_as_parameters: true
1136
+ param_groups: {}
1137
+ voxel_grid_CPFactorizedVoxelGrid_args:
1138
+ align_corners: true
1139
+ padding: zeros
1140
+ mode: bilinear
1141
+ n_features: 1
1142
+ resolution_changes:
1143
+ 0:
1144
+ - 128
1145
+ - 128
1146
+ - 128
1147
+ n_components: 24
1148
+ basis_matrix: true
1149
+ voxel_grid_FullResolutionVoxelGrid_args:
1150
+ align_corners: true
1151
+ padding: zeros
1152
+ mode: bilinear
1153
+ n_features: 1
1154
+ resolution_changes:
1155
+ 0:
1156
+ - 128
1157
+ - 128
1158
+ - 128
1159
+ voxel_grid_VMFactorizedVoxelGrid_args:
1160
+ align_corners: true
1161
+ padding: zeros
1162
+ mode: bilinear
1163
+ n_features: 1
1164
+ resolution_changes:
1165
+ 0:
1166
+ - 128
1167
+ - 128
1168
+ - 128
1169
+ n_components: null
1170
+ distribution_of_components: null
1171
+ basis_matrix: true
1172
+ decoder_density_ElementwiseDecoder_args:
1173
+ scale: 1.0
1174
+ shift: 0.0
1175
+ operation: IDENTITY
1176
+ decoder_density_MLPDecoder_args:
1177
+ param_groups: {}
1178
+ network_args:
1179
+ n_layers: 8
1180
+ output_dim: 256
1181
+ skip_dim: 39
1182
+ hidden_dim: 256
1183
+ input_skips:
1184
+ - 5
1185
+ skip_affine_trans: false
1186
+ last_layer_bias_init: null
1187
+ last_activation: RELU
1188
+ use_xavier_init: true
1189
+ decoder_color_ElementwiseDecoder_args:
1190
+ scale: 1.0
1191
+ shift: 0.0
1192
+ operation: IDENTITY
1193
+ decoder_color_MLPDecoder_args:
1194
+ param_groups: {}
1195
+ network_args:
1196
+ n_layers: 8
1197
+ output_dim: 256
1198
+ skip_dim: 39
1199
+ hidden_dim: 256
1200
+ input_skips:
1201
+ - 5
1202
+ skip_affine_trans: false
1203
+ last_layer_bias_init: null
1204
+ last_activation: RELU
1205
+ use_xavier_init: true
1206
+ view_metrics_ViewMetrics_args: {}
1207
+ regularization_metrics_RegularizationMetrics_args: {}
1208
+ optimizer_factory_ImplicitronOptimizerFactory_args:
1209
+ betas:
1210
+ - 0.9
1211
+ - 0.999
1212
+ breed: Adam
1213
+ exponential_lr_step_size: 250
1214
+ gamma: 0.1
1215
+ lr: 0.0005
1216
+ lr_policy: MultiStepLR
1217
+ momentum: 0.9
1218
+ multistep_lr_milestones: []
1219
+ weight_decay: 0.0
1220
+ linear_exponential_lr_milestone: 200
1221
+ linear_exponential_start_gamma: 0.1
1222
+ foreach: true
1223
+ group_learning_rates: {}
1224
+ training_loop_ImplicitronTrainingLoop_args:
1225
+ evaluator_class_type: ImplicitronEvaluator
1226
+ evaluator_ImplicitronEvaluator_args:
1227
+ is_multisequence: false
1228
+ camera_difficulty_bin_breaks:
1229
+ - 0.97
1230
+ - 0.98
1231
+ eval_only: false
1232
+ max_epochs: 1000
1233
+ store_checkpoints: true
1234
+ store_checkpoints_purge: 1
1235
+ test_interval: -1
1236
+ test_when_finished: false
1237
+ validation_interval: 1
1238
+ clip_grad: 0.0
1239
+ metric_print_interval: 5
1240
+ visualize_interval: 1000
1241
+ visdom_env: ''
1242
+ visdom_port: 8097
1243
+ visdom_server: http://127.0.0.1
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/tests/test_experiment.py ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import os
10
+ import tempfile
11
+ import unittest
12
+ from pathlib import Path
13
+
14
+ import torch
15
+
16
+ from hydra import compose, initialize_config_dir
17
+ from omegaconf import OmegaConf
18
+ from projects.implicitron_trainer.impl.optimizer_factory import (
19
+ ImplicitronOptimizerFactory,
20
+ )
21
+
22
+ from .. import experiment
23
+ from .utils import interactive_testing_requested, intercept_logs
24
+
25
+ internal = os.environ.get("FB_TEST", False)
26
+
27
+
28
+ DATA_DIR = Path(__file__).resolve().parent
29
+ IMPLICITRON_CONFIGS_DIR = Path(__file__).resolve().parent.parent / "configs"
30
+ DEBUG: bool = False
31
+
32
+ # TODO:
33
+ # - add enough files to skateboard_first_5 that this works on RE.
34
+ # - share common code with PyTorch3D tests?
35
+
36
+
37
+ def _parse_float_from_log(line):
38
+ return float(line.split()[-1])
39
+
40
+
41
+ class TestExperiment(unittest.TestCase):
42
+ def setUp(self):
43
+ self.maxDiff = None
44
+
45
+ def test_from_defaults(self):
46
+ # Test making minimal changes to the dataclass defaults.
47
+ if not interactive_testing_requested() or not internal:
48
+ return
49
+
50
+ # Manually override config values. Note that this is not necessary out-
51
+ # side of the tests!
52
+ cfg = OmegaConf.structured(experiment.Experiment)
53
+ cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_class_type = (
54
+ "JsonIndexDatasetMapProvider"
55
+ )
56
+ dataset_args = (
57
+ cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
58
+ )
59
+ dataloader_args = (
60
+ cfg.data_source_ImplicitronDataSource_args.data_loader_map_provider_SequenceDataLoaderMapProvider_args
61
+ )
62
+ dataset_args.category = "skateboard"
63
+ dataset_args.test_restrict_sequence_id = 0
64
+ dataset_args.dataset_root = "manifold://co3d/tree/extracted"
65
+ dataset_args.dataset_JsonIndexDataset_args.limit_sequences_to = 5
66
+ dataset_args.dataset_JsonIndexDataset_args.image_height = 80
67
+ dataset_args.dataset_JsonIndexDataset_args.image_width = 80
68
+ dataloader_args.dataset_length_train = 1
69
+ dataloader_args.dataset_length_val = 1
70
+ cfg.training_loop_ImplicitronTrainingLoop_args.max_epochs = 2
71
+ cfg.training_loop_ImplicitronTrainingLoop_args.store_checkpoints = False
72
+ cfg.optimizer_factory_ImplicitronOptimizerFactory_args.multistep_lr_milestones = [
73
+ 0,
74
+ 1,
75
+ ]
76
+
77
+ if DEBUG:
78
+ experiment.dump_cfg(cfg)
79
+ with intercept_logs(
80
+ logger_name="projects.implicitron_trainer.impl.training_loop",
81
+ regexp="LR change!",
82
+ ) as intercepted_logs:
83
+ experiment_runner = experiment.Experiment(**cfg)
84
+ experiment_runner.run()
85
+
86
+ # Make sure LR decreased on 0th and 1st epoch 10fold.
87
+ self.assertEqual(intercepted_logs[0].split()[-1], "5e-06")
88
+
89
+ def test_exponential_lr(self):
90
+ # Test making minimal changes to the dataclass defaults.
91
+ if not interactive_testing_requested():
92
+ return
93
+ cfg = OmegaConf.structured(experiment.Experiment)
94
+ cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_class_type = (
95
+ "JsonIndexDatasetMapProvider"
96
+ )
97
+ dataset_args = (
98
+ cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
99
+ )
100
+ dataloader_args = (
101
+ cfg.data_source_ImplicitronDataSource_args.data_loader_map_provider_SequenceDataLoaderMapProvider_args
102
+ )
103
+ dataset_args.category = "skateboard"
104
+ dataset_args.test_restrict_sequence_id = 0
105
+ dataset_args.dataset_root = "manifold://co3d/tree/extracted"
106
+ dataset_args.dataset_JsonIndexDataset_args.limit_sequences_to = 5
107
+ dataset_args.dataset_JsonIndexDataset_args.image_height = 80
108
+ dataset_args.dataset_JsonIndexDataset_args.image_width = 80
109
+ dataloader_args.dataset_length_train = 1
110
+ dataloader_args.dataset_length_val = 1
111
+ cfg.training_loop_ImplicitronTrainingLoop_args.max_epochs = 2
112
+ cfg.training_loop_ImplicitronTrainingLoop_args.store_checkpoints = False
113
+ cfg.optimizer_factory_ImplicitronOptimizerFactory_args.lr_policy = "Exponential"
114
+ cfg.optimizer_factory_ImplicitronOptimizerFactory_args.exponential_lr_step_size = (
115
+ 2
116
+ )
117
+
118
+ if DEBUG:
119
+ experiment.dump_cfg(cfg)
120
+ with intercept_logs(
121
+ logger_name="projects.implicitron_trainer.impl.training_loop",
122
+ regexp="LR change!",
123
+ ) as intercepted_logs:
124
+ experiment_runner = experiment.Experiment(**cfg)
125
+ experiment_runner.run()
126
+
127
+ # Make sure we followed the exponential lr schedule with gamma=0.1,
128
+ # exponential_lr_step_size=2 -- so after two epochs, should
129
+ # decrease lr 10x to 5e-5.
130
+ self.assertEqual(intercepted_logs[0].split()[-1], "0.00015811388300841897")
131
+ self.assertEqual(intercepted_logs[1].split()[-1], "5e-05")
132
+
133
+ def test_yaml_contents(self):
134
+ # Check that the default config values, defined by Experiment and its
135
+ # members, is what we expect it to be.
136
+ cfg = OmegaConf.structured(experiment.Experiment)
137
+ # the following removes the possible effect of env variables
138
+ ds_arg = cfg.data_source_ImplicitronDataSource_args
139
+ ds_arg.dataset_map_provider_JsonIndexDatasetMapProvider_args.dataset_root = ""
140
+ ds_arg.dataset_map_provider_JsonIndexDatasetMapProviderV2_args.dataset_root = ""
141
+ if "dataset_map_provider_SqlIndexDatasetMapProvider_args" in ds_arg:
142
+ del ds_arg.dataset_map_provider_SqlIndexDatasetMapProvider_args
143
+ cfg.training_loop_ImplicitronTrainingLoop_args.visdom_port = 8097
144
+ yaml = OmegaConf.to_yaml(cfg, sort_keys=False)
145
+ if DEBUG:
146
+ (DATA_DIR / "experiment.yaml").write_text(yaml)
147
+ self.assertEqual(yaml, (DATA_DIR / "experiment.yaml").read_text())
148
+
149
+ def test_load_configs(self):
150
+ # Check that all the pre-prepared configs are valid.
151
+ config_files = []
152
+
153
+ for pattern in (
154
+ "repro_singleseq*.yaml",
155
+ "repro_multiseq*.yaml",
156
+ "overfit_singleseq*.yaml",
157
+ ):
158
+ config_files.extend(
159
+ [
160
+ f
161
+ for f in IMPLICITRON_CONFIGS_DIR.glob(pattern)
162
+ if not f.name.endswith("_base.yaml")
163
+ ]
164
+ )
165
+
166
+ for file in config_files:
167
+ with self.subTest(file.name):
168
+ with initialize_config_dir(config_dir=str(IMPLICITRON_CONFIGS_DIR)):
169
+ compose(file.name)
170
+
171
+ def test_optimizer_factory(self):
172
+ model = torch.nn.Linear(2, 2)
173
+
174
+ adam, sched = ImplicitronOptimizerFactory(breed="Adam")(0, model)
175
+ self.assertIsInstance(adam, torch.optim.Adam)
176
+ sgd, sched = ImplicitronOptimizerFactory(breed="SGD")(0, model)
177
+ self.assertIsInstance(sgd, torch.optim.SGD)
178
+ adagrad, sched = ImplicitronOptimizerFactory(breed="Adagrad")(0, model)
179
+ self.assertIsInstance(adagrad, torch.optim.Adagrad)
180
+
181
+
182
+ class TestNerfRepro(unittest.TestCase):
183
+ @unittest.skip("This test runs full blender training.")
184
+ def test_nerf_blender(self):
185
+ # Train vanilla NERF.
186
+ # Set env vars BLENDER_DATASET_ROOT and BLENDER_SINGLESEQ_CLASS first!
187
+ if not interactive_testing_requested():
188
+ return
189
+ with initialize_config_dir(config_dir=str(IMPLICITRON_CONFIGS_DIR)):
190
+ cfg = compose(config_name="repro_singleseq_nerf_blender", overrides=[])
191
+ experiment_runner = experiment.Experiment(**cfg)
192
+ experiment.dump_cfg(cfg)
193
+ experiment_runner.run()
194
+
195
+ @unittest.skip("This test runs full llff training.")
196
+ def test_nerf_llff(self):
197
+ # Train vanilla NERF.
198
+ # Set env vars LLFF_DATASET_ROOT and LLFF_SINGLESEQ_CLASS first!
199
+ LLFF_SINGLESEQ_CLASS = os.environ["LLFF_SINGLESEQ_CLASS"]
200
+ if not interactive_testing_requested():
201
+ return
202
+ with initialize_config_dir(config_dir=str(IMPLICITRON_CONFIGS_DIR)):
203
+ cfg = compose(
204
+ config_name=f"repro_singleseq_nerf_llff_{LLFF_SINGLESEQ_CLASS}",
205
+ overrides=[],
206
+ )
207
+ experiment_runner = experiment.Experiment(**cfg)
208
+ experiment.dump_cfg(cfg)
209
+ experiment_runner.run()
210
+
211
+ @unittest.skip("This test runs nerf training on co3d v2 - manyview.")
212
+ def test_nerf_co3dv2_manyview(self):
213
+ # Train NERF
214
+ if not interactive_testing_requested():
215
+ return
216
+ with initialize_config_dir(config_dir=str(IMPLICITRON_CONFIGS_DIR)):
217
+ cfg = compose(
218
+ config_name="repro_singleseq_v2_nerf",
219
+ overrides=[],
220
+ )
221
+ experiment_runner = experiment.Experiment(**cfg)
222
+ experiment.dump_cfg(cfg)
223
+ experiment_runner.run()
224
+
225
+ @unittest.skip("This test runs nerformer training on co3d v2 - fewview.")
226
+ def test_nerformer_co3dv2_fewview(self):
227
+ # Train NeRFormer
228
+ if not interactive_testing_requested():
229
+ return
230
+ with initialize_config_dir(config_dir=str(IMPLICITRON_CONFIGS_DIR)):
231
+ cfg = compose(
232
+ config_name="repro_multiseq_v2_nerformer",
233
+ overrides=[],
234
+ )
235
+ experiment_runner = experiment.Experiment(**cfg)
236
+ experiment.dump_cfg(cfg)
237
+ experiment_runner.run()
238
+
239
+ @unittest.skip("This test checks resuming of the NeRF training.")
240
+ def test_nerf_blender_resume(self):
241
+ # Train one train batch of NeRF, then resume for one more batch.
242
+ # Set env vars BLENDER_DATASET_ROOT and BLENDER_SINGLESEQ_CLASS first!
243
+ if not interactive_testing_requested():
244
+ return
245
+ with initialize_config_dir(config_dir=str(IMPLICITRON_CONFIGS_DIR)):
246
+ with tempfile.TemporaryDirectory() as exp_dir:
247
+ cfg = compose(config_name="repro_singleseq_nerf_blender", overrides=[])
248
+ cfg.exp_dir = exp_dir
249
+
250
+ # set dataset len to 1
251
+
252
+ # fmt: off
253
+ (
254
+ cfg
255
+ .data_source_ImplicitronDataSource_args
256
+ .data_loader_map_provider_SequenceDataLoaderMapProvider_args
257
+ .dataset_length_train
258
+ ) = 1
259
+ # fmt: on
260
+
261
+ # run for one epoch
262
+ cfg.training_loop_ImplicitronTrainingLoop_args.max_epochs = 1
263
+ experiment_runner = experiment.Experiment(**cfg)
264
+ experiment.dump_cfg(cfg)
265
+ experiment_runner.run()
266
+
267
+ # update num epochs + 2, let the optimizer resume
268
+ cfg.training_loop_ImplicitronTrainingLoop_args.max_epochs = 3
269
+ experiment_runner = experiment.Experiment(**cfg)
270
+ experiment_runner.run()
271
+
272
+ # start from scratch
273
+ cfg.model_factory_ImplicitronModelFactory_args.resume = False
274
+ experiment_runner = experiment.Experiment(**cfg)
275
+ experiment_runner.run()
276
+
277
+ # force resume from epoch 1
278
+ cfg.model_factory_ImplicitronModelFactory_args.resume = True
279
+ cfg.model_factory_ImplicitronModelFactory_args.force_resume = True
280
+ cfg.model_factory_ImplicitronModelFactory_args.resume_epoch = 1
281
+ experiment_runner = experiment.Experiment(**cfg)
282
+ experiment_runner.run()
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/tests/test_optimizer_factory.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import logging
10
+ import os
11
+ import unittest
12
+
13
+ import torch
14
+ from pytorch3d.implicitron.tools.config import expand_args_fields, get_default_args
15
+
16
+ from ..impl.optimizer_factory import (
17
+ ImplicitronOptimizerFactory,
18
+ logger as factory_logger,
19
+ )
20
+
21
# Truthy when the FB_TEST env var is set — presumably indicates a
# Meta-internal test run (NOTE(review): confirm; when set, the value is the
# raw env string, not a bool).
internal = os.environ.get("FB_TEST", False)
22
+
23
+
24
class TestOptimizerFactory(unittest.TestCase):
    """
    Tests for the parameter-group assignment performed by
    `ImplicitronOptimizerFactory._get_param_groups` on trees of `Node` modules.

    The tests below exercise the precedence observed in the assertions
    (most specific wins): a per-parameter entry ("p<i>") over the module's own
    "self" entry, over an entry set by the parent for that member ("m<i>"),
    over a group inherited from an ancestor, over "default".
    """

    def setUp(self) -> None:
        # Deterministic parameter values; expand the configurable fields of
        # the factory so it can be instantiated from its default args.
        torch.manual_seed(42)
        expand_args_fields(ImplicitronOptimizerFactory)

    def _get_param_groups(self, model):
        # Build a factory from default args and return its param-group mapping
        # for `model`, suppressing the factory logger's output below ERROR
        # for the duration of the call.
        default_cfg = get_default_args(ImplicitronOptimizerFactory)
        factory = ImplicitronOptimizerFactory(default_cfg)
        oldlevel = factory_logger.level
        factory_logger.setLevel(logging.ERROR)
        out = factory._get_param_groups(model)
        factory_logger.setLevel(oldlevel)
        return out

    def _assert_allin(self, a, param_groups, key):
        """
        Asserts that the group named by `key` contains exactly the
        parameters in `a` (membership is checked in both directions).
        """
        with self.subTest(f"Testing key {key}"):
            b = param_groups[key]
            for el in a:
                if el not in b:
                    raise ValueError(
                        f"Element {el}\n\n from:\n\n {a}\n\n not in:\n\n {b}\n\n."
                        + f" Full param groups = \n\n{param_groups}"
                    )
            for el in b:
                if el not in a:
                    raise ValueError(
                        f"Element {el}\n\n from:\n\n {b}\n\n not in:\n\n {a}\n\n."
                        + f" Full param groups = \n\n{param_groups}"
                    )

    def test_default_param_group_assignment(self):
        # No param_groups anywhere in the tree -> everything goes to "default".
        pa, pb, pc = [torch.nn.Parameter(data=torch.tensor(i * 1.0)) for i in range(3)]
        na, nb = Node(params=[pa]), Node(params=[pb])
        root = Node(children=[na, nb], params=[pc])
        param_groups = self._get_param_groups(root)
        self._assert_allin([pa, pb, pc], param_groups, "default")

    def test_member_overrides_default_param_group_assignment(self):
        # The parent assigns member m1 (nb) to group "pb"; only pb moves there.
        pa, pb, pc = [torch.nn.Parameter(data=torch.tensor(i * 1.0)) for i in range(3)]
        na, nb = Node(params=[pa]), Node(params=[pb])
        root = Node(children=[na, nb], params=[pc], param_groups={"m1": "pb"})
        param_groups = self._get_param_groups(root)
        self._assert_allin([pa, pc], param_groups, "default")
        self._assert_allin([pb], param_groups, "pb")

    def test_self_overrides_member_param_group_assignment(self):
        # nb's own "self" entry ("pb_self") beats the parent's member entry
        # ("pb_member"), which ends up empty.
        pa, pb, pc = [torch.nn.Parameter(data=torch.tensor(i * 1.0)) for i in range(3)]
        na, nb = Node(params=[pa]), Node(params=[pb], param_groups={"self": "pb_self"})
        root = Node(children=[na, nb], params=[pc], param_groups={"m1": "pb_member"})
        param_groups = self._get_param_groups(root)
        self._assert_allin([pa, pc], param_groups, "default")
        self._assert_allin([pb], param_groups, "pb_self")
        assert len(param_groups["pb_member"]) == 0, param_groups

    def test_param_overrides_self_param_group_assignment(self):
        # NOTE(review): "p1" matches no parameter of nb (its only parameter is
        # p0), so the "self" assignment wins here — confirm this is the intent
        # suggested by the test name.
        pa, pb, pc = [torch.nn.Parameter(data=torch.tensor(i * 1.0)) for i in range(3)]
        na, nb = Node(params=[pa]), Node(
            params=[pb], param_groups={"self": "pb_self", "p1": "pb_param"}
        )
        root = Node(children=[na, nb], params=[pc], param_groups={"m1": "pb_member"})
        param_groups = self._get_param_groups(root)
        self._assert_allin([pa, pc], param_groups, "default")
        self._assert_allin([pb], param_groups, "pb_self")
        assert len(param_groups["pb_member"]) == 0, param_groups

    def test_no_param_groups_defined(self):
        # Same as the default-assignment case: absence of param_groups
        # members leaves all parameters in "default".
        pa, pb, pc = [torch.nn.Parameter(data=torch.tensor(i * 1.0)) for i in range(3)]
        na, nb = Node(params=[pa]), Node(params=[pb])
        root = Node(children=[na, nb], params=[pc])
        param_groups = self._get_param_groups(root)
        self._assert_allin([pa, pb, pc], param_groups, "default")

    def test_double_dotted(self):
        # Dotted paths from the root: "m0.m0.p0" targets a single parameter
        # two levels down; "m0.m0" targets the rest of that module.
        pa, pb = [torch.nn.Parameter(data=torch.tensor(i * 1.0)) for i in range(2)]
        na = Node(params=[pa, pb])
        nb = Node(children=[na])
        root = Node(children=[nb], param_groups={"m0.m0.p0": "X", "m0.m0": "Y"})
        param_groups = self._get_param_groups(root)
        self._assert_allin([pa], param_groups, "X")
        self._assert_allin([pb], param_groups, "Y")

    def test_tree_param_groups_defined(self):
        """
        Test generic tree assignment.

        A0
        |---------------------------
        |     |                    |
        Bb    M                    J-
        |-----                     |-------
        |    |                     |      |
        C    Ddg                   K      Ll
        |--------------
        |   |     |   |
        E4  Ff    G   H-

        All nodes have one parameter. Character next to the capital
        letter means they have added something to their `parameter_groups`:
            - small letter same as capital means self is set to that letter
            - small letter different then capital means that member is set
              (the one that is named like that)
            - number means parameter's parameter_group is set like that
            - "-" means it does not have `parameter_groups` member
        """
        p = [torch.nn.Parameter(data=torch.tensor(i * 1.0)) for i in range(12)]
        L = Node(params=[p[11]], param_groups={"self": "l"})
        K = Node(params=[p[10]], param_groups={})
        J = Node(params=[p[9]], param_groups=None, children=[K, L])
        M = Node(params=[p[8]], param_groups={})

        E = Node(params=[p[4]], param_groups={"p0": "4"})
        F = Node(params=[p[5]], param_groups={"self": "f"})
        G = Node(params=[p[6]], param_groups={})
        H = Node(params=[p[7]], param_groups=None)

        D = Node(
            params=[p[3]], param_groups={"self": "d", "m2": "g"}, children=[E, F, G, H]
        )
        C = Node(params=[p[2]], param_groups={})

        B = Node(params=[p[1]], param_groups={"self": "b"}, children=[C, D])

        A = Node(params=[p[0]], param_groups={"p0": "0"}, children=[B, M, J])

        param_groups = self._get_param_groups(A)

        # if parts of the group belong to two different categories assert is repeated
        # parameter level
        self._assert_allin([p[0]], param_groups, "0")
        self._assert_allin([p[4]], param_groups, "4")
        # self level
        self._assert_allin([p[5]], param_groups, "f")
        self._assert_allin([p[11]], param_groups, "l")
        self._assert_allin([p[2], p[1]], param_groups, "b")
        self._assert_allin([p[7], p[3]], param_groups, "d")
        # member level
        self._assert_allin([p[6]], param_groups, "g")
        # inherit level
        self._assert_allin([p[7], p[3]], param_groups, "d")
        self._assert_allin([p[2], p[1]], param_groups, "b")
        # default level
        self._assert_allin([p[8], p[9], p[10]], param_groups, "default")
170
+
171
+
172
class Node(torch.nn.Module):
    """
    Minimal module for building parameter trees in tests.

    Children are registered as submodules named m0, m1, ...; parameters are
    attached as attributes p0, p1, ...; an optional `param_groups` mapping is
    stored verbatim on the instance (only when provided).
    """

    def __init__(self, children=(), params=(), param_groups=None):
        super().__init__()
        for idx, submodule in enumerate(children):
            self.add_module(f"m{idx}", submodule)
        for idx, parameter in enumerate(params):
            setattr(self, f"p{idx}", parameter)
        if param_groups is not None:
            self.param_groups = param_groups

    def __str__(self):
        return f"modules:\n{self._modules}\nparameters\n{self._parameters}"
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/tests/test_visualize.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import os
10
+ import unittest
11
+
12
+ from .. import visualize_reconstruction
13
+ from .utils import interactive_testing_requested
14
+
15
# Truthy when the FB_TEST env var is set — presumably indicates a
# Meta-internal test run (NOTE(review): confirm; when set, the value is the
# raw env string, not a bool).
internal = os.environ.get("FB_TEST", False)
16
+
17
+
18
class TestVisualize(unittest.TestCase):
    def test_from_defaults(self):
        """
        Interactive-only smoke test: render visualizations for the checkpoint
        directory given by the `exp_dir` env var, using small render/video
        sizes for speed.
        """
        if not interactive_testing_requested():
            return
        overrides = [
            "exp_dir=" + os.environ["exp_dir"],
            "n_eval_cameras=40",
            "render_size=[64,64]",
            "video_size=[256,256]",
        ]
        visualize_reconstruction.main(overrides)
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/tests/utils.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import contextlib
10
+ import logging
11
+ import os
12
+ import re
13
+
14
+
15
@contextlib.contextmanager
def intercept_logs(logger_name: str, regexp: str):
    """
    Collect messages emitted through the logger named `logger_name` whose
    formatted text matches `regexp` (via `re.search`).

    Yields the list that accumulates matching messages. Records are never
    suppressed: the installed filter always returns True, and it is removed
    again when the context exits.
    """
    captured = []
    target = logging.getLogger(logger_name)

    class _Recorder(logging.Filter):
        def filter(self, record):
            text = record.getMessage()
            if re.search(regexp, text):
                captured.append(text)
            return True

    recorder = _Recorder()
    target.addFilter(recorder)
    try:
        yield captured
    finally:
        target.removeFilter(recorder)
34
+
35
+
36
def interactive_testing_requested() -> bool:
    """
    Certain tests are only useful when run interactively, and so are not
    regularly run. These are activated by this function returning True, which
    the user requests by setting the environment variable
    `PYTORCH3D_INTERACTIVE_TESTING` to 1.
    """
    # Only the exact string "1" enables interactive testing; unset or any
    # other value (including "true") disables it.
    return os.environ.get("PYTORCH3D_INTERACTIVE_TESTING", "") == "1"
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/visualize_reconstruction.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ # pyre-unsafe
9
+
10
+ """
11
+ Script to visualize a previously trained model. Example call:
12
+
13
+ pytorch3d_implicitron_visualizer \
14
+ exp_dir='./exps/checkpoint_dir' visdom_show_preds=True visdom_port=8097 \
15
+ n_eval_cameras=40 render_size="[64,64]" video_size="[256,256]"
16
+ """
17
+
18
+ import os
19
+ import sys
20
+ from typing import Optional, Tuple
21
+
22
+ import numpy as np
23
+ import torch
24
+ from omegaconf import DictConfig, OmegaConf
25
+ from pytorch3d.implicitron.models.visualization.render_flyaround import render_flyaround
26
+ from pytorch3d.implicitron.tools.config import enable_get_default_args, get_default_args
27
+
28
+ from .experiment import Experiment
29
+
30
+
31
def visualize_reconstruction(
    exp_dir: str = "",
    restrict_sequence_name: Optional[str] = None,
    output_directory: Optional[str] = None,
    render_size: Tuple[int, int] = (512, 512),
    video_size: Optional[Tuple[int, int]] = None,
    split: str = "train",
    n_source_views: int = 9,
    n_eval_cameras: int = 40,
    visdom_show_preds: bool = False,
    visdom_server: str = "http://127.0.0.1",
    visdom_port: int = 8097,
    visdom_env: Optional[str] = None,
    **render_flyaround_kwargs,
) -> None:
    """
    Given an `exp_dir` containing a trained Implicitron model, generates videos
    consisting of renders of sequences from the dataset used to train and
    evaluate the trained Implicitron model.

    Args:
        exp_dir: Implicitron experiment directory.
        restrict_sequence_name: If set, defines the list of sequences to visualize.
        output_directory: If set, defines a custom directory to output visualizations to.
        render_size: The size (HxW) of the generated renders.
        video_size: The size (HxW) of the output video.
        split: The dataset split to use for visualization.
            Can be "train" / "val" / "test".
        n_source_views: The number of source views added to each rendered batch. These
            views are required inputs for models such as NeRFormer / NeRF-WCE.
        n_eval_cameras: The number of cameras in each fly-around trajectory.
        visdom_show_preds: If `True`, outputs visualizations to visdom.
        visdom_server: The address of the visdom server.
        visdom_port: The port of the visdom server.
        visdom_env: If set, defines a custom name for the visdom environment.
        render_flyaround_kwargs: Keyword arguments passed to the invoked `render_flyaround`
            function (see `pytorch3d.implicitron.models.visualization.render_flyaround`).
    """

    # In case an output directory is specified use it. If no output_directory
    # is specified create a vis folder inside the experiment directory
    if output_directory is None:
        output_directory = os.path.join(exp_dir, "vis")
    os.makedirs(output_directory, exist_ok=True)

    # Set the random seeds
    torch.manual_seed(0)
    np.random.seed(0)

    # Get the config from the experiment_directory,
    # and overwrite relevant fields
    config = _get_config_from_experiment_directory(exp_dir)
    config.exp_dir = exp_dir
    # important so that the CO3D dataset gets loaded in full
    data_source_args = config.data_source_ImplicitronDataSource_args
    if "dataset_map_provider_JsonIndexDatasetMapProvider_args" in data_source_args:
        dataset_args = (
            data_source_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
        )
        dataset_args.test_on_train = False
        if restrict_sequence_name is not None:
            dataset_args.restrict_sequence_name = restrict_sequence_name

    # Set the rendering image size
    model_factory_args = config.model_factory_ImplicitronModelFactory_args
    # force_resume=True makes the factory load the trained checkpoint.
    model_factory_args.force_resume = True
    model_args = model_factory_args.model_GenericModel_args
    # NOTE(review): render_size is documented as (H, W) but index 0 is
    # assigned to width — harmless for square sizes; confirm the ordering.
    model_args.render_image_width = render_size[0]
    model_args.render_image_height = render_size[1]

    # Load the previously trained model
    experiment = Experiment(**config)
    model = experiment.model_factory(exp_dir=exp_dir)
    # Requires a CUDA device; the model is moved to the GPU and put in eval mode.
    device = torch.device("cuda")
    model.to(device)
    model.eval()

    # Setup the dataset
    data_source = experiment.data_source
    dataset_map, _ = data_source.get_datasets_and_dataloaders()
    dataset = dataset_map[split]
    if dataset is None:
        raise ValueError(f"{split} dataset not provided")

    if visdom_env is None:
        # Derive the visdom environment name from the training config.
        visdom_env = (
            "visualizer_" + config.training_loop_ImplicitronTrainingLoop_args.visdom_env
        )

    # iterate over the sequences in the dataset
    for sequence_name in dataset.sequence_names():
        with torch.no_grad():
            render_kwargs = {
                "dataset": dataset,
                "sequence_name": sequence_name,
                "model": model,
                "output_video_path": os.path.join(output_directory, "video"),
                "n_source_views": n_source_views,
                "visdom_show_preds": visdom_show_preds,
                "n_flyaround_poses": n_eval_cameras,
                "visdom_server": visdom_server,
                "visdom_port": visdom_port,
                "visdom_environment": visdom_env,
                "video_resize": video_size,
                "device": device,
                # Caller-supplied kwargs win over the defaults above.
                **render_flyaround_kwargs,
            }
            render_flyaround(**render_kwargs)
139
+
140
+
141
+ enable_get_default_args(visualize_reconstruction)
142
+
143
+
144
def _get_config_from_experiment_directory(experiment_directory) -> DictConfig:
    """
    Load `expconfig.yaml` from `experiment_directory` and merge it on top of
    the default `Experiment` arguments, so fields absent from the saved
    config fall back to their defaults.
    """
    loaded = OmegaConf.load(os.path.join(experiment_directory, "expconfig.yaml"))
    defaults = get_default_args(Experiment)
    # pyre-ignore[7]
    return OmegaConf.merge(defaults, loaded)
149
+
150
+
151
def main(argv=None) -> None:
    """
    Command-line entry point.

    Automatically parses the arguments of `visualize_reconstruction` from the
    CLI (OmegaConf dotlist syntax) and runs it with gradients disabled.

    Args:
        argv: Argument list to parse. Defaults to `sys.argv`, read at call
            time (the previous `argv=sys.argv` default captured the value at
            import time).
    """
    if argv is None:
        argv = sys.argv
    # automatically parses arguments of visualize_reconstruction
    cfg = OmegaConf.create(get_default_args(visualize_reconstruction))
    cfg.update(OmegaConf.from_cli(argv))
    with torch.no_grad():
        visualize_reconstruction(**cfg)


if __name__ == "__main__":
    main()
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/.gitignore ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ checkpoints
2
+ outputs
3
+ data/*.png
4
+ data/*.pth
5
+ data/*_license.txt
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/README.md ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Neural Radiance Fields in PyTorch3D
2
+ ===================================
3
+
4
+ This project implements the Neural Radiance Fields (NeRF) from [1].
5
+
6
+ <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/nerf_project_logo.gif" width="600" height="338"/>
7
+
8
+
9
+ Installation
10
+ ------------
11
+ 1) [Install PyTorch3D](https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md)
12
+
13
+ 2) Install other dependencies:
14
+ - [`visdom`](https://github.com/facebookresearch/visdom)
15
+ - [`hydra`](https://github.com/facebookresearch/hydra)
16
+ - [`Pillow`](https://python-pillow.org/)
17
+ - [`requests`](https://pypi.org/project/requests/)
18
+
19
+ E.g. using `pip`:
20
+ ```
21
+ pip install visdom
22
+ pip install hydra-core --upgrade
23
+ pip install Pillow
24
+ pip install requests
25
+ ```
26
+
27
+ Exporting videos further requires a working `ffmpeg`.
28
+
29
+ Training NeRF
30
+ -------------
31
+ ```
32
+ python ./train_nerf.py --config-name lego
33
+ ```
34
+ will train the model from [1] on the Lego dataset.
35
+
36
+ Note that the script outputs visualizations to `Visdom`. In order to enable this, make sure to start the visdom server (before launching the training) with the following command:
37
+ ```
38
+ python -m visdom.server
39
+ ```
40
+ Note that training on the "lego" scene takes roughly 24 hours on a single Tesla V100.
41
+
42
+ #### Training data
43
+ Note that the `train_nerf.py` script will automatically download the relevant dataset in case it is missing.
44
+
45
+ Testing NeRF
46
+ ------------
47
+ ```
48
+ python ./test_nerf.py --config-name lego
49
+ ```
50
+ Will load a trained model from the `./checkpoints` directory and evaluate it on the test split of the corresponding dataset (Lego in the case above).
51
+
52
+ ### Exporting multi-view video of the radiance field
53
+ Furthermore, the codebase supports generating videos of the neural radiance field.
54
+ The following generates a turntable video of the Lego scene:
55
+ ```
56
+ python ./test_nerf.py --config-name=lego test.mode='export_video'
57
+ ```
58
+ Note that this requires a working `ffmpeg` for generating the video from exported frames.
59
+
60
+ Additionally, note that generation of the video in the original resolution is quite slow. In order to speed up the process, one can decrease the resolution of the output video by setting the `data.image_size` flag:
61
+ ```
62
+ python ./test_nerf.py --config-name=lego test.mode='export_video' data.image_size="[128,128]"
63
+ ```
64
+ This will generate the video in a lower `128 x 128` resolution.
65
+
66
+
67
+ Training & testing on other datasets
68
+ ------------------------------------
69
+ Currently we support the following datasets:
70
+ - lego `python ./train_nerf.py --config-name lego`
71
+ - fern `python ./train_nerf.py --config-name fern`
72
+ - pt3logo `python ./train_nerf.py --config-name pt3logo`
73
+
74
+ The dataset files are located in the following public S3 bucket:
75
+ https://dl.fbaipublicfiles.com/pytorch3d_nerf_data
76
+
77
+ Attribution: `lego` and `fern` are data from the original code release of [1] in https://drive.google.com/drive/folders/128yBriW1IG_3NJ5Rp7APSTZsJqdJdfc1, which are hosted under the CC-BY license (https://creativecommons.org/licenses/by/4.0/) The S3 bucket files contains the same images while the camera matrices have been adjusted to follow the PyTorch3D convention.
78
+
79
+ #### Quantitative results
80
+ Below are the comparisons between our implementation and the official [`TensorFlow code`](https://github.com/bmild/nerf). The speed is measured on NVidia Quadro GP100.
81
+ ```
82
+ +----------------+------------------+------------------+-----------------+
83
+ | Implementation | Lego: test PSNR | Fern: test PSNR | training speed |
84
+ +----------------+------------------+------------------+-----------------+
85
+ | TF (official) | 31.0 | 27.5 | 0.24 sec/it |
86
+ | PyTorch3D | 32.7 | 27.9 | 0.18 sec/it |
87
+ +----------------+------------------+------------------+-----------------+
88
+ ```
89
+
90
+ #### References
91
+ [1] Ben Mildenhall and Pratul P. Srinivasan and Matthew Tancik and Jonathan T. Barron and Ravi Ramamoorthi and Ren Ng, NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis, ECCV2020
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/configs/fern.yaml ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: 3
2
+ resume: True
3
+ stats_print_interval: 10
4
+ validation_epoch_interval: 150
5
+ checkpoint_epoch_interval: 150
6
+ checkpoint_path: 'checkpoints/fern_pt3d.pth'
7
+ data:
8
+ dataset_name: 'fern'
9
+ image_size: [378, 504] # [height, width]
10
+ precache_rays: True
11
+ test:
12
+ mode: 'evaluation'
13
+ trajectory_type: 'figure_eight'
14
+ up: [0.0, 1.0, 0.0]
15
+ scene_center: [0.0, 0.0, -2.0]
16
+ n_frames: 100
17
+ fps: 20
18
+ trajectory_scale: 1.0
19
+ optimizer:
20
+ max_epochs: 37500
21
+ lr: 0.0005
22
+ lr_scheduler_step_size: 12500
23
+ lr_scheduler_gamma: 0.1
24
+ visualization:
25
+ history_size: 10
26
+ visdom: True
27
+ visdom_server: 'localhost'
28
+ visdom_port: 8097
29
+ visdom_env: 'nerf_pytorch3d'
30
+ raysampler:
31
+ n_pts_per_ray: 64
32
+ n_pts_per_ray_fine: 64
33
+ n_rays_per_image: 1024
34
+ min_depth: 1.2
35
+ max_depth: 6.28
36
+ stratified: True
37
+ stratified_test: False
38
+ chunk_size_test: 6000
39
+ implicit_function:
40
+ n_harmonic_functions_xyz: 10
41
+ n_harmonic_functions_dir: 4
42
+ n_hidden_neurons_xyz: 256
43
+ n_hidden_neurons_dir: 128
44
+ density_noise_std: 0.0
45
+ n_layers_xyz: 8
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/configs/lego.yaml ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: 3
2
+ resume: True
3
+ stats_print_interval: 10
4
+ validation_epoch_interval: 30
5
+ checkpoint_epoch_interval: 30
6
+ checkpoint_path: 'checkpoints/lego_pt3d.pth'
7
+ data:
8
+ dataset_name: 'lego'
9
+ image_size: [800, 800] # [height, width]
10
+ precache_rays: True
11
+ test:
12
+ mode: 'evaluation'
13
+ trajectory_type: 'circular'
14
+ up: [0.0, 0.0, 1.0]
15
+ scene_center: [0.0, 0.0, 0.0]
16
+ n_frames: 100
17
+ fps: 20
18
+ trajectory_scale: 0.2
19
+ optimizer:
20
+ max_epochs: 20000
21
+ lr: 0.0005
22
+ lr_scheduler_step_size: 5000
23
+ lr_scheduler_gamma: 0.1
24
+ visualization:
25
+ history_size: 10
26
+ visdom: True
27
+ visdom_server: 'localhost'
28
+ visdom_port: 8097
29
+ visdom_env: 'nerf_pytorch3d'
30
+ raysampler:
31
+ n_pts_per_ray: 64
32
+ n_pts_per_ray_fine: 64
33
+ n_rays_per_image: 1024
34
+ min_depth: 2.0
35
+ max_depth: 6.0
36
+ stratified: True
37
+ stratified_test: False
38
+ chunk_size_test: 6000
39
+ implicit_function:
40
+ n_harmonic_functions_xyz: 10
41
+ n_harmonic_functions_dir: 4
42
+ n_hidden_neurons_xyz: 256
43
+ n_hidden_neurons_dir: 128
44
+ density_noise_std: 0.0
45
+ n_layers_xyz: 8
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/configs/pt3logo.yaml ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: 3
2
+ resume: True
3
+ stats_print_interval: 10
4
+ validation_epoch_interval: 30
5
+ checkpoint_epoch_interval: 30
6
+ checkpoint_path: 'checkpoints/pt3logo_pt3d.pth'
7
+ data:
8
+ dataset_name: 'pt3logo'
9
+ image_size: [512, 1024] # [height, width]
10
+ precache_rays: True
11
+ test:
12
+ mode: 'export_video'
13
+ trajectory_type: 'figure_eight'
14
+ up: [0.0, -1.0, 0.0]
15
+ scene_center: [0.0, 0.0, 0.0]
16
+ n_frames: 100
17
+ fps: 20
18
+ trajectory_scale: 0.2
19
+ optimizer:
20
+ max_epochs: 100000
21
+ lr: 0.0005
22
+ lr_scheduler_step_size: 10000
23
+ lr_scheduler_gamma: 0.1
24
+ visualization:
25
+ history_size: 20
26
+ visdom: True
27
+ visdom_server: 'localhost'
28
+ visdom_port: 8097
29
+ visdom_env: 'nerf_pytorch3d'
30
+ raysampler:
31
+ n_pts_per_ray: 64
32
+ n_pts_per_ray_fine: 64
33
+ n_rays_per_image: 1024
34
+ min_depth: 8.0
35
+ max_depth: 23.0
36
+ stratified: True
37
+ stratified_test: False
38
+ chunk_size_test: 6000
39
+ implicit_function:
40
+ n_harmonic_functions_xyz: 10
41
+ n_harmonic_functions_dir: 4
42
+ n_hidden_neurons_xyz: 256
43
+ n_hidden_neurons_dir: 128
44
+ density_noise_std: 0.0
45
+ n_layers_xyz: 8
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/dataset.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import os
8
+ from typing import List, Optional, Tuple
9
+
10
+ import numpy as np
11
+ import requests
12
+ import torch
13
+ from PIL import Image
14
+ from pytorch3d.renderer import PerspectiveCameras
15
+ from torch.utils.data import Dataset
16
+
17
+
18
+ DEFAULT_DATA_ROOT = os.path.join(
19
+ os.path.dirname(os.path.realpath(__file__)), "..", "data"
20
+ )
21
+
22
+ DEFAULT_URL_ROOT = "https://dl.fbaipublicfiles.com/pytorch3d_nerf_data"
23
+
24
+ ALL_DATASETS = ("lego", "fern", "pt3logo")
25
+
26
+
27
def trivial_collate(batch):
    """
    Identity collate function: returns the list of samples unchanged,
    without any stacking or merging.
    """
    return batch
32
+
33
+
34
class ListDataset(Dataset):
    """
    A simple dataset backed by an in-memory list of entries.
    """

    def __init__(self, entries: List) -> None:
        """
        Args:
            entries: The list of dataset entries.
        """
        self._items = entries

    def __len__(self) -> int:
        return len(self._items)

    def __getitem__(self, index):
        return self._items[index]
53
+
54
+
55
def get_nerf_datasets(
    dataset_name: str,  # 'lego | fern'
    image_size: Tuple[int, int],
    data_root: str = DEFAULT_DATA_ROOT,
    autodownload: bool = True,
) -> Tuple[Dataset, Dataset, Dataset]:
    """
    Obtains the training and validation dataset object for a dataset specified
    with the `dataset_name` argument.

    Args:
        dataset_name: The name of the dataset to load.
        image_size: A tuple (height, width) denoting the sizes of the loaded dataset images.
        data_root: The root folder at which the data is stored.
        autodownload: Auto-download the dataset files in case they are missing.

    Returns:
        train_dataset: The training dataset object.
        val_dataset: The validation dataset object.
        test_dataset: The testing dataset object.

    Raises:
        ValueError: If `dataset_name` is unknown, or if `image_size` implies
            non-isotropic rescaling of the stored images.
    """

    if dataset_name not in ALL_DATASETS:
        # Fixed stray doubled quote in the message ("'...''" -> "'...'").
        raise ValueError(f"'{dataset_name}' does not refer to a known dataset.")

    print(f"Loading dataset {dataset_name}, image size={str(image_size)} ...")

    # The dataset is stored as a cameras .pth file plus one tall .png that
    # vertically concatenates all frames.
    cameras_path = os.path.join(data_root, dataset_name + ".pth")
    image_path = cameras_path.replace(".pth", ".png")

    if autodownload and any(not os.path.isfile(p) for p in (cameras_path, image_path)):
        # Automatically download the data files if missing.
        download_data((dataset_name,), data_root=data_root)

    train_data = torch.load(cameras_path)
    n_cameras = train_data["cameras"]["R"].shape[0]

    # Temporarily lift PIL's decompression-bomb limit; restored afterwards.
    _image_max_image_pixels = Image.MAX_IMAGE_PIXELS
    Image.MAX_IMAGE_PIXELS = None  # The dataset image is very large ...
    images = torch.FloatTensor(np.array(Image.open(image_path))) / 255.0
    # Split the tall image into one chunk per camera and drop any alpha channel.
    images = torch.stack(torch.chunk(images, n_cameras, dim=0))[..., :3]
    Image.MAX_IMAGE_PIXELS = _image_max_image_pixels

    scale_factors = [s_new / s for s, s_new in zip(images.shape[1:3], image_size)]
    if abs(scale_factors[0] - scale_factors[1]) > 1e-3:
        raise ValueError(
            "Non-isotropic scaling is not allowed. Consider changing the 'image_size' argument."
        )
    scale_factor = sum(scale_factors) * 0.5

    if scale_factor != 1.0:
        print(f"Rescaling dataset (factor={scale_factor})")
        images = torch.nn.functional.interpolate(
            images.permute(0, 3, 1, 2),
            size=tuple(image_size),
            mode="bilinear",
        ).permute(0, 2, 3, 1)

    # One single-view camera object per frame.
    cameras = [
        PerspectiveCameras(
            **{k: v[cami][None] for k, v in train_data["cameras"].items()}
        ).to("cpu")
        for cami in range(n_cameras)
    ]

    train_idx, val_idx, test_idx = train_data["split"]

    train_dataset, val_dataset, test_dataset = [
        ListDataset(
            [
                {"image": images[i], "camera": cameras[i], "camera_idx": int(i)}
                for i in idx
            ]
        )
        for idx in [train_idx, val_idx, test_idx]
    ]

    return train_dataset, val_dataset, test_dataset
133
+
134
+
135
def download_data(
    dataset_names: Optional[List[str]] = None,
    data_root: str = DEFAULT_DATA_ROOT,
    url_root: str = DEFAULT_URL_ROOT,
) -> None:
    """
    Downloads the relevant dataset files.

    Args:
        dataset_names: A list of the names of datasets to download. If `None`,
            downloads all available datasets.
        data_root: Local directory the files are written to (created if missing).
        url_root: Base URL the files are fetched from.

    Raises:
        requests.HTTPError: If a download request returns an HTTP error status.
    """

    if dataset_names is None:
        dataset_names = ALL_DATASETS

    os.makedirs(data_root, exist_ok=True)

    for dataset_name in dataset_names:
        # Each dataset consists of a cameras file, an images file and a license.
        cameras_file = dataset_name + ".pth"
        images_file = cameras_file.replace(".pth", ".png")
        license_file = cameras_file.replace(".pth", "_license.txt")

        for fl in (cameras_file, images_file, license_file):
            local_fl = os.path.join(data_root, fl)
            # Build the URL with an explicit '/'; os.path.join would insert
            # a backslash on Windows and produce an invalid URL.
            remote_fl = f"{url_root.rstrip('/')}/{fl}"

            print(f"Downloading dataset {dataset_name} from {remote_fl} to {local_fl}.")

            r = requests.get(remote_fl)
            # Fail loudly instead of silently saving an HTTP error page
            # as a dataset file.
            r.raise_for_status()
            with open(local_fl, "wb") as f:
                f.write(r.content)
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/eval_video_utils.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import math
8
+ from typing import Tuple
9
+
10
+ import torch
11
+ from pytorch3d.renderer import look_at_view_transform, PerspectiveCameras
12
+ from torch.utils.data.dataset import Dataset
13
+
14
+
15
def generate_eval_video_cameras(
    train_dataset,
    n_eval_cams: int = 100,
    trajectory_type: str = "figure_eight",
    trajectory_scale: float = 0.2,
    scene_center: Tuple[float, float, float] = (0.0, 0.0, 0.0),
    up: Tuple[float, float, float] = (0.0, 0.0, 1.0),
) -> Dataset[torch.Tensor]:
    """
    Generate a camera trajectory for visualizing a NeRF model.

    Args:
        train_dataset: The training dataset object.
        n_eval_cams: Number of cameras in the trajectory.
        trajectory_type: The type of the camera trajectory. Can be one of:
            circular: Rotating around the center of the scene at a fixed radius.
            figure_eight: Figure-of-8 trajectory around the center of the
                central camera of the training dataset.
            trefoil_knot: Same as 'figure_eight', but the trajectory has a shape
                of a trefoil knot (https://en.wikipedia.org/wiki/Trefoil_knot).
            figure_eight_knot: Same as 'figure_eight', but the trajectory has a shape
                of a figure-eight knot
                (https://en.wikipedia.org/wiki/Figure-eight_knot_(mathematics)).
        trajectory_scale: The extent of the trajectory.
        up: The "up" vector of the scene (=the normal of the scene floor).
            Active for the `trajectory_type="circular"`.
        scene_center: The center of the scene in world coordinates which all
            the cameras from the generated trajectory look at.
    Returns:
        A list of per-camera dicts (keys "image", "camera", "camera_idx")
        which can be used as the test dataset; "image" is always `None`.
    """
    if trajectory_type in ("figure_eight", "trefoil_knot", "figure_eight_knot"):
        cam_centers = torch.cat(
            [e["camera"].get_camera_center() for e in train_dataset]
        )
        # get the nearest camera center to the mean of centers
        mean_camera_idx = (
            ((cam_centers - cam_centers.mean(dim=0)[None]) ** 2)
            .sum(dim=1)
            .min(dim=0)
            .indices
        )
        # generate the knot trajectory in canonical coords;
        # the n_eval_cams+1 / [:n_eval_cams] combination drops the duplicated
        # endpoint at 2*pi so the looped video does not repeat a frame
        time = torch.linspace(0, 2 * math.pi, n_eval_cams + 1)[:n_eval_cams]
        if trajectory_type == "trefoil_knot":
            traj = _trefoil_knot(time)
        elif trajectory_type == "figure_eight_knot":
            traj = _figure_eight_knot(time)
        elif trajectory_type == "figure_eight":
            traj = _figure_eight(time)
        # shift the curve so its highest z point touches z=0
        traj[:, 2] -= traj[:, 2].max()

        # transform the canonical knot to the coord frame of the mean camera
        traj_trans = (
            train_dataset[mean_camera_idx]["camera"]
            .get_world_to_view_transform()
            .inverse()
        )
        # scale the knot relative to the spread of the training camera centers
        traj_trans = traj_trans.scale(cam_centers.std(dim=0).mean() * trajectory_scale)
        traj = traj_trans.transform_points(traj)

    elif trajectory_type == "circular":
        cam_centers = torch.cat(
            [e["camera"].get_camera_center() for e in train_dataset]
        )

        # fit plane to the camera centers
        plane_mean = cam_centers.mean(dim=0)
        cam_centers_c = cam_centers - plane_mean[None]

        if up is not None:
            # use the up vector instead of the plane through the camera centers
            plane_normal = torch.FloatTensor(up)
        else:
            # PCA: smallest-eigenvalue eigenvector of the covariance
            # is the normal of the best-fit plane
            cov = (cam_centers_c.t() @ cam_centers_c) / cam_centers_c.shape[0]
            _, e_vec = torch.linalg.eigh(cov, UPLO="U")
            plane_normal = e_vec[:, 0]

        # project camera centers onto the fitted plane
        plane_dist = (plane_normal[None] * cam_centers_c).sum(dim=-1)
        cam_centers_on_plane = cam_centers_c - plane_dist[:, None] * plane_normal[None]

        cov = (
            cam_centers_on_plane.t() @ cam_centers_on_plane
        ) / cam_centers_on_plane.shape[0]
        _, e_vec = torch.linalg.eigh(cov, UPLO="U")
        # circle radius = mean distance of the projected centers from the origin
        traj_radius = (cam_centers_on_plane**2).sum(dim=1).sqrt().mean()
        # NOTE(review): unlike the knot branch, linspace here includes both 0
        # and 2*pi, so the first and last cameras coincide — confirm intended
        angle = torch.linspace(0, 2.0 * math.pi, n_eval_cams)
        traj = traj_radius * torch.stack(
            (torch.zeros_like(angle), angle.cos(), angle.sin()), dim=-1
        )
        # rotate the circle into the plane's eigenbasis and re-center it
        traj = traj @ e_vec.t() + plane_mean[None]

    else:
        raise ValueError(f"Unknown trajectory_type {trajectory_type}.")

    # point all cameras towards the center of the scene
    R, T = look_at_view_transform(
        eye=traj,
        at=(scene_center,),  # (1, 3)
        up=(up,),  # (1, 3)
        device=traj.device,
    )

    # get the average focal length and principal point
    focal = torch.cat([e["camera"].focal_length for e in train_dataset]).mean(dim=0)
    p0 = torch.cat([e["camera"].principal_point for e in train_dataset]).mean(dim=0)

    # assemble the dataset
    test_dataset = [
        {
            "image": None,
            "camera": PerspectiveCameras(
                focal_length=focal[None],
                principal_point=p0[None],
                R=R_[None],
                T=T_[None],
            ),
            "camera_idx": i,
        }
        for i, (R_, T_) in enumerate(zip(R, T))
    ]

    return test_dataset
138
+
139
+
140
+ def _figure_eight_knot(t: torch.Tensor, z_scale: float = 0.5):
141
+ x = (2 + (2 * t).cos()) * (3 * t).cos()
142
+ y = (2 + (2 * t).cos()) * (3 * t).sin()
143
+ z = (4 * t).sin() * z_scale
144
+ return torch.stack((x, y, z), dim=-1)
145
+
146
+
147
+ def _trefoil_knot(t: torch.Tensor, z_scale: float = 0.5):
148
+ x = t.sin() + 2 * (2 * t).sin()
149
+ y = t.cos() - 2 * (2 * t).cos()
150
+ z = -(3 * t).sin() * z_scale
151
+ return torch.stack((x, y, z), dim=-1)
152
+
153
+
154
+ def _figure_eight(t: torch.Tensor, z_scale: float = 0.5):
155
+ x = t.cos()
156
+ y = (2 * t).sin() / 2
157
+ z = t.sin() * z_scale
158
+ return torch.stack((x, y, z), dim=-1)
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/implicit_function.py ADDED
@@ -0,0 +1,301 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import Tuple
8
+
9
+ import torch
10
+ from pytorch3d.common.linear_with_repeat import LinearWithRepeat
11
+ from pytorch3d.renderer import HarmonicEmbedding, ray_bundle_to_ray_points, RayBundle
12
+
13
+
14
+ def _xavier_init(linear):
15
+ """
16
+ Performs the Xavier weight initialization of the linear layer `linear`.
17
+ """
18
+ torch.nn.init.xavier_uniform_(linear.weight.data)
19
+
20
+
21
class NeuralRadianceField(torch.nn.Module):
    """
    The NeRF MLP: labels 3D points sampled along camera rays with a scalar
    opacity (density) and an RGB color.

    A harmonic (positional) embedding of the point locations feeds a skip-MLP
    (`mlp_xyz`) that outputs the density and intermediate features; a second
    small MLP (`color_layer`) combines those features with the embedded,
    normalized ray directions to produce per-point colors.
    """

    def __init__(
        self,
        n_harmonic_functions_xyz: int = 6,
        n_harmonic_functions_dir: int = 4,
        n_hidden_neurons_xyz: int = 256,
        n_hidden_neurons_dir: int = 128,
        n_layers_xyz: int = 8,
        append_xyz: Tuple[int, ...] = (5,),
        use_multiple_streams: bool = True,
        **kwargs,
    ):
        """
        Args:
            n_harmonic_functions_xyz: The number of harmonic functions
                used to form the harmonic embedding of 3D point locations.
            n_harmonic_functions_dir: The number of harmonic functions
                used to form the harmonic embedding of the ray directions.
            n_hidden_neurons_xyz: The number of hidden units in the
                fully connected layers of the MLP that accepts the 3D point
                locations and outputs the occupancy field with the intermediate
                features.
            n_hidden_neurons_dir: The number of hidden units in the
                fully connected layers of the MLP that accepts the intermediate
                features and ray directions and outputs the radiance field
                (per-point colors).
            n_layers_xyz: The number of layers of the MLP that outputs the
                occupancy field.
            append_xyz: The list of indices of the skip layers of the occupancy MLP.
            use_multiple_streams: Whether density and color should be calculated on
                separate CUDA streams.
        """
        super().__init__()

        # The harmonic embedding layer converts input 3D coordinates
        # to a representation that is more suitable for
        # processing with a deep neural network.
        self.harmonic_embedding_xyz = HarmonicEmbedding(n_harmonic_functions_xyz)
        self.harmonic_embedding_dir = HarmonicEmbedding(n_harmonic_functions_dir)
        # each of the 3 input coords gets sin+cos per harmonic, plus the
        # raw 3 coords appended by HarmonicEmbedding
        embedding_dim_xyz = n_harmonic_functions_xyz * 2 * 3 + 3
        embedding_dim_dir = n_harmonic_functions_dir * 2 * 3 + 3

        self.mlp_xyz = MLPWithInputSkips(
            n_layers_xyz,
            embedding_dim_xyz,
            n_hidden_neurons_xyz,
            embedding_dim_xyz,
            n_hidden_neurons_xyz,
            input_skips=append_xyz,
        )

        self.intermediate_linear = torch.nn.Linear(
            n_hidden_neurons_xyz, n_hidden_neurons_xyz
        )
        _xavier_init(self.intermediate_linear)

        self.density_layer = torch.nn.Linear(n_hidden_neurons_xyz, 1)
        _xavier_init(self.density_layer)

        # Zero the bias of the density layer to avoid
        # a completely transparent initialization.
        self.density_layer.bias.data[:] = 0.0  # fixme: Sometimes this is not enough

        self.color_layer = torch.nn.Sequential(
            LinearWithRepeat(
                n_hidden_neurons_xyz + embedding_dim_dir, n_hidden_neurons_dir
            ),
            torch.nn.ReLU(True),
            torch.nn.Linear(n_hidden_neurons_dir, 3),
            torch.nn.Sigmoid(),
        )
        self.use_multiple_streams = use_multiple_streams

    def _get_densities(
        self,
        features: torch.Tensor,
        depth_values: torch.Tensor,
        density_noise_std: float,
    ) -> torch.Tensor:
        """
        This function takes `features` predicted by `self.mlp_xyz`
        and converts them to `raw_densities` with `self.density_layer`.
        `raw_densities` are later re-weighted using the depth step sizes
        and mapped to [0-1] range with 1 - inverse exponential of `raw_densities`.
        """
        raw_densities = self.density_layer(features)
        # per-sample depth step; the last step is a huge sentinel so the
        # final sample absorbs all remaining transmittance
        deltas = torch.cat(
            (
                depth_values[..., 1:] - depth_values[..., :-1],
                1e10 * torch.ones_like(depth_values[..., :1]),
            ),
            dim=-1,
        )[..., None]
        if density_noise_std > 0.0:
            # regularizing noise on the raw density (prevents floaters)
            raw_densities = (
                raw_densities + torch.randn_like(raw_densities) * density_noise_std
            )
        densities = 1 - (-deltas * torch.relu(raw_densities)).exp()
        return densities

    def _get_colors(
        self, features: torch.Tensor, rays_directions: torch.Tensor
    ) -> torch.Tensor:
        """
        This function takes per-point `features` predicted by `self.mlp_xyz`
        and evaluates the color model in order to attach to each
        point a 3D vector of its RGB color.
        """
        # Normalize the ray_directions to unit l2 norm.
        rays_directions_normed = torch.nn.functional.normalize(rays_directions, dim=-1)

        # Obtain the harmonic embedding of the normalized ray directions.
        rays_embedding = self.harmonic_embedding_dir(rays_directions_normed)

        return self.color_layer((self.intermediate_linear(features), rays_embedding))

    def _get_densities_and_colors(
        self, features: torch.Tensor, ray_bundle: RayBundle, density_noise_std: float
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        The second part of the forward calculation.

        Args:
            features: the output of the common mlp (the prior part of the
                calculation), shape
                (minibatch x ... x self.n_hidden_neurons_xyz).
            ray_bundle: As for forward().
            density_noise_std: As for forward().

        Returns:
            rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`
                denoting the opacity of each ray point.
            rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`
                denoting the color of each ray point.
        """
        if self.use_multiple_streams and features.is_cuda:
            # overlap the density and color branches on two CUDA streams
            current_stream = torch.cuda.current_stream(features.device)
            other_stream = torch.cuda.Stream(features.device)
            other_stream.wait_stream(current_stream)

            with torch.cuda.stream(other_stream):
                rays_densities = self._get_densities(
                    features, ray_bundle.lengths, density_noise_std
                )
                # rays_densities.shape = [minibatch x ... x 1] in [0-1]

            rays_colors = self._get_colors(features, ray_bundle.directions)
            # rays_colors.shape = [minibatch x ... x 3] in [0-1]

            # re-synchronize before the results are consumed together
            current_stream.wait_stream(other_stream)
        else:
            # Same calculation as above, just serial.
            rays_densities = self._get_densities(
                features, ray_bundle.lengths, density_noise_std
            )
            rays_colors = self._get_colors(features, ray_bundle.directions)
        return rays_densities, rays_colors

    def forward(
        self,
        ray_bundle: RayBundle,
        density_noise_std: float = 0.0,
        **kwargs,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        The forward function accepts the parametrizations of
        3D points sampled along projection rays. The forward
        pass is responsible for attaching a 3D vector
        and a 1D scalar representing the point's
        RGB color and opacity respectively.

        Args:
            ray_bundle: A RayBundle object containing the following variables:
                origins: A tensor of shape `(minibatch, ..., 3)` denoting the
                    origins of the sampling rays in world coords.
                directions: A tensor of shape `(minibatch, ..., 3)`
                    containing the direction vectors of sampling rays in world coords.
                lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`
                    containing the lengths at which the rays are sampled.
            density_noise_std: A floating point value representing the
                variance of the random normal noise added to the output of
                the opacity function. This can prevent floating artifacts.

        Returns:
            rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`
                denoting the opacity of each ray point.
            rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`
                denoting the color of each ray point.
        """
        # We first convert the ray parametrizations to world
        # coordinates with `ray_bundle_to_ray_points`.
        rays_points_world = ray_bundle_to_ray_points(ray_bundle)
        # rays_points_world.shape = [minibatch x ... x 3]

        # For each 3D world coordinate, we obtain its harmonic embedding.
        embeds_xyz = self.harmonic_embedding_xyz(rays_points_world)
        # embeds_xyz.shape = [minibatch x ... x self.n_harmonic_functions*6 + 3]

        # self.mlp maps each harmonic embedding to a latent feature space;
        # the embedding is passed twice: once as input, once as the skip tensor.
        features = self.mlp_xyz(embeds_xyz, embeds_xyz)
        # features.shape = [minibatch x ... x self.n_hidden_neurons_xyz]

        rays_densities, rays_colors = self._get_densities_and_colors(
            features, ray_bundle, density_noise_std
        )
        return rays_densities, rays_colors
227
+
228
+
229
+ class MLPWithInputSkips(torch.nn.Module):
230
+ """
231
+ Implements the multi-layer perceptron architecture of the Neural Radiance Field.
232
+
233
+ As such, `MLPWithInputSkips` is a multi layer perceptron consisting
234
+ of a sequence of linear layers with ReLU activations.
235
+
236
+ Additionally, for a set of predefined layers `input_skips`, the forward pass
237
+ appends a skip tensor `z` to the output of the preceding layer.
238
+
239
+ Note that this follows the architecture described in the Supplementary
240
+ Material (Fig. 7) of [1].
241
+
242
+ References:
243
+ [1] Ben Mildenhall and Pratul P. Srinivasan and Matthew Tancik
244
+ and Jonathan T. Barron and Ravi Ramamoorthi and Ren Ng:
245
+ NeRF: Representing Scenes as Neural Radiance Fields for View
246
+ Synthesis, ECCV2020
247
+ """
248
+
249
+ def __init__(
250
+ self,
251
+ n_layers: int,
252
+ input_dim: int,
253
+ output_dim: int,
254
+ skip_dim: int,
255
+ hidden_dim: int,
256
+ input_skips: Tuple[int, ...] = (),
257
+ ):
258
+ """
259
+ Args:
260
+ n_layers: The number of linear layers of the MLP.
261
+ input_dim: The number of channels of the input tensor.
262
+ output_dim: The number of channels of the output.
263
+ skip_dim: The number of channels of the tensor `z` appended when
264
+ evaluating the skip layers.
265
+ hidden_dim: The number of hidden units of the MLP.
266
+ input_skips: The list of layer indices at which we append the skip
267
+ tensor `z`.
268
+ """
269
+ super().__init__()
270
+ layers = []
271
+ for layeri in range(n_layers):
272
+ if layeri == 0:
273
+ dimin = input_dim
274
+ dimout = hidden_dim
275
+ elif layeri in input_skips:
276
+ dimin = hidden_dim + skip_dim
277
+ dimout = hidden_dim
278
+ else:
279
+ dimin = hidden_dim
280
+ dimout = hidden_dim
281
+ linear = torch.nn.Linear(dimin, dimout)
282
+ _xavier_init(linear)
283
+ layers.append(torch.nn.Sequential(linear, torch.nn.ReLU(True)))
284
+ self.mlp = torch.nn.ModuleList(layers)
285
+ self._input_skips = set(input_skips)
286
+
287
+ def forward(self, x: torch.Tensor, z: torch.Tensor) -> torch.Tensor:
288
+ """
289
+ Args:
290
+ x: The input tensor of shape `(..., input_dim)`.
291
+ z: The input skip tensor of shape `(..., skip_dim)` which is appended
292
+ to layers whose indices are specified by `input_skips`.
293
+ Returns:
294
+ y: The output tensor of shape `(..., output_dim)`.
295
+ """
296
+ y = x
297
+ for li, layer in enumerate(self.mlp):
298
+ if li in self._input_skips:
299
+ y = torch.cat((y, z), dim=-1)
300
+ y = layer(y)
301
+ return y
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/nerf_renderer.py ADDED
@@ -0,0 +1,436 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import List, Optional, Tuple
8
+
9
+ import torch
10
+ from pytorch3d.renderer import ImplicitRenderer, ray_bundle_to_ray_points
11
+ from pytorch3d.renderer.cameras import CamerasBase
12
+ from pytorch3d.structures import Pointclouds
13
+ from pytorch3d.vis.plotly_vis import plot_scene
14
+ from visdom import Visdom
15
+
16
+ from .implicit_function import NeuralRadianceField
17
+ from .raymarcher import EmissionAbsorptionNeRFRaymarcher
18
+ from .raysampler import NeRFRaysampler, ProbabilisticRaysampler
19
+ from .utils import calc_mse, calc_psnr, sample_images_at_mc_locs
20
+
21
+
22
class RadianceFieldRenderer(torch.nn.Module):
    """
    Implements a renderer of a Neural Radiance Field.

    This class holds pointers to the fine and coarse renderer objects, which are
    instances of `pytorch3d.renderer.ImplicitRenderer`, and pointers to the
    neural networks representing the fine and coarse Neural Radiance Fields,
    which are instances of `NeuralRadianceField`.

    The rendering forward pass proceeds as follows:
        1) For a given input camera, rendering rays are generated with the
            `NeRFRaysampler` object of `self._renderer['coarse']`.
            In the training mode (`self.training==True`), the rays are a set
            of `n_rays_per_image` random 2D locations of the image grid.
            In the evaluation mode (`self.training==False`), the rays correspond
            to the full image grid. The rays are further split to
            `chunk_size_test`-sized chunks to prevent out-of-memory errors.
        2) For each ray point, the coarse `NeuralRadianceField` MLP is evaluated.
            The pointer to this MLP is stored in `self._implicit_function['coarse']`
        3) The coarse radiance field is rendered with the
            `EmissionAbsorptionNeRFRaymarcher` object of `self._renderer['coarse']`.
        4) The coarse raymarcher outputs a probability distribution that guides
            the importance raysampling of the fine rendering pass. The
            `ProbabilisticRaysampler` stored in `self._renderer['fine'].raysampler`
            implements the importance ray-sampling.
        5) Similar to 2) the fine MLP in `self._implicit_function['fine']`
            labels the ray points with occupancies and colors.
        6) self._renderer['fine'].raymarcher` generates the final fine render.
        7) The fine and coarse renders are compared to the ground truth input image
            with PSNR and MSE metrics.
    """

    def __init__(
        self,
        image_size: Tuple[int, int],
        n_pts_per_ray: int,
        n_pts_per_ray_fine: int,
        n_rays_per_image: int,
        min_depth: float,
        max_depth: float,
        stratified: bool,
        stratified_test: bool,
        chunk_size_test: int,
        n_harmonic_functions_xyz: int = 6,
        n_harmonic_functions_dir: int = 4,
        n_hidden_neurons_xyz: int = 256,
        n_hidden_neurons_dir: int = 128,
        n_layers_xyz: int = 8,
        append_xyz: Tuple[int, ...] = (5,),
        density_noise_std: float = 0.0,
        visualization: bool = False,
    ):
        """
        Args:
            image_size: The size of the rendered image (`[height, width]`).
            n_pts_per_ray: The number of points sampled along each ray for the
                coarse rendering pass.
            n_pts_per_ray_fine: The number of points sampled along each ray for the
                fine rendering pass.
            n_rays_per_image: Number of Monte Carlo ray samples when training
                (`self.training==True`).
            min_depth: The minimum depth of a sampled ray-point for the coarse rendering.
            max_depth: The maximum depth of a sampled ray-point for the coarse rendering.
            stratified: If `True`, stratifies (=randomly offsets) the depths
                of each ray point during training (`self.training==True`).
            stratified_test: If `True`, stratifies (=randomly offsets) the depths
                of each ray point during evaluation (`self.training==False`).
            chunk_size_test: The number of rays in each chunk of image rays.
                Active only when `self.training==False` (full-image rendering).
            n_harmonic_functions_xyz: The number of harmonic functions
                used to form the harmonic embedding of 3D point locations.
            n_harmonic_functions_dir: The number of harmonic functions
                used to form the harmonic embedding of the ray directions.
            n_hidden_neurons_xyz: The number of hidden units in the
                fully connected layers of the MLP that accepts the 3D point
                locations and outputs the occupancy field with the intermediate
                features.
            n_hidden_neurons_dir: The number of hidden units in the
                fully connected layers of the MLP that accepts the intermediate
                features and ray directions and outputs the radiance field
                (per-point colors).
            n_layers_xyz: The number of layers of the MLP that outputs the
                occupancy field.
            append_xyz: The list of indices of the skip layers of the occupancy MLP.
                Prior to evaluating the skip layers, the tensor which was input to MLP
                is appended to the skip layer input.
            density_noise_std: The standard deviation of the random normal noise
                added to the output of the occupancy MLP.
                Active only when `self.training==True`.
            visualization: whether to store extra output for visualization.
        """

        super().__init__()

        # The renderers and implicit functions are stored under the fine/coarse
        # keys in ModuleDict PyTorch modules.
        self._renderer = torch.nn.ModuleDict()
        self._implicit_function = torch.nn.ModuleDict()

        # Init the EA raymarcher used by both passes.
        raymarcher = EmissionAbsorptionNeRFRaymarcher()

        # Parse out image dimensions.
        image_height, image_width = image_size

        for render_pass in ("coarse", "fine"):
            if render_pass == "coarse":
                # Initialize the coarse raysampler.
                raysampler = NeRFRaysampler(
                    n_pts_per_ray=n_pts_per_ray,
                    min_depth=min_depth,
                    max_depth=max_depth,
                    stratified=stratified,
                    stratified_test=stratified_test,
                    n_rays_per_image=n_rays_per_image,
                    image_height=image_height,
                    image_width=image_width,
                )
            elif render_pass == "fine":
                # Initialize the fine raysampler.
                raysampler = ProbabilisticRaysampler(
                    n_pts_per_ray=n_pts_per_ray_fine,
                    stratified=stratified,
                    stratified_test=stratified_test,
                )
            else:
                raise ValueError(f"No such rendering pass {render_pass}")

            # Initialize the fine/coarse renderer.
            self._renderer[render_pass] = ImplicitRenderer(
                raysampler=raysampler,
                raymarcher=raymarcher,
            )

            # Instantiate the fine/coarse NeuralRadianceField module.
            self._implicit_function[render_pass] = NeuralRadianceField(
                n_harmonic_functions_xyz=n_harmonic_functions_xyz,
                n_harmonic_functions_dir=n_harmonic_functions_dir,
                n_hidden_neurons_xyz=n_hidden_neurons_xyz,
                n_hidden_neurons_dir=n_hidden_neurons_dir,
                n_layers_xyz=n_layers_xyz,
                append_xyz=append_xyz,
            )

        self._density_noise_std = density_noise_std
        self._chunk_size_test = chunk_size_test
        self._image_size = image_size
        self.visualization = visualization

    def precache_rays(
        self,
        cache_cameras: List[CamerasBase],
        cache_camera_hashes: List[str],
    ):
        """
        Precaches the rays emitted from the list of cameras `cache_cameras`,
        where each camera is uniquely identified with the corresponding hash
        from `cache_camera_hashes`.

        The cached rays are moved to cpu and stored in
        `self._renderer['coarse']._ray_cache`.

        Raises `ValueError` when caching two cameras with the same hash.

        Args:
            cache_cameras: A list of `N` cameras for which the rays are pre-cached.
            cache_camera_hashes: A list of `N` unique identifiers for each
                camera from `cameras`.
        """
        self._renderer["coarse"].raysampler.precache_rays(
            cache_cameras,
            cache_camera_hashes,
        )

    def _process_ray_chunk(
        self,
        camera_hash: Optional[str],
        camera: CamerasBase,
        image: torch.Tensor,
        chunk_idx: int,
    ) -> dict:
        """
        Samples and renders a chunk of rays.

        Args:
            camera_hash: A unique identifier of a pre-cached camera.
                If `None`, the cache is not searched and the sampled rays are
                calculated from scratch.
            camera: A batch of cameras from which the scene is rendered.
            image: A batch of corresponding ground truth images of shape
                ('batch_size', ·, ·, 3).
            chunk_idx: The index of the currently rendered ray chunk.
        Returns:
            out: `dict` containing the outputs of the rendering:
                `rgb_coarse`: The result of the coarse rendering pass.
                `rgb_fine`: The result of the fine rendering pass.
                `rgb_gt`: The corresponding ground-truth RGB values.
        """
        # Initialize the outputs of the coarse rendering to None.
        coarse_ray_bundle = None
        coarse_weights = None

        # First evaluate the coarse rendering pass, then the fine one.
        for renderer_pass in ("coarse", "fine"):
            (rgb, weights), ray_bundle_out = self._renderer[renderer_pass](
                cameras=camera,
                volumetric_function=self._implicit_function[renderer_pass],
                chunksize=self._chunk_size_test,
                chunk_idx=chunk_idx,
                density_noise_std=(self._density_noise_std if self.training else 0.0),
                input_ray_bundle=coarse_ray_bundle,
                ray_weights=coarse_weights,
                camera_hash=camera_hash,
            )

            if renderer_pass == "coarse":
                rgb_coarse = rgb
                # Store the weights and the rays of the first rendering pass
                # for the ensuing importance ray-sampling of the fine render.
                coarse_ray_bundle = ray_bundle_out
                coarse_weights = weights
                if image is not None:
                    # Sample the ground truth images at the xy locations of the
                    # rendering ray pixels.
                    rgb_gt = sample_images_at_mc_locs(
                        image[..., :3][None],
                        ray_bundle_out.xys,
                    )
                else:
                    rgb_gt = None

            elif renderer_pass == "fine":
                rgb_fine = rgb

            else:
                raise ValueError(f"No such rendering pass {renderer_pass}")

        out = {"rgb_fine": rgb_fine, "rgb_coarse": rgb_coarse, "rgb_gt": rgb_gt}
        if self.visualization:
            # Store the coarse rays/weights only for visualization purposes.
            # The ray bundle is rebuilt field-by-field so each tensor can be
            # detached and moved to the cpu.
            out["coarse_ray_bundle"] = type(coarse_ray_bundle)(
                *[v.detach().cpu() for k, v in coarse_ray_bundle._asdict().items()]
            )
            out["coarse_weights"] = coarse_weights.detach().cpu()

        return out

    def forward(
        self,
        camera_hash: Optional[str],
        camera: CamerasBase,
        image: torch.Tensor,
    ) -> Tuple[dict, dict]:
        """
        Performs the coarse and fine rendering passes of the radiance field
        from the viewpoint of the input `camera`.
        Afterwards, both renders are compared to the input ground truth `image`
        by evaluating the peak signal-to-noise ratio and the mean-squared error.

        The rendering result depends on the `self.training` flag:
            - In the training mode (`self.training==True`), the function renders
              a random subset of image rays (Monte Carlo rendering).
            - In evaluation mode (`self.training==False`), the function renders
              the full image. In order to prevent out-of-memory errors,
              when `self.training==False`, the rays are sampled and rendered
              in batches of size `chunksize`.

        Args:
            camera_hash: A unique identifier of a pre-cached camera.
                If `None`, the cache is not searched and the sampled rays are
                calculated from scratch.
            camera: A batch of cameras from which the scene is rendered.
            image: A batch of corresponding ground truth images of shape
                ('batch_size', ·, ·, 3).
        Returns:
            out: `dict` containing the outputs of the rendering:
                `rgb_coarse`: The result of the coarse rendering pass.
                `rgb_fine`: The result of the fine rendering pass.
                `rgb_gt`: The corresponding ground-truth RGB values.

                The shape of `rgb_coarse`, `rgb_fine`, `rgb_gt` depends on the
                `self.training` flag:
                    If `==True`, all 3 tensors are of shape
                    `(batch_size, n_rays_per_image, 3)` and contain the result
                    of the Monte Carlo training rendering pass.
                    If `==False`, all 3 tensors are of shape
                    `(batch_size, image_size[0], image_size[1], 3)` and contain
                    the result of the full image rendering pass.
            metrics: `dict` containing the error metrics comparing the fine and
                coarse renders to the ground truth:
                `mse_coarse`: Mean-squared error between the coarse render and
                    the input `image`
                `mse_fine`: Mean-squared error between the fine render and
                    the input `image`
                `psnr_coarse`: Peak signal-to-noise ratio between the coarse render and
                    the input `image`
                `psnr_fine`: Peak signal-to-noise ratio between the fine render and
                    the input `image`
        """
        if not self.training:
            # Full evaluation pass.
            n_chunks = self._renderer["coarse"].raysampler.get_n_chunks(
                self._chunk_size_test,
                camera.R.shape[0],
            )
        else:
            # MonteCarlo ray sampling.
            n_chunks = 1

        # Process the chunks of rays.
        chunk_outputs = [
            self._process_ray_chunk(
                camera_hash,
                camera,
                image,
                chunk_idx,
            )
            for chunk_idx in range(n_chunks)
        ]

        if not self.training:
            # For a full render pass concatenate the output chunks,
            # and reshape to image size.
            out = {
                k: (
                    torch.cat(
                        [ch_o[k] for ch_o in chunk_outputs],
                        dim=1,
                    ).view(-1, *self._image_size, 3)
                    if chunk_outputs[0][k] is not None
                    else None
                )
                for k in ("rgb_fine", "rgb_coarse", "rgb_gt")
            }
        else:
            out = chunk_outputs[0]

        # Calc the error metrics.
        metrics = {}
        if image is not None:
            for render_pass in ("coarse", "fine"):
                for metric_name, metric_fun in zip(
                    ("mse", "psnr"), (calc_mse, calc_psnr)
                ):
                    metrics[f"{metric_name}_{render_pass}"] = metric_fun(
                        out["rgb_" + render_pass][..., :3],
                        out["rgb_gt"][..., :3],
                    )

        return out, metrics
372
+
373
+
374
def visualize_nerf_outputs(
    nerf_out: dict, output_cache: List, viz: Visdom, visdom_env: str
):
    """
    Visualizes the outputs of the `RadianceFieldRenderer`.

    Three visdom windows are produced: the training images, the
    coarse/fine/ground-truth renders side by side, and a 3D plotly scene of
    the training cameras with their emitted rays.

    Args:
        nerf_out: An output of the validation rendering pass.
        output_cache: A list with outputs of several training render passes;
            each entry is expected to carry "image", "camera" and
            "coarse_ray_bundle" keys.
        viz: A visdom connection object.
        visdom_env: The name of visdom environment for visualization.
    """

    # Show the training images, concatenated into a single horizontal strip.
    ims = torch.stack([o["image"] for o in output_cache])
    ims = torch.cat(list(ims), dim=1)
    viz.image(
        # visdom expects channel-first (C, H, W) images
        ims.permute(2, 0, 1),
        env=visdom_env,
        win="images",
        opts={"title": "train_images"},
    )

    # Show the coarse and fine renders together with the ground truth images.
    ims_full = torch.cat(
        [
            nerf_out[imvar][0].permute(2, 0, 1).detach().cpu().clamp(0.0, 1.0)
            for imvar in ("rgb_coarse", "rgb_fine", "rgb_gt")
        ],
        dim=2,
    )
    viz.image(
        ims_full,
        env=visdom_env,
        win="images_full",
        opts={"title": "coarse | fine | target"},
    )

    # Make a 3D plot of training cameras and their emitted rays.
    camera_trace = {
        f"camera_{ci:03d}": o["camera"].cpu() for ci, o in enumerate(output_cache)
    }
    # Convert each cached coarse ray bundle to a point cloud of ray samples.
    ray_pts_trace = {
        f"ray_pts_{ci:03d}": Pointclouds(
            ray_bundle_to_ray_points(o["coarse_ray_bundle"])
            .detach()
            .cpu()
            .view(1, -1, 3)
        )
        for ci, o in enumerate(output_cache)
    }
    plotly_plot = plot_scene(
        {
            "training_scene": {
                **camera_trace,
                **ray_pts_trace,
            },
        },
        pointcloud_max_points=5000,
        pointcloud_marker_size=1,
        camera_scale=0.3,
    )
    viz.plotlyplot(plotly_plot, env=visdom_env, win="scenes")
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/raymarcher.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ from pytorch3d.renderer import EmissionAbsorptionRaymarcher
9
+ from pytorch3d.renderer.implicit.raymarching import (
10
+ _check_density_bounds,
11
+ _check_raymarcher_inputs,
12
+ _shifted_cumprod,
13
+ )
14
+
15
+
16
class EmissionAbsorptionNeRFRaymarcher(EmissionAbsorptionRaymarcher):
    """
    This is essentially the `pytorch3d.renderer.EmissionAbsorptionRaymarcher`
    which additionally returns the rendering weights. It also skips returning
    the computation of the alpha-mask which is, in case of NeRF, equal to 1
    everywhere.

    The weights are later used in the NeRF pipeline to carry out the importance
    ray-sampling for the fine rendering pass.

    For more details about the EmissionAbsorptionRaymarcher please refer to
    the documentation of `pytorch3d.renderer.EmissionAbsorptionRaymarcher`.
    """

    def forward(
        self,
        rays_densities: torch.Tensor,
        rays_features: torch.Tensor,
        eps: float = 1e-10,
        **kwargs,
    ) -> tuple:  # annotation corrected: a (features, weights) pair is returned
        """
        Args:
            rays_densities: Per-ray density values represented with a tensor
                of shape `(..., n_points_per_ray, 1)` whose values range in [0, 1].
            rays_features: Per-ray feature values represented with a tensor
                of shape `(..., n_points_per_ray, feature_dim)`.
            eps: A lower bound added to `rays_densities` before computing
                the absorption function (cumprod of `1-rays_densities` along
                each ray). This prevents the cumprod to yield exact 0
                which would inhibit any gradient-based learning.

        Returns:
            features: A tensor of shape `(..., feature_dim)` containing
                the rendered features for each ray.
            weights: A tensor of shape `(..., n_points_per_ray)` containing
                the ray-specific emission-absorption distribution.
                Each ray distribution `(..., :)` is a valid probability
                distribution, i.e. it contains non-negative values that integrate
                to 1, such that `weights.sum(dim=-1)==1).all()` yields `True`.
        """
        # Validate shapes and value ranges with the same helpers used by the
        # base raymarcher (z-values are not needed here, hence None).
        _check_raymarcher_inputs(
            rays_densities,
            rays_features,
            None,
            z_can_be_none=True,
            features_can_be_none=False,
            density_1d=True,
        )
        _check_density_bounds(rays_densities)
        # Drop the trailing singleton channel -> shape (..., n_points_per_ray).
        rays_densities = rays_densities[..., 0]
        # Transmittance up to each sample along the ray; the `eps` offset keeps
        # the cumulative product strictly positive so gradients can flow.
        absorption = _shifted_cumprod(
            (1.0 + eps) - rays_densities, shift=self.surface_thickness
        )
        weights = rays_densities * absorption
        # Emission-absorption weighted sum of features over the points axis.
        features = (weights[..., None] * rays_features).sum(dim=-2)

        return features, weights
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/raysampler.py ADDED
@@ -0,0 +1,365 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import math
8
+ from typing import List
9
+
10
+ import torch
11
+ from pytorch3d.renderer import MonteCarloRaysampler, NDCMultinomialRaysampler, RayBundle
12
+ from pytorch3d.renderer.cameras import CamerasBase
13
+ from pytorch3d.renderer.implicit.sample_pdf import sample_pdf
14
+
15
+
16
class ProbabilisticRaysampler(torch.nn.Module):
    """
    Implements the importance sampling of points along rays.
    The input is a `RayBundle` object with a `ray_weights` tensor
    which specifies the probabilities of sampling a point along each ray.

    This raysampler is used for the fine rendering pass of NeRF.
    As such, the forward pass accepts the RayBundle output by the
    raysampling of the coarse rendering pass. Hence, it does not
    take cameras as input.
    """

    def __init__(
        self,
        n_pts_per_ray: int,
        stratified: bool,
        stratified_test: bool,
        add_input_samples: bool = True,
    ):
        """
        Args:
            n_pts_per_ray: The number of points to sample along each ray.
            stratified: If `True`, the input `ray_weights` are assumed to be
                sampled at equidistant intervals.
            stratified_test: Same as `stratified` with the difference that this
                setting is applied when the module is in the `eval` mode
                (`self.training==False`).
            add_input_samples: Concatenates and returns the sampled values
                together with the input samples.
        """
        super().__init__()
        self._n_pts_per_ray = n_pts_per_ray
        self._stratified = stratified
        self._stratified_test = stratified_test
        self._add_input_samples = add_input_samples

    def forward(
        self,
        input_ray_bundle: RayBundle,
        ray_weights: torch.Tensor,
        **kwargs,
    ) -> RayBundle:
        """
        Args:
            input_ray_bundle: An instance of `RayBundle` specifying the
                source rays for sampling of the probability distribution.
            ray_weights: A tensor of shape
                `(..., input_ray_bundle.lengths.shape[-1])` with non-negative
                elements defining the probability distribution to sample
                ray points from.

        Returns:
            ray_bundle: A new `RayBundle` instance containing the input ray
                points together with `n_pts_per_ray` additional sampled
                points per ray.
        """

        depths = input_ray_bundle.lengths
        n_rays_batch = depths.shape[0]

        # Randomize (i.e. do NOT sample deterministically) only when the
        # relevant stratification flag matches the current train/eval mode.
        if self.training:
            randomize = self._stratified
        else:
            randomize = self._stratified_test

        # Importance-sample new depths from the per-bin weight distribution.
        # No gradients should flow through the sampling itself.
        with torch.no_grad():
            bin_centers = 0.5 * (depths[..., 1:] + depths[..., :-1])
            flat_centers = bin_centers.view(-1, bin_centers.shape[-1])
            flat_weights = ray_weights.view(-1, ray_weights.shape[-1])[..., 1:-1]
            new_depths = sample_pdf(
                flat_centers,
                flat_weights,
                self._n_pts_per_ray,
                det=not randomize,
            ).view(n_rays_batch, depths.shape[1], self._n_pts_per_ray)

        if self._add_input_samples:
            # Keep the coarse-pass samples alongside the new ones.
            depths = torch.cat((depths, new_depths), dim=-1)
        else:
            depths = new_depths
        # Keep the per-ray depths sorted from near to far.
        depths = torch.sort(depths, dim=-1)[0]

        return RayBundle(
            origins=input_ray_bundle.origins,
            directions=input_ray_bundle.directions,
            lengths=depths,
            xys=input_ray_bundle.xys,
        )
104
+
105
+
106
class NeRFRaysampler(torch.nn.Module):
    """
    Implements the raysampler of NeRF.

    Depending on the `self.training` flag, the raysampler either samples
    a chunk of random rays (`self.training==True`), or returns a subset of rays
    of the full image grid (`self.training==False`).
    The chunking of rays allows for efficient evaluation of the NeRF implicit
    surface function without encountering out-of-GPU-memory errors.

    Additionally, this raysampler supports pre-caching of the ray bundles
    for a set of input cameras (`self.precache_rays`).
    Pre-caching the rays before training greatly speeds-up the ensuing
    raysampling step of the training NeRF iterations.
    """

    def __init__(
        self,
        n_pts_per_ray: int,
        min_depth: float,
        max_depth: float,
        n_rays_per_image: int,
        image_width: int,
        image_height: int,
        stratified: bool = False,
        stratified_test: bool = False,
    ):
        """
        Args:
            n_pts_per_ray: The number of points sampled along each ray.
            min_depth: The minimum depth of a ray-point.
            max_depth: The maximum depth of a ray-point.
            n_rays_per_image: Number of Monte Carlo ray samples when training
                (`self.training==True`).
            image_width: The horizontal size of the image grid.
            image_height: The vertical size of the image grid.
            stratified: If `True`, stratifies (=randomly offsets) the depths
                of each ray point during training (`self.training==True`).
            stratified_test: If `True`, stratifies (=randomly offsets) the depths
                of each ray point during evaluation (`self.training==False`).
        """

        super().__init__()
        self._stratified = stratified
        self._stratified_test = stratified_test

        # Initialize the grid ray sampler.
        # Used for evaluation and for precaching: emits one ray per pixel.
        self._grid_raysampler = NDCMultinomialRaysampler(
            image_width=image_width,
            image_height=image_height,
            n_pts_per_ray=n_pts_per_ray,
            min_depth=min_depth,
            max_depth=max_depth,
        )

        # Initialize the Monte Carlo ray sampler.
        # Used during training: emits `n_rays_per_image` random rays over the
        # full NDC square [-1, 1] x [-1, 1].
        self._mc_raysampler = MonteCarloRaysampler(
            min_x=-1.0,
            max_x=1.0,
            min_y=-1.0,
            max_y=1.0,
            n_rays_per_image=n_rays_per_image,
            n_pts_per_ray=n_pts_per_ray,
            min_depth=min_depth,
            max_depth=max_depth,
        )

        # create empty ray cache
        # Maps camera hash -> full-image RayBundle stored on the CPU.
        self._ray_cache = {}

    def get_n_chunks(self, chunksize: int, batch_size: int):
        """
        Returns the total number of `chunksize`-sized chunks
        of the raysampler's rays.

        Args:
            chunksize: The number of rays per chunk.
            batch_size: The size of the batch of the raysampler.

        Returns:
            n_chunks: The total number of chunks.
        """
        # `_xy_grid` stores 2 coordinates per pixel, hence `numel() * 0.5`
        # is the number of pixels (= rays) in one image.
        return int(
            math.ceil(
                (self._grid_raysampler._xy_grid.numel() * 0.5 * batch_size) / chunksize
            )
        )

    def _print_precaching_progress(self, i, total, bar_len=30):
        """
        Print a progress bar for ray precaching.

        Args:
            i: Zero-based index of the last finished item.
            total: Total number of items.
            bar_len: Width of the bar in characters.
        """
        position = round((i + 1) / total * bar_len)
        pbar = "[" + "█" * position + " " * (bar_len - position) + "]"
        # `end="\r"` redraws the bar in place on each call.
        print(pbar, end="\r")

    def precache_rays(self, cameras: List[CamerasBase], camera_hashes: List):
        """
        Precaches the rays emitted from the list of cameras `cameras`,
        where each camera is uniquely identified with the corresponding hash
        from `camera_hashes`.

        The cached rays are moved to cpu and stored in `self._ray_cache`.
        Raises `ValueError` when caching two cameras with the same hash.

        Args:
            cameras: A list of `N` cameras for which the rays are pre-cached.
            camera_hashes: A list of `N` unique identifiers of each
                camera from `cameras`.
        """
        print(f"Precaching {len(cameras)} ray bundles ...")
        # One chunk large enough to hold every ray point of a full image:
        # (numel() // 2) pixels, each with `_n_pts_per_ray` samples.
        full_chunksize = (
            self._grid_raysampler._xy_grid.numel()
            // 2
            * self._grid_raysampler._n_pts_per_ray
        )
        if self.get_n_chunks(full_chunksize, 1) != 1:
            raise ValueError("There has to be one chunk for precaching rays!")
        for camera_i, (camera, camera_hash) in enumerate(zip(cameras, camera_hashes)):
            # `caching=True` forces the full-grid branch of `forward` and
            # disables stratification so the cache stays deterministic.
            ray_bundle = self.forward(
                camera,
                caching=True,
                chunksize=full_chunksize,
            )
            if camera_hash in self._ray_cache:
                raise ValueError("There are redundant cameras!")
            # Store on CPU, detached, so the cache holds no graph/GPU memory.
            self._ray_cache[camera_hash] = RayBundle(
                *[v.to("cpu").detach() for v in ray_bundle]
            )
            self._print_precaching_progress(camera_i, len(cameras))
        print("")

    def _stratify_ray_bundle(self, ray_bundle: RayBundle):
        """
        Stratifies the lengths of the input `ray_bundle`.

        More specifically, the stratification replaces each ray points' depth `z`
        with a sample from a uniform random distribution on
        `[z - delta_depth, z+delta_depth]`, where `delta_depth` is the difference
        of depths of the consecutive ray depth values.

        Args:
            `ray_bundle`: The input `RayBundle`.

        Returns:
            `stratified_ray_bundle`: `ray_bundle` whose `lengths` field is replaced
                with the stratified samples.
        """
        z_vals = ray_bundle.lengths
        # Get intervals between samples.
        mids = 0.5 * (z_vals[..., 1:] + z_vals[..., :-1])
        upper = torch.cat((mids, z_vals[..., -1:]), dim=-1)
        lower = torch.cat((z_vals[..., :1], mids), dim=-1)
        # Stratified samples in those intervals.
        z_vals = lower + (upper - lower) * torch.rand_like(lower)
        return ray_bundle._replace(lengths=z_vals)

    def _normalize_raybundle(self, ray_bundle: RayBundle):
        """
        Normalizes the ray directions of the input `RayBundle` to unit norm.
        """
        ray_bundle = ray_bundle._replace(
            directions=torch.nn.functional.normalize(ray_bundle.directions, dim=-1)
        )
        return ray_bundle

    def forward(
        self,
        cameras: CamerasBase,
        chunksize: int = None,  # None => a single chunk spanning all rays
        chunk_idx: int = 0,
        camera_hash: str = None,
        caching: bool = False,
        **kwargs,
    ) -> RayBundle:
        """
        Args:
            cameras: A batch of `batch_size` cameras from which the rays are emitted.
            chunksize: The number of rays per chunk.
                Active only when `self.training==False`.
            chunk_idx: The index of the ray chunk. The number has to be in
                `[0, self.get_n_chunks(chunksize, batch_size)-1]`.
                Active only when `self.training==False`.
            camera_hash: A unique identifier of a pre-cached camera. If `None`,
                the cache is not searched and the rays are calculated from scratch.
            caching: If `True`, activates the caching mode that returns the `RayBundle`
                that should be stored into the cache.
        Returns:
            A named tuple `RayBundle` with the following fields:
                origins: A tensor of shape
                    `(batch_size, n_rays_per_image, 3)`
                    denoting the locations of ray origins in the world coordinates.
                directions: A tensor of shape
                    `(batch_size, n_rays_per_image, 3)`
                    denoting the directions of each ray in the world coordinates.
                lengths: A tensor of shape
                    `(batch_size, n_rays_per_image, n_pts_per_ray)`
                    containing the z-coordinate (=depth) of each ray in world units.
                xys: A tensor of shape
                    `(batch_size, n_rays_per_image, 2)`
                    containing the 2D image coordinates of each ray.
        """

        batch_size = cameras.R.shape[0]  # pyre-ignore
        device = cameras.device

        if (camera_hash is None) and (not caching) and self.training:
            # Sample random rays from scratch.
            ray_bundle = self._mc_raysampler(cameras)
            ray_bundle = self._normalize_raybundle(ray_bundle)
        else:
            if camera_hash is not None:
                # The case where we retrieve a camera from cache.
                if batch_size != 1:
                    raise NotImplementedError(
                        "Ray caching works only for batches with a single camera!"
                    )
                full_ray_bundle = self._ray_cache[camera_hash]
            else:
                # We generate a full ray grid from scratch.
                full_ray_bundle = self._grid_raysampler(cameras)
                full_ray_bundle = self._normalize_raybundle(full_ray_bundle)

            # Total number of rays across the full image grid (and batch).
            n_pixels = full_ray_bundle.directions.shape[:-1].numel()

            if self.training:
                # During training we randomly subsample rays.
                sel_rays = torch.randperm(
                    n_pixels, device=full_ray_bundle.lengths.device
                )[: self._mc_raysampler._n_rays_per_image]
            else:
                # In case we test, we take only the requested chunk.
                if chunksize is None:
                    chunksize = n_pixels * batch_size
                # NOTE(review): `start` scales with `batch_size` but `end` adds
                # only `chunksize` and clamps to `n_pixels` — looks consistent
                # only for batch_size==1; confirm intended behavior for larger
                # batches before relying on it.
                start = chunk_idx * chunksize * batch_size
                end = min(start + chunksize, n_pixels)
                sel_rays = torch.arange(
                    start,
                    end,
                    dtype=torch.long,
                    device=full_ray_bundle.lengths.device,
                )

            # Take the "sel_rays" rays from the full ray bundle.
            ray_bundle = RayBundle(
                *[
                    v.view(n_pixels, -1)[sel_rays]
                    .view(batch_size, sel_rays.numel() // batch_size, -1)
                    .to(device)
                    for v in full_ray_bundle
                ]
            )

        if (
            (self._stratified and self.training)
            or (self._stratified_test and not self.training)
        ) and not caching:  # Make sure not to stratify when caching!
            ray_bundle = self._stratify_ray_bundle(ray_bundle)

        return ray_bundle
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/stats.py ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import time
8
+ import warnings
9
+ from itertools import cycle
10
+ from typing import List, Optional
11
+
12
+ import matplotlib
13
+ import matplotlib.pyplot as plt
14
+ import numpy as np
15
+ from matplotlib import colors as mcolors
16
+ from visdom import Visdom
17
+
18
+
19
class AverageMeter:
    """
    Computes and stores the average and current value.
    Tracks the exact history of the added values in every epoch.
    """

    def __init__(self) -> None:
        """
        Create the meter with an empty per-epoch history and a zeroed
        running average.
        """
        self.history = []
        self.reset()

    def reset(self) -> None:
        """
        Zero the running statistics. The per-epoch history is kept.
        """
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val: float, n: int = 1, epoch: int = 0) -> None:
        """
        Updates the average meter with a value `val`.

        Args:
            val: A float to be added to the meter.
            n: Represents the number of entities to be added.
            epoch: The epoch to which the number should be added.
        """
        # Grow the history so that `epoch` indexes a valid (possibly empty) slot.
        missing = epoch + 1 - len(self.history)
        if missing > 0:
            self.history.extend([] for _ in range(missing))
        self.history[epoch].append(val / n)
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def get_epoch_averages(self):
        """
        Returns:
            averages: A list of average values of the metric for each epoch
                in the history buffer.
        """
        if not self.history:
            return None
        averages = []
        for epoch_vals in self.history:
            if epoch_vals:
                averages.append(float(np.array(epoch_vals).mean()))
            else:
                # Epochs that received no updates are reported as NaN.
                averages.append(float("NaN"))
        return averages
71
+
72
+
73
class Stats:
    """
    Stats logging object useful for gathering statistics of training
    a deep network in PyTorch.

    Example:
    ```
    # Init stats structure that logs statistics 'objective' and 'top1e'.
    stats = Stats( ('objective','top1e') )

    network = init_net() # init a pytorch module (=neural network)
    dataloader = init_dataloader() # init a dataloader

    for epoch in range(10):

        # start of epoch -> call new_epoch
        stats.new_epoch()

        # Iterate over batches.
        for batch in dataloader:
            # Run a model and save into a dict of output variables "output"
            output = network(batch)

            # stats.update() automatically parses the 'objective' and 'top1e'
            # from the "output" dict and stores this into the db.
            stats.update(output)
            stats.print() # prints the averages over given epoch

        # Stores the training plots into '/tmp/epoch_stats.pdf'
        # and plots into a visdom server running at localhost (if running).
        stats.plot_stats(plot_file='/tmp/epoch_stats.pdf')
    ```
    """

    def __init__(
        self,
        log_vars: List[str],
        verbose: bool = False,
        epoch: int = -1,
        plot_file: Optional[str] = None,
    ) -> None:
        """
        Args:
            log_vars: The list of variable names to be logged.
            verbose: Print status messages.
            epoch: The initial epoch of the object.
            plot_file: The path to the file that will hold the training plots.
        """
        self.verbose = verbose
        self.log_vars = log_vars
        self.plot_file = plot_file
        self.hard_reset(epoch=epoch)

    def reset(self) -> None:
        """
        Called before an epoch to clear current epoch buffers.
        """
        stat_sets = list(self.stats.keys())
        if self.verbose:
            print("stats: epoch %d - reset" % self.epoch)
        self.it = {k: -1 for k in stat_sets}
        for stat_set in stat_sets:
            for stat in self.stats[stat_set]:
                self.stats[stat_set][stat].reset()

        # Set a new timestamp.
        self._epoch_start = time.time()

    def hard_reset(self, epoch: int = -1) -> None:
        """
        Erases all logged data.
        """
        self._epoch_start = None
        self.epoch = epoch
        if self.verbose:
            print("stats: epoch %d - hard reset" % self.epoch)
        self.stats = {}
        self.reset()

    def new_epoch(self) -> None:
        """
        Initializes a new epoch.
        """
        if self.verbose:
            print("stats: new epoch %d" % (self.epoch + 1))
        self.epoch += 1  # increase epoch counter
        self.reset()  # zero the stats

    def _gather_value(self, val):
        """
        Convert a logged value to a plain float.

        Floats pass through unchanged; anything else is assumed to be a
        tensor-like object and is summed into a scalar on the CPU.
        """
        if isinstance(val, float):
            pass
        else:
            val = val.data.cpu().numpy()
            val = float(val.sum())
        return val

    def update(self, preds: dict, stat_set: str = "train") -> None:
        """
        Update the internal logs with metrics of a training step.

        Each metric is stored as an instance of an AverageMeter.

        Args:
            preds: Dict of values to be added to the logs.
            stat_set: The set of statistics to be updated (e.g. "train", "val").
        """

        if self.epoch == -1:  # uninitialized
            warnings.warn(
                "self.epoch==-1 means uninitialized stats structure"
                " -> new_epoch() called"
            )
            self.new_epoch()

        if stat_set not in self.stats:
            # Lazily create the stat set on first use.
            self.stats[stat_set] = {}
            self.it[stat_set] = -1

        self.it[stat_set] += 1

        epoch = self.epoch
        it = self.it[stat_set]

        for stat in self.log_vars:

            if stat not in self.stats[stat_set]:
                self.stats[stat_set][stat] = AverageMeter()

            if stat == "sec/it":  # compute speed
                elapsed = time.time() - self._epoch_start
                time_per_it = float(elapsed) / float(it + 1)
                val = time_per_it
            else:
                if stat in preds:
                    val = self._gather_value(preds[stat])
                else:
                    # Metrics absent from `preds` are simply skipped.
                    val = None

            if val is not None:
                self.stats[stat_set][stat].update(val, epoch=epoch, n=1)

    def print(self, max_it: Optional[int] = None, stat_set: str = "train") -> None:
        """
        Print the current values of all stored stats.

        Args:
            max_it: Maximum iteration number to be displayed.
                If None, the maximum iteration number is not displayed.
            stat_set: The set of statistics to be printed.
        """

        epoch = self.epoch
        stats = self.stats

        it = self.it[stat_set]
        stat_str = ""
        stats_print = sorted(stats[stat_set].keys())
        for stat in stats_print:
            if stats[stat_set][stat].count == 0:
                continue
            stat_str += " {0:.12}: {1:1.3f} |".format(stat, stats[stat_set][stat].avg)

        head_str = f"[{stat_set}] | epoch {epoch} | it {it}"
        if max_it:
            head_str += f"/ {max_it}"

        str_out = f"{head_str} | {stat_str}"

        print(str_out)

    def plot_stats(
        self,
        viz: Optional[Visdom] = None,  # was `Visdom = None`: None is a valid value
        visdom_env: Optional[str] = None,
        plot_file: Optional[str] = None,
    ) -> None:
        """
        Plot the line charts of the history of the stats.

        Args:
            viz: The Visdom object holding the connection to a Visdom server.
            visdom_env: The visdom environment for storing the graphs.
            plot_file: The path to a file with training plots.
        """

        stat_sets = list(self.stats.keys())

        if viz is None:
            withvisdom = False
        elif not viz.check_connection():
            warnings.warn("Cannot connect to the visdom server! Skipping visdom plots.")
            withvisdom = False
        else:
            withvisdom = True

        # Collect one (stat_sets, name, x, values) record per logged variable.
        lines = []

        for stat in self.log_vars:
            vals = []
            stat_sets_now = []
            for stat_set in stat_sets:
                val = self.stats[stat_set][stat].get_epoch_averages()
                if val is None:
                    continue
                else:
                    val = np.array(val).reshape(-1)
                    stat_sets_now.append(stat_set)
                    vals.append(val)

            if len(vals) == 0:
                continue

            vals = np.stack(vals, axis=1)
            x = np.arange(vals.shape[0])

            lines.append((stat_sets_now, stat, x, vals))

        if withvisdom:
            for tmodes, stat, x, vals in lines:
                title = "%s" % stat
                opts = {"title": title, "legend": list(tmodes)}
                for i, (tmode, val) in enumerate(zip(tmodes, vals.T)):
                    update = "append" if i > 0 else None
                    # Fix: build a boolean mask of finite entries. The previous
                    # `valid = np.where(...); if len(valid) == 0` check could
                    # never trigger because `np.where` returns a 1-tuple.
                    finite = np.isfinite(val)
                    if not finite.any():
                        continue
                    viz.line(
                        Y=val[finite],
                        X=x[finite],
                        env=visdom_env,
                        opts=opts,
                        win=f"stat_plot_{title}",
                        name=tmode,
                        update=update,
                    )

        if plot_file is None:
            plot_file = self.plot_file

        if plot_file is not None:
            print("Exporting stats to %s" % plot_file)
            ncol = 3
            nrow = int(np.ceil(float(len(lines)) / ncol))
            matplotlib.rcParams.update({"font.size": 5})
            color = cycle(plt.cm.tab10(np.linspace(0, 1, 10)))
            fig = plt.figure(1)
            plt.clf()
            for idx, (tmodes, stat, x, vals) in enumerate(lines):
                c = next(color)
                plt.subplot(nrow, ncol, idx + 1)
                for vali, vals_ in enumerate(vals.T):
                    c_ = c * (1.0 - float(vali) * 0.3)
                    # Same finite-mask fix as in the visdom branch above.
                    finite = np.isfinite(vals_)
                    if not finite.any():
                        continue
                    plt.plot(x[finite], vals_[finite], c=c_, linewidth=1)
                plt.ylabel(stat)
                plt.xlabel("epoch")
                plt.gca().yaxis.label.set_color(c[0:3] * 0.75)
                plt.legend(tmodes)
                gcolor = np.array(mcolors.to_rgba("lightgray"))
                # Pass the visibility flag positionally: the `b=` keyword was
                # removed in matplotlib 3.6 (renamed to `visible`), and the
                # positional form works on all versions.
                plt.grid(
                    True, which="major", color=gcolor, linestyle="-", linewidth=0.4
                )
                plt.grid(
                    True, which="minor", color=gcolor, linestyle="--", linewidth=0.2
                )
                plt.minorticks_on()

            plt.tight_layout()
            plt.show()
            fig.savefig(plot_file)
+ fig.savefig(plot_file)
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/nerf/utils.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+
9
+
10
+ def calc_mse(x: torch.Tensor, y: torch.Tensor):
11
+ """
12
+ Calculates the mean square error between tensors `x` and `y`.
13
+ """
14
+ return torch.mean((x - y) ** 2)
15
+
16
+
17
+ def calc_psnr(x: torch.Tensor, y: torch.Tensor):
18
+ """
19
+ Calculates the Peak-signal-to-noise ratio between tensors `x` and `y`.
20
+ """
21
+ mse = calc_mse(x, y)
22
+ psnr = -10.0 * torch.log10(mse)
23
+ return psnr
24
+
25
+
26
+ def sample_images_at_mc_locs(
27
+ target_images: torch.Tensor,
28
+ sampled_rays_xy: torch.Tensor,
29
+ ):
30
+ """
31
+ Given a set of pixel locations `sampled_rays_xy` this method samples the tensor
32
+ `target_images` at the respective 2D locations.
33
+
34
+ This function is used in order to extract the colors from ground truth images
35
+ that correspond to the colors rendered using a Monte Carlo rendering.
36
+
37
+ Args:
38
+ target_images: A tensor of shape `(batch_size, ..., 3)`.
39
+ sampled_rays_xy: A tensor of shape `(batch_size, S_1, ..., S_N, 2)`.
40
+
41
+ Returns:
42
+ images_sampled: A tensor of shape `(batch_size, S_1, ..., S_N, 3)`
43
+ containing `target_images` sampled at `sampled_rays_xy`.
44
+ """
45
+ ba = target_images.shape[0]
46
+ dim = target_images.shape[-1]
47
+ spatial_size = sampled_rays_xy.shape[1:-1]
48
+
49
+ # The coordinate grid convention for grid_sample has both x and y
50
+ # directions inverted.
51
+ xy_sample = -sampled_rays_xy.view(ba, -1, 1, 2).clone()
52
+
53
+ images_sampled = torch.nn.functional.grid_sample(
54
+ target_images.permute(0, 3, 1, 2),
55
+ xy_sample,
56
+ align_corners=True,
57
+ mode="bilinear",
58
+ )
59
+ return images_sampled.permute(0, 2, 3, 1).view(ba, *spatial_size, dim)
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/test_nerf.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ import os
9
+ import warnings
10
+
11
+ import hydra
12
+ import numpy as np
13
+ import torch
14
+ from nerf.dataset import get_nerf_datasets, trivial_collate
15
+ from nerf.eval_video_utils import generate_eval_video_cameras
16
+ from nerf.nerf_renderer import RadianceFieldRenderer
17
+ from nerf.stats import Stats
18
+ from omegaconf import DictConfig
19
+ from PIL import Image
20
+
21
+
22
+ CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs")
23
+
24
+
25
+ @hydra.main(config_path=CONFIG_DIR, config_name="lego")
26
+ def main(cfg: DictConfig):
27
+
28
+ # Device on which to run.
29
+ if torch.cuda.is_available():
30
+ device = "cuda"
31
+ else:
32
+ warnings.warn(
33
+ "Please note that although executing on CPU is supported,"
34
+ + "the testing is unlikely to finish in reasonable time."
35
+ )
36
+ device = "cpu"
37
+
38
+ # Initialize the Radiance Field model.
39
+ model = RadianceFieldRenderer(
40
+ image_size=cfg.data.image_size,
41
+ n_pts_per_ray=cfg.raysampler.n_pts_per_ray,
42
+ n_pts_per_ray_fine=cfg.raysampler.n_pts_per_ray,
43
+ n_rays_per_image=cfg.raysampler.n_rays_per_image,
44
+ min_depth=cfg.raysampler.min_depth,
45
+ max_depth=cfg.raysampler.max_depth,
46
+ stratified=cfg.raysampler.stratified,
47
+ stratified_test=cfg.raysampler.stratified_test,
48
+ chunk_size_test=cfg.raysampler.chunk_size_test,
49
+ n_harmonic_functions_xyz=cfg.implicit_function.n_harmonic_functions_xyz,
50
+ n_harmonic_functions_dir=cfg.implicit_function.n_harmonic_functions_dir,
51
+ n_hidden_neurons_xyz=cfg.implicit_function.n_hidden_neurons_xyz,
52
+ n_hidden_neurons_dir=cfg.implicit_function.n_hidden_neurons_dir,
53
+ n_layers_xyz=cfg.implicit_function.n_layers_xyz,
54
+ density_noise_std=cfg.implicit_function.density_noise_std,
55
+ )
56
+
57
+ # Move the model to the relevant device.
58
+ model.to(device)
59
+
60
+ # Resume from the checkpoint.
61
+ checkpoint_path = os.path.join(hydra.utils.get_original_cwd(), cfg.checkpoint_path)
62
+ if not os.path.isfile(checkpoint_path):
63
+ raise ValueError(f"Model checkpoint {checkpoint_path} does not exist!")
64
+
65
+ print(f"Loading checkpoint {checkpoint_path}.")
66
+ loaded_data = torch.load(checkpoint_path)
67
+ # Do not load the cached xy grid.
68
+ # - this allows setting an arbitrary evaluation image size.
69
+ state_dict = {
70
+ k: v
71
+ for k, v in loaded_data["model"].items()
72
+ if "_grid_raysampler._xy_grid" not in k
73
+ }
74
+ model.load_state_dict(state_dict, strict=False)
75
+
76
+ # Load the test data.
77
+ if cfg.test.mode == "evaluation":
78
+ _, _, test_dataset = get_nerf_datasets(
79
+ dataset_name=cfg.data.dataset_name,
80
+ image_size=cfg.data.image_size,
81
+ )
82
+ elif cfg.test.mode == "export_video":
83
+ train_dataset, _, _ = get_nerf_datasets(
84
+ dataset_name=cfg.data.dataset_name,
85
+ image_size=cfg.data.image_size,
86
+ )
87
+ test_dataset = generate_eval_video_cameras(
88
+ train_dataset,
89
+ trajectory_type=cfg.test.trajectory_type,
90
+ up=cfg.test.up,
91
+ scene_center=cfg.test.scene_center,
92
+ n_eval_cams=cfg.test.n_frames,
93
+ trajectory_scale=cfg.test.trajectory_scale,
94
+ )
95
+ # store the video in directory (checkpoint_file - extension + '_video')
96
+ export_dir = os.path.splitext(checkpoint_path)[0] + "_video"
97
+ os.makedirs(export_dir, exist_ok=True)
98
+ else:
99
+ raise ValueError(f"Unknown test mode {cfg.test_mode}.")
100
+
101
+ # Init the test dataloader.
102
+ test_dataloader = torch.utils.data.DataLoader(
103
+ test_dataset,
104
+ batch_size=1,
105
+ shuffle=False,
106
+ num_workers=0,
107
+ collate_fn=trivial_collate,
108
+ )
109
+
110
+ if cfg.test.mode == "evaluation":
111
+ # Init the test stats object.
112
+ eval_stats = ["mse_coarse", "mse_fine", "psnr_coarse", "psnr_fine", "sec/it"]
113
+ stats = Stats(eval_stats)
114
+ stats.new_epoch()
115
+ elif cfg.test.mode == "export_video":
116
+ # Init the frame buffer.
117
+ frame_paths = []
118
+
119
+ # Set the model to the eval mode.
120
+ model.eval()
121
+
122
+ # Run the main testing loop.
123
+ for batch_idx, test_batch in enumerate(test_dataloader):
124
+ test_image, test_camera, camera_idx = test_batch[0].values()
125
+ if test_image is not None:
126
+ test_image = test_image.to(device)
127
+ test_camera = test_camera.to(device)
128
+
129
+ # Activate eval mode of the model (lets us do a full rendering pass).
130
+ model.eval()
131
+ with torch.no_grad():
132
+ test_nerf_out, test_metrics = model(
133
+ None, # we do not use pre-cached cameras
134
+ test_camera,
135
+ test_image,
136
+ )
137
+
138
+ if cfg.test.mode == "evaluation":
139
+ # Update stats with the validation metrics.
140
+ stats.update(test_metrics, stat_set="test")
141
+ stats.print(stat_set="test")
142
+
143
+ elif cfg.test.mode == "export_video":
144
+ # Store the video frame.
145
+ frame = test_nerf_out["rgb_fine"][0].detach().cpu()
146
+ frame_path = os.path.join(export_dir, f"frame_{batch_idx:05d}.png")
147
+ print(f"Writing {frame_path}.")
148
+ Image.fromarray((frame.numpy() * 255.0).astype(np.uint8)).save(frame_path)
149
+ frame_paths.append(frame_path)
150
+
151
+ if cfg.test.mode == "evaluation":
152
+ print(f"Final evaluation metrics on '{cfg.data.dataset_name}':")
153
+ for stat in eval_stats:
154
+ stat_value = stats.stats["test"][stat].get_epoch_averages()[0]
155
+ print(f"{stat:15s}: {stat_value:1.4f}")
156
+
157
+ elif cfg.test.mode == "export_video":
158
+ # Convert the exported frames to a video.
159
+ video_path = os.path.join(export_dir, "video.mp4")
160
+ ffmpeg_bin = "ffmpeg"
161
+ frame_regexp = os.path.join(export_dir, "frame_%05d.png")
162
+ ffmcmd = (
163
+ "%s -r %d -i %s -vcodec h264 -f mp4 -y -b 2000k -pix_fmt yuv420p %s"
164
+ % (ffmpeg_bin, cfg.test.fps, frame_regexp, video_path)
165
+ )
166
+ ret = os.system(ffmcmd)
167
+ if ret != 0:
168
+ raise RuntimeError("ffmpeg failed!")
169
+
170
+
171
+ if __name__ == "__main__":
172
+ main()
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/tests/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/tests/test_raymarcher.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import unittest
8
+
9
+ import torch
10
+ from nerf.raymarcher import EmissionAbsorptionNeRFRaymarcher
11
+ from pytorch3d.renderer import EmissionAbsorptionRaymarcher
12
+
13
+
14
+ class TestRaymarcher(unittest.TestCase):
15
+ def setUp(self) -> None:
16
+ torch.manual_seed(42)
17
+
18
+ def test_raymarcher(self):
19
+ """
20
+ Checks that the nerf raymarcher outputs are identical to the
21
+ EmissionAbsorptionRaymarcher.
22
+ """
23
+
24
+ feat_dim = 3
25
+ rays_densities = torch.rand(100, 10, 1)
26
+ rays_features = torch.randn(100, 10, feat_dim)
27
+
28
+ out, out_nerf = [
29
+ raymarcher(rays_densities, rays_features)
30
+ for raymarcher in (
31
+ EmissionAbsorptionRaymarcher(),
32
+ EmissionAbsorptionNeRFRaymarcher(),
33
+ )
34
+ ]
35
+
36
+ self.assertTrue(
37
+ torch.allclose(out[..., :feat_dim], out_nerf[0][..., :feat_dim])
38
+ )
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/tests/test_raysampler.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import unittest
8
+
9
+ import torch
10
+ from nerf.raysampler import NeRFRaysampler, ProbabilisticRaysampler
11
+ from pytorch3d.renderer import PerspectiveCameras
12
+ from pytorch3d.transforms.rotation_conversions import random_rotations
13
+
14
+
15
+ class TestRaysampler(unittest.TestCase):
16
+ def setUp(self) -> None:
17
+ torch.manual_seed(42)
18
+
19
+ def test_raysampler_caching(self, batch_size=10):
20
+ """
21
+ Tests the consistency of the NeRF raysampler caching.
22
+ """
23
+
24
+ raysampler = NeRFRaysampler(
25
+ min_x=0.0,
26
+ max_x=10.0,
27
+ min_y=0.0,
28
+ max_y=10.0,
29
+ n_pts_per_ray=10,
30
+ min_depth=0.1,
31
+ max_depth=10.0,
32
+ n_rays_per_image=12,
33
+ image_width=10,
34
+ image_height=10,
35
+ stratified=False,
36
+ stratified_test=False,
37
+ invert_directions=True,
38
+ )
39
+
40
+ raysampler.eval()
41
+
42
+ cameras, rays = [], []
43
+
44
+ for _ in range(batch_size):
45
+
46
+ R = random_rotations(1)
47
+ T = torch.randn(1, 3)
48
+ focal_length = torch.rand(1, 2) + 0.5
49
+ principal_point = torch.randn(1, 2)
50
+
51
+ camera = PerspectiveCameras(
52
+ focal_length=focal_length,
53
+ principal_point=principal_point,
54
+ R=R,
55
+ T=T,
56
+ )
57
+
58
+ cameras.append(camera)
59
+ rays.append(raysampler(camera))
60
+
61
+ raysampler.precache_rays(cameras, list(range(batch_size)))
62
+
63
+ for cam_index, rays_ in enumerate(rays):
64
+ rays_cached_ = raysampler(
65
+ cameras=cameras[cam_index],
66
+ chunksize=None,
67
+ chunk_idx=0,
68
+ camera_hash=cam_index,
69
+ caching=False,
70
+ )
71
+
72
+ for v, v_cached in zip(rays_, rays_cached_):
73
+ self.assertTrue(torch.allclose(v, v_cached))
74
+
75
+ def test_probabilistic_raysampler(self, batch_size=1, n_pts_per_ray=60):
76
+ """
77
+ Check that the probabilistic ray sampler does not crash for various
78
+ settings.
79
+ """
80
+
81
+ raysampler_grid = NeRFRaysampler(
82
+ min_x=0.0,
83
+ max_x=10.0,
84
+ min_y=0.0,
85
+ max_y=10.0,
86
+ n_pts_per_ray=n_pts_per_ray,
87
+ min_depth=1.0,
88
+ max_depth=10.0,
89
+ n_rays_per_image=12,
90
+ image_width=10,
91
+ image_height=10,
92
+ stratified=False,
93
+ stratified_test=False,
94
+ invert_directions=True,
95
+ )
96
+
97
+ R = random_rotations(batch_size)
98
+ T = torch.randn(batch_size, 3)
99
+ focal_length = torch.rand(batch_size, 2) + 0.5
100
+ principal_point = torch.randn(batch_size, 2)
101
+ camera = PerspectiveCameras(
102
+ focal_length=focal_length,
103
+ principal_point=principal_point,
104
+ R=R,
105
+ T=T,
106
+ )
107
+
108
+ raysampler_grid.eval()
109
+
110
+ ray_bundle = raysampler_grid(cameras=camera)
111
+
112
+ ray_weights = torch.rand_like(ray_bundle.lengths)
113
+
114
+ # Just check that we dont crash for all possible settings.
115
+ for stratified_test in (True, False):
116
+ for stratified in (True, False):
117
+ raysampler_prob = ProbabilisticRaysampler(
118
+ n_pts_per_ray=n_pts_per_ray,
119
+ stratified=stratified,
120
+ stratified_test=stratified_test,
121
+ add_input_samples=True,
122
+ )
123
+ for mode in ("train", "eval"):
124
+ getattr(raysampler_prob, mode)()
125
+ for _ in range(10):
126
+ raysampler_prob(ray_bundle, ray_weights)
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/nerf/train_nerf.py ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ # All rights reserved.
4
+ #
5
+ # This source code is licensed under the BSD-style license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ import collections
9
+ import os
10
+ import pickle
11
+ import warnings
12
+
13
+ import hydra
14
+ import numpy as np
15
+ import torch
16
+ from nerf.dataset import get_nerf_datasets, trivial_collate
17
+ from nerf.nerf_renderer import RadianceFieldRenderer, visualize_nerf_outputs
18
+ from nerf.stats import Stats
19
+ from omegaconf import DictConfig
20
+ from visdom import Visdom
21
+
22
+
23
+ CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs")
24
+
25
+
26
+ @hydra.main(config_path=CONFIG_DIR, config_name="lego")
27
+ def main(cfg: DictConfig):
28
+
29
+ # Set the relevant seeds for reproducibility.
30
+ np.random.seed(cfg.seed)
31
+ torch.manual_seed(cfg.seed)
32
+
33
+ # Device on which to run.
34
+ if torch.cuda.is_available():
35
+ device = "cuda"
36
+ else:
37
+ warnings.warn(
38
+ "Please note that although executing on CPU is supported,"
39
+ + "the training is unlikely to finish in reasonable time."
40
+ )
41
+ device = "cpu"
42
+
43
+ # Initialize the Radiance Field model.
44
+ model = RadianceFieldRenderer(
45
+ image_size=cfg.data.image_size,
46
+ n_pts_per_ray=cfg.raysampler.n_pts_per_ray,
47
+ n_pts_per_ray_fine=cfg.raysampler.n_pts_per_ray,
48
+ n_rays_per_image=cfg.raysampler.n_rays_per_image,
49
+ min_depth=cfg.raysampler.min_depth,
50
+ max_depth=cfg.raysampler.max_depth,
51
+ stratified=cfg.raysampler.stratified,
52
+ stratified_test=cfg.raysampler.stratified_test,
53
+ chunk_size_test=cfg.raysampler.chunk_size_test,
54
+ n_harmonic_functions_xyz=cfg.implicit_function.n_harmonic_functions_xyz,
55
+ n_harmonic_functions_dir=cfg.implicit_function.n_harmonic_functions_dir,
56
+ n_hidden_neurons_xyz=cfg.implicit_function.n_hidden_neurons_xyz,
57
+ n_hidden_neurons_dir=cfg.implicit_function.n_hidden_neurons_dir,
58
+ n_layers_xyz=cfg.implicit_function.n_layers_xyz,
59
+ density_noise_std=cfg.implicit_function.density_noise_std,
60
+ visualization=cfg.visualization.visdom,
61
+ )
62
+
63
+ # Move the model to the relevant device.
64
+ model.to(device)
65
+
66
+ # Init stats to None before loading.
67
+ stats = None
68
+ optimizer_state_dict = None
69
+ start_epoch = 0
70
+
71
+ checkpoint_path = os.path.join(hydra.utils.get_original_cwd(), cfg.checkpoint_path)
72
+ if len(cfg.checkpoint_path) > 0:
73
+ # Make the root of the experiment directory.
74
+ checkpoint_dir = os.path.split(checkpoint_path)[0]
75
+ os.makedirs(checkpoint_dir, exist_ok=True)
76
+
77
+ # Resume training if requested.
78
+ if cfg.resume and os.path.isfile(checkpoint_path):
79
+ print(f"Resuming from checkpoint {checkpoint_path}.")
80
+ loaded_data = torch.load(checkpoint_path)
81
+ model.load_state_dict(loaded_data["model"])
82
+ stats = pickle.loads(loaded_data["stats"])
83
+ print(f" => resuming from epoch {stats.epoch}.")
84
+ optimizer_state_dict = loaded_data["optimizer"]
85
+ start_epoch = stats.epoch
86
+
87
+ # Initialize the optimizer.
88
+ optimizer = torch.optim.Adam(
89
+ model.parameters(),
90
+ lr=cfg.optimizer.lr,
91
+ )
92
+
93
+ # Load the optimizer state dict in case we are resuming.
94
+ if optimizer_state_dict is not None:
95
+ optimizer.load_state_dict(optimizer_state_dict)
96
+ optimizer.last_epoch = start_epoch
97
+
98
+ # Init the stats object.
99
+ if stats is None:
100
+ stats = Stats(
101
+ ["loss", "mse_coarse", "mse_fine", "psnr_coarse", "psnr_fine", "sec/it"],
102
+ )
103
+
104
+ # Learning rate scheduler setup.
105
+
106
+ # Following the original code, we use exponential decay of the
107
+ # learning rate: current_lr = base_lr * gamma ** (epoch / step_size)
108
+ def lr_lambda(epoch):
109
+ return cfg.optimizer.lr_scheduler_gamma ** (
110
+ epoch / cfg.optimizer.lr_scheduler_step_size
111
+ )
112
+
113
+ # The learning rate scheduling is implemented with LambdaLR PyTorch scheduler.
114
+ lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
115
+ optimizer, lr_lambda, last_epoch=start_epoch - 1, verbose=False
116
+ )
117
+
118
+ # Initialize the cache for storing variables needed for visualization.
119
+ visuals_cache = collections.deque(maxlen=cfg.visualization.history_size)
120
+
121
+ # Init the visualization visdom env.
122
+ if cfg.visualization.visdom:
123
+ viz = Visdom(
124
+ server=cfg.visualization.visdom_server,
125
+ port=cfg.visualization.visdom_port,
126
+ use_incoming_socket=False,
127
+ )
128
+ else:
129
+ viz = None
130
+
131
+ # Load the training/validation data.
132
+ train_dataset, val_dataset, _ = get_nerf_datasets(
133
+ dataset_name=cfg.data.dataset_name,
134
+ image_size=cfg.data.image_size,
135
+ )
136
+
137
+ if cfg.data.precache_rays:
138
+ # Precache the projection rays.
139
+ model.eval()
140
+ with torch.no_grad():
141
+ for dataset in (train_dataset, val_dataset):
142
+ cache_cameras = [e["camera"].to(device) for e in dataset]
143
+ cache_camera_hashes = [e["camera_idx"] for e in dataset]
144
+ model.precache_rays(cache_cameras, cache_camera_hashes)
145
+
146
+ train_dataloader = torch.utils.data.DataLoader(
147
+ train_dataset,
148
+ batch_size=1,
149
+ shuffle=True,
150
+ num_workers=0,
151
+ collate_fn=trivial_collate,
152
+ )
153
+
154
+ # The validation dataloader is just an endless stream of random samples.
155
+ val_dataloader = torch.utils.data.DataLoader(
156
+ val_dataset,
157
+ batch_size=1,
158
+ num_workers=0,
159
+ collate_fn=trivial_collate,
160
+ sampler=torch.utils.data.RandomSampler(
161
+ val_dataset,
162
+ replacement=True,
163
+ num_samples=cfg.optimizer.max_epochs,
164
+ ),
165
+ )
166
+
167
+ # Set the model to the training mode.
168
+ model.train()
169
+
170
+ # Run the main training loop.
171
+ for epoch in range(start_epoch, cfg.optimizer.max_epochs):
172
+ stats.new_epoch() # Init a new epoch.
173
+ for iteration, batch in enumerate(train_dataloader):
174
+ image, camera, camera_idx = batch[0].values()
175
+ image = image.to(device)
176
+ camera = camera.to(device)
177
+
178
+ optimizer.zero_grad()
179
+
180
+ # Run the forward pass of the model.
181
+ nerf_out, metrics = model(
182
+ camera_idx if cfg.data.precache_rays else None,
183
+ camera,
184
+ image,
185
+ )
186
+
187
+ # The loss is a sum of coarse and fine MSEs
188
+ loss = metrics["mse_coarse"] + metrics["mse_fine"]
189
+
190
+ # Take the training step.
191
+ loss.backward()
192
+ optimizer.step()
193
+
194
+ # Update stats with the current metrics.
195
+ stats.update(
196
+ {"loss": float(loss), **metrics},
197
+ stat_set="train",
198
+ )
199
+
200
+ if iteration % cfg.stats_print_interval == 0:
201
+ stats.print(stat_set="train")
202
+
203
+ # Update the visualization cache.
204
+ if viz is not None:
205
+ visuals_cache.append(
206
+ {
207
+ "camera": camera.cpu(),
208
+ "camera_idx": camera_idx,
209
+ "image": image.cpu().detach(),
210
+ "rgb_fine": nerf_out["rgb_fine"].cpu().detach(),
211
+ "rgb_coarse": nerf_out["rgb_coarse"].cpu().detach(),
212
+ "rgb_gt": nerf_out["rgb_gt"].cpu().detach(),
213
+ "coarse_ray_bundle": nerf_out["coarse_ray_bundle"],
214
+ }
215
+ )
216
+
217
+ # Adjust the learning rate.
218
+ lr_scheduler.step()
219
+
220
+ # Validation
221
+ if epoch % cfg.validation_epoch_interval == 0 and epoch > 0:
222
+
223
+ # Sample a validation camera/image.
224
+ val_batch = next(val_dataloader.__iter__())
225
+ val_image, val_camera, camera_idx = val_batch[0].values()
226
+ val_image = val_image.to(device)
227
+ val_camera = val_camera.to(device)
228
+
229
+ # Activate eval mode of the model (lets us do a full rendering pass).
230
+ model.eval()
231
+ with torch.no_grad():
232
+ val_nerf_out, val_metrics = model(
233
+ camera_idx if cfg.data.precache_rays else None,
234
+ val_camera,
235
+ val_image,
236
+ )
237
+
238
+ # Update stats with the validation metrics.
239
+ stats.update(val_metrics, stat_set="val")
240
+ stats.print(stat_set="val")
241
+
242
+ if viz is not None:
243
+ # Plot that loss curves into visdom.
244
+ stats.plot_stats(
245
+ viz=viz,
246
+ visdom_env=cfg.visualization.visdom_env,
247
+ plot_file=None,
248
+ )
249
+ # Visualize the intermediate results.
250
+ visualize_nerf_outputs(
251
+ val_nerf_out, visuals_cache, viz, cfg.visualization.visdom_env
252
+ )
253
+
254
+ # Set the model back to train mode.
255
+ model.train()
256
+
257
+ # Checkpoint.
258
+ if (
259
+ epoch % cfg.checkpoint_epoch_interval == 0
260
+ and len(cfg.checkpoint_path) > 0
261
+ and epoch > 0
262
+ ):
263
+ print(f"Storing checkpoint {checkpoint_path}.")
264
+ data_to_store = {
265
+ "model": model.state_dict(),
266
+ "optimizer": optimizer.state_dict(),
267
+ "stats": pickle.dumps(stats),
268
+ }
269
+ torch.save(data_to_store, checkpoint_path)
270
+
271
+
272
+ if __name__ == "__main__":
273
+ main()
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ __version__ = "0.7.8"
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ from .r2n2 import BlenderCamera, collate_batched_R2N2, R2N2, render_cubified_voxels
10
+ from .shapenet import ShapeNetCore
11
+ from .utils import collate_batched_meshes
12
+
13
+
14
+ __all__ = [k for k in globals().keys() if not k.startswith("_")]
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/r2n2/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ from .r2n2 import R2N2
10
+ from .utils import BlenderCamera, collate_batched_R2N2, render_cubified_voxels
11
+
12
+
13
+ __all__ = [k for k in globals().keys() if not k.startswith("_")]
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/r2n2/r2n2.py ADDED
@@ -0,0 +1,427 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import json
10
+ import warnings
11
+ from os import path
12
+ from pathlib import Path
13
+ from typing import Dict, List, Optional
14
+
15
+ import numpy as np
16
+ import torch
17
+ from PIL import Image
18
+ from pytorch3d.common.datatypes import Device
19
+ from pytorch3d.datasets.shapenet_base import ShapeNetBase
20
+ from pytorch3d.renderer import HardPhongShader
21
+ from tabulate import tabulate
22
+
23
+ from .utils import (
24
+ align_bbox,
25
+ BlenderCamera,
26
+ compute_extrinsic_matrix,
27
+ read_binvox_coords,
28
+ voxelize,
29
+ )
30
+
31
+
32
+ SYNSET_DICT_DIR = Path(__file__).resolve().parent
33
+ MAX_CAMERA_DISTANCE = 1.75 # Constant from R2N2.
34
+ VOXEL_SIZE = 128
35
+ # Intrinsic matrix extracted from Blender. Taken from meshrcnn codebase:
36
+ # https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/coords.py
37
+ BLENDER_INTRINSIC = torch.tensor(
38
+ [
39
+ [2.1875, 0.0, 0.0, 0.0],
40
+ [0.0, 2.1875, 0.0, 0.0],
41
+ [0.0, 0.0, -1.002002, -0.2002002],
42
+ [0.0, 0.0, -1.0, 0.0],
43
+ ]
44
+ )
45
+
46
+
47
+ class R2N2(ShapeNetBase): # pragma: no cover
48
+ """
49
+ This class loads the R2N2 dataset from a given directory into a Dataset object.
50
+ The R2N2 dataset contains 13 categories that are a subset of the ShapeNetCore v.1
51
+ dataset. The R2N2 dataset also contains its own 24 renderings of each object and
52
+ voxelized models. Most of the models have all 24 views in the same split, but there
53
+ are eight of them that divide their views between train and test splits.
54
+
55
+ """
56
+
57
+ def __init__(
58
+ self,
59
+ split: str,
60
+ shapenet_dir: str,
61
+ r2n2_dir: str,
62
+ splits_file: str,
63
+ return_all_views: bool = True,
64
+ return_voxels: bool = False,
65
+ views_rel_path: str = "ShapeNetRendering",
66
+ voxels_rel_path: str = "ShapeNetVoxels",
67
+ load_textures: bool = True,
68
+ texture_resolution: int = 4,
69
+ ) -> None:
70
+ """
71
+ Store each object's synset id and models id the given directories.
72
+
73
+ Args:
74
+ split (str): One of (train, val, test).
75
+ shapenet_dir (str): Path to ShapeNet core v1.
76
+ r2n2_dir (str): Path to the R2N2 dataset.
77
+ splits_file (str): File containing the train/val/test splits.
78
+ return_all_views (bool): Indicator of whether or not to load all the views in
79
+ the split. If set to False, one of the views in the split will be randomly
80
+ selected and loaded.
81
+ return_voxels(bool): Indicator of whether or not to return voxels as a tensor
82
+ of shape (D, D, D) where D is the number of voxels along each dimension.
83
+ views_rel_path: path to rendered views within the r2n2_dir. If not specified,
84
+ the renderings are assumed to be at os.path.join(rn2n_dir, "ShapeNetRendering").
85
+ voxels_rel_path: path to rendered views within the r2n2_dir. If not specified,
86
+ the renderings are assumed to be at os.path.join(rn2n_dir, "ShapeNetVoxels").
87
+ load_textures: Boolean indicating whether textures should loaded for the model.
88
+ Textures will be of type TexturesAtlas i.e. a texture map per face.
89
+ texture_resolution: Int specifying the resolution of the texture map per face
90
+ created using the textures in the obj file. A
91
+ (texture_resolution, texture_resolution, 3) map is created per face.
92
+
93
+ """
94
+ super().__init__()
95
+ self.shapenet_dir = shapenet_dir
96
+ self.r2n2_dir = r2n2_dir
97
+ self.views_rel_path = views_rel_path
98
+ self.voxels_rel_path = voxels_rel_path
99
+ self.load_textures = load_textures
100
+ self.texture_resolution = texture_resolution
101
+ # Examine if split is valid.
102
+ if split not in ["train", "val", "test"]:
103
+ raise ValueError("split has to be one of (train, val, test).")
104
+ # Synset dictionary mapping synset offsets in R2N2 to corresponding labels.
105
+ with open(
106
+ path.join(SYNSET_DICT_DIR, "r2n2_synset_dict.json"), "r"
107
+ ) as read_dict:
108
+ self.synset_dict = json.load(read_dict)
109
+ # Inverse dictionary mapping synset labels to corresponding offsets.
110
+ self.synset_inv = {label: offset for offset, label in self.synset_dict.items()}
111
+
112
+ # Store synset and model ids of objects mentioned in the splits_file.
113
+ with open(splits_file) as splits:
114
+ split_dict = json.load(splits)[split]
115
+
116
+ self.return_images = True
117
+ # Check if the folder containing R2N2 renderings is included in r2n2_dir.
118
+ if not path.isdir(path.join(r2n2_dir, views_rel_path)):
119
+ self.return_images = False
120
+ msg = (
121
+ "%s not found in %s. R2N2 renderings will "
122
+ "be skipped when returning models."
123
+ ) % (views_rel_path, r2n2_dir)
124
+ warnings.warn(msg)
125
+
126
+ self.return_voxels = return_voxels
127
+ # Check if the folder containing voxel coordinates is included in r2n2_dir.
128
+ if not path.isdir(path.join(r2n2_dir, voxels_rel_path)):
129
+ self.return_voxels = False
130
+ msg = (
131
+ "%s not found in %s. Voxel coordinates will "
132
+ "be skipped when returning models."
133
+ ) % (voxels_rel_path, r2n2_dir)
134
+ warnings.warn(msg)
135
+
136
+ synset_set = set()
137
+ # Store lists of views of each model in a list.
138
+ self.views_per_model_list = []
139
+ # Store tuples of synset label and total number of views in each category in a list.
140
+ synset_num_instances = []
141
+ for synset in split_dict.keys():
142
+ # Examine if the given synset is present in the ShapeNetCore dataset
143
+ # and is also part of the standard R2N2 dataset.
144
+ if not (
145
+ path.isdir(path.join(shapenet_dir, synset))
146
+ and synset in self.synset_dict
147
+ ):
148
+ msg = (
149
+ "Synset category %s from the splits file is either not "
150
+ "present in %s or not part of the standard R2N2 dataset."
151
+ ) % (synset, shapenet_dir)
152
+ warnings.warn(msg)
153
+ continue
154
+
155
+ synset_set.add(synset)
156
+ self.synset_start_idxs[synset] = len(self.synset_ids)
157
+ # Start counting total number of views in the current category.
158
+ synset_view_count = 0
159
+ for model in split_dict[synset]:
160
+ # Examine if the given model is present in the ShapeNetCore path.
161
+ shapenet_path = path.join(shapenet_dir, synset, model)
162
+ if not path.isdir(shapenet_path):
163
+ msg = "Model %s from category %s is not present in %s." % (
164
+ model,
165
+ synset,
166
+ shapenet_dir,
167
+ )
168
+ warnings.warn(msg)
169
+ continue
170
+ self.synset_ids.append(synset)
171
+ self.model_ids.append(model)
172
+
173
+ model_views = split_dict[synset][model]
174
+ # Randomly select a view index if return_all_views set to False.
175
+ if not return_all_views:
176
+ rand_idx = torch.randint(len(model_views), (1,))
177
+ model_views = [model_views[rand_idx]]
178
+ self.views_per_model_list.append(model_views)
179
+ synset_view_count += len(model_views)
180
+ synset_num_instances.append((self.synset_dict[synset], synset_view_count))
181
+ model_count = len(self.synset_ids) - self.synset_start_idxs[synset]
182
+ self.synset_num_models[synset] = model_count
183
+ headers = ["category", "#instances"]
184
+ synset_num_instances.append(("total", sum(n for _, n in synset_num_instances)))
185
+ print(
186
+ tabulate(synset_num_instances, headers, numalign="left", stralign="center")
187
+ )
188
+
189
+ # Examine if all the synsets in the standard R2N2 mapping are present.
190
+ # Update self.synset_inv so that it only includes the loaded categories.
191
+ synset_not_present = [
192
+ self.synset_inv.pop(self.synset_dict[synset])
193
+ for synset in self.synset_dict
194
+ if synset not in synset_set
195
+ ]
196
+ if len(synset_not_present) > 0:
197
+ msg = (
198
+ "The following categories are included in R2N2's"
199
+ "official mapping but not found in the dataset location %s: %s"
200
+ ) % (shapenet_dir, ", ".join(synset_not_present))
201
+ warnings.warn(msg)
202
+
203
    def __getitem__(self, model_idx, view_idxs: Optional[List[int]] = None) -> Dict:
        """
        Read a model by the given index.

        Args:
            model_idx: The idx of the model to be retrieved in the dataset.
            view_idxs: List of indices of the view to be returned. Each index needs to be
                contained in the loaded split (always between 0 and 23, inclusive). If
                an invalid index is supplied, view_idxs will be ignored and all the loaded
                views will be returned.

        Returns:
            dictionary with following keys:
            - verts: FloatTensor of shape (V, 3).
            - faces: faces.verts_idx, LongTensor of shape (F, 3).
            - synset_id (str): synset id.
            - model_id (str): model id.
            - label (str): synset label.
            - images: FloatTensor of shape (V, H, W, C), where V is number of views
                returned. Returns a batch of the renderings of the models from the R2N2 dataset.
            - R: Rotation matrix of shape (V, 3, 3), where V is number of views returned.
            - T: Translation matrix of shape (V, 3), where V is number of views returned.
            - K: Intrinsic matrix of shape (V, 4, 4), where V is number of views returned.
            - voxels: Voxels of shape (D, D, D), where D is the number of voxels along each
                dimension.
        """
        # Support dataset[(model_idx, view_idxs)] indexing (used e.g. by render()).
        if isinstance(model_idx, tuple):
            model_idx, view_idxs = model_idx
        if view_idxs is not None:
            if isinstance(view_idxs, int):
                view_idxs = [view_idxs]
            if not isinstance(view_idxs, list) and not torch.is_tensor(view_idxs):
                raise TypeError(
                    "view_idxs is of type %s but it needs to be a list."
                    % type(view_idxs)
                )

        # Default to every view loaded for this model; override with view_idxs
        # only if all requested views are present in the loaded split.
        model_views = self.views_per_model_list[model_idx]
        if view_idxs is not None and any(
            idx not in self.views_per_model_list[model_idx] for idx in view_idxs
        ):
            msg = """At least one of the indices in view_idxs is not available.
                Specified view of the model needs to be contained in the
                loaded split. If return_all_views is set to False, only one
                random view is loaded. Try accessing the specified view(s)
                after loading the dataset with self.return_all_views set to True.
                Now returning all view(s) in the loaded dataset."""
            warnings.warn(msg)
        elif view_idxs is not None:
            model_views = view_idxs

        model = self._get_item_ids(model_idx)
        model_path = path.join(
            self.shapenet_dir, model["synset_id"], model["model_id"], "model.obj"
        )

        verts, faces, textures = self._load_mesh(model_path)
        model["verts"] = verts
        model["faces"] = faces
        model["textures"] = textures
        model["label"] = self.synset_dict[model["synset_id"]]

        model["images"] = None
        images, Rs, Ts, voxel_RTs = [], [], [], []
        # Retrieve R2N2's renderings if required.
        if self.return_images:
            rendering_path = path.join(
                self.r2n2_dir,
                self.views_rel_path,
                model["synset_id"],
                model["model_id"],
                "rendering",
            )
            # Read metadata file to obtain params for calibration matrices.
            with open(path.join(rendering_path, "rendering_metadata.txt"), "r") as f:
                metadata_lines = f.readlines()
            for i in model_views:
                # Read image.
                image_path = path.join(rendering_path, "%02d.png" % i)
                raw_img = Image.open(image_path)
                # Normalize to [0, 1] and drop any alpha channel.
                image = torch.from_numpy(np.array(raw_img) / 255.0)[..., :3]
                images.append(image.to(dtype=torch.float32))

                # Get camera calibration. Metadata line i holds the five
                # space-separated Blender parameters for view i.
                azim, elev, yaw, dist_ratio, fov = [
                    float(v) for v in metadata_lines[i].strip().split(" ")
                ]
                dist = dist_ratio * MAX_CAMERA_DISTANCE
                # Extrinsic matrix before transformation to PyTorch3D world space.
                RT = compute_extrinsic_matrix(azim, elev, dist)
                R, T = self._compute_camera_calibration(RT)
                Rs.append(R)
                Ts.append(T)
                voxel_RTs.append(RT)

            # Intrinsic matrix extracted from the Blender with slight modification to work with
            # PyTorch3D world space. Taken from meshrcnn codebase:
            # https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/coords.py
            K = torch.tensor(
                [
                    [2.1875, 0.0, 0.0, 0.0],
                    [0.0, 2.1875, 0.0, 0.0],
                    [0.0, 0.0, -1.002002, -0.2002002],
                    [0.0, 0.0, 1.0, 0.0],
                ]
            )
            model["images"] = torch.stack(images)
            model["R"] = torch.stack(Rs)
            model["T"] = torch.stack(Ts)
            # Same intrinsics for every view, expanded (not copied) per view.
            model["K"] = K.expand(len(model_views), 4, 4)

        voxels_list = []

        # Read voxels if required.
        voxel_path = path.join(
            self.r2n2_dir,
            self.voxels_rel_path,
            model["synset_id"],
            model["model_id"],
            "model.binvox",
        )
        if self.return_voxels:
            if not path.isfile(voxel_path):
                msg = "Voxel file not found for model %s from category %s."
                raise FileNotFoundError(msg % (model["model_id"], model["synset_id"]))

            with open(voxel_path, "rb") as f:
                # Read voxel coordinates as a tensor of shape (N, 3).
                voxel_coords = read_binvox_coords(f)
            # Align voxels to the same coordinate system as mesh verts.
            voxel_coords = align_bbox(voxel_coords, model["verts"])
            # NOTE(review): voxel_RTs is only populated when return_images is
            # True; with return_voxels=True and return_images=False the loop
            # below is empty and torch.stack([]) would fail — confirm intended.
            for RT in voxel_RTs:
                # Compute projection matrix.
                P = BLENDER_INTRINSIC.mm(RT)
                # Convert voxel coordinates of shape (N, 3) to voxels of shape (D, D, D).
                voxels = voxelize(voxel_coords, P, VOXEL_SIZE)
                voxels_list.append(voxels)
            model["voxels"] = torch.stack(voxels_list)

        return model
343
+
344
+ def _compute_camera_calibration(self, RT):
345
+ """
346
+ Helper function for calculating rotation and translation matrices from ShapeNet
347
+ to camera transformation and ShapeNet to PyTorch3D transformation.
348
+
349
+ Args:
350
+ RT: Extrinsic matrix that performs ShapeNet world view to camera view
351
+ transformation.
352
+
353
+ Returns:
354
+ R: Rotation matrix of shape (3, 3).
355
+ T: Translation matrix of shape (3).
356
+ """
357
+ # Transform the mesh vertices from shapenet world to pytorch3d world.
358
+ shapenet_to_pytorch3d = torch.tensor(
359
+ [
360
+ [-1.0, 0.0, 0.0, 0.0],
361
+ [0.0, 1.0, 0.0, 0.0],
362
+ [0.0, 0.0, -1.0, 0.0],
363
+ [0.0, 0.0, 0.0, 1.0],
364
+ ],
365
+ dtype=torch.float32,
366
+ )
367
+ RT = torch.transpose(RT, 0, 1).mm(shapenet_to_pytorch3d) # (4, 4)
368
+ # Extract rotation and translation matrices from RT.
369
+ R = RT[:3, :3]
370
+ T = RT[3, :3]
371
+ return R, T
372
+
373
+ def render(
374
+ self,
375
+ model_ids: Optional[List[str]] = None,
376
+ categories: Optional[List[str]] = None,
377
+ sample_nums: Optional[List[int]] = None,
378
+ idxs: Optional[List[int]] = None,
379
+ view_idxs: Optional[List[int]] = None,
380
+ shader_type=HardPhongShader,
381
+ device: Device = "cpu",
382
+ **kwargs,
383
+ ) -> torch.Tensor:
384
+ """
385
+ Render models with BlenderCamera by default to achieve the same orientations as the
386
+ R2N2 renderings. Also accepts other types of cameras and any of the args that the
387
+ render function in the ShapeNetBase class accepts.
388
+
389
+ Args:
390
+ view_idxs: each model will be rendered with the orientation(s) of the specified
391
+ views. Only render by view_idxs if no camera or args for BlenderCamera is
392
+ supplied.
393
+ Accepts any of the args of the render function in ShapeNetBase:
394
+ model_ids: List[str] of model_ids of models intended to be rendered.
395
+ categories: List[str] of categories intended to be rendered. categories
396
+ and sample_nums must be specified at the same time. categories can be given
397
+ in the form of synset offsets or labels, or a combination of both.
398
+ sample_nums: List[int] of number of models to be randomly sampled from
399
+ each category. Could also contain one single integer, in which case it
400
+ will be broadcasted for every category.
401
+ idxs: List[int] of indices of models to be rendered in the dataset.
402
+ shader_type: Shader to use for rendering. Examples include HardPhongShader
403
+ (default), SoftPhongShader etc or any other type of valid Shader class.
404
+ device: Device (as str or torch.device) on which the tensors should be located.
405
+ **kwargs: Accepts any of the kwargs that the renderer supports and any of the
406
+ args that BlenderCamera supports.
407
+
408
+ Returns:
409
+ Batch of rendered images of shape (N, H, W, 3).
410
+ """
411
+ idxs = self._handle_render_inputs(model_ids, categories, sample_nums, idxs)
412
+ r = torch.cat([self[idxs[i], view_idxs]["R"] for i in range(len(idxs))])
413
+ t = torch.cat([self[idxs[i], view_idxs]["T"] for i in range(len(idxs))])
414
+ k = torch.cat([self[idxs[i], view_idxs]["K"] for i in range(len(idxs))])
415
+ # Initialize default camera using R, T, K from kwargs or R, T, K of the specified views.
416
+ blend_cameras = BlenderCamera(
417
+ R=kwargs.get("R", r),
418
+ T=kwargs.get("T", t),
419
+ K=kwargs.get("K", k),
420
+ device=device,
421
+ )
422
+ cameras = kwargs.get("cameras", blend_cameras).to(device)
423
+ kwargs.pop("cameras", None)
424
+ # pass down all the same inputs
425
+ return super().render(
426
+ idxs=idxs, shader_type=shader_type, device=device, cameras=cameras, **kwargs
427
+ )
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/r2n2/r2n2_synset_dict.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "04256520": "sofa",
3
+ "02933112": "cabinet",
4
+ "02828884": "bench",
5
+ "03001627": "chair",
6
+ "03211117": "display",
7
+ "04090263": "rifle",
8
+ "03691459": "loudspeaker",
9
+ "03636649": "lamp",
10
+ "04401088": "telephone",
11
+ "02691156": "airplane",
12
+ "04379243": "table",
13
+ "02958343": "car",
14
+ "04530566": "watercraft"
15
+ }
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/r2n2/utils.py ADDED
@@ -0,0 +1,504 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import math
10
+ from typing import Dict, List
11
+
12
+ import numpy as np
13
+ import torch
14
+ from pytorch3d.common.datatypes import Device
15
+ from pytorch3d.datasets.utils import collate_batched_meshes
16
+ from pytorch3d.ops import cubify
17
+ from pytorch3d.renderer import (
18
+ HardPhongShader,
19
+ MeshRasterizer,
20
+ MeshRenderer,
21
+ PointLights,
22
+ RasterizationSettings,
23
+ TexturesVertex,
24
+ )
25
+ from pytorch3d.renderer.cameras import CamerasBase
26
+ from pytorch3d.transforms import Transform3d
27
+
28
+
29
# Empirical min and max over the dataset from meshrcnn.
# https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/coords.py#L9
SHAPENET_MIN_ZMIN = 0.67
SHAPENET_MAX_ZMAX = 0.92
# Threshold for cubify from meshrcnn:
# https://github.com/facebookresearch/meshrcnn/blob/main/configs/shapenet/voxmesh_R50.yaml#L11
CUBIFY_THRESH = 0.2

# Default values of rotation, translation and intrinsic matrices for BlenderCamera.
# Identity rotation/intrinsics and zero translation, each with a leading batch
# dimension of 1 so BlenderCamera can broadcast them.
r = np.expand_dims(np.eye(3), axis=0)  # (1, 3, 3)
t = np.expand_dims(np.zeros(3), axis=0)  # (1, 3)
k = np.expand_dims(np.eye(4), axis=0)  # (1, 4, 4)
41
+
42
+
43
def collate_batched_R2N2(batch: List[Dict]):  # pragma: no cover
    """
    Take a list of objects in the form of dictionaries and merge them
    into a single dictionary. This function can be used with a Dataset
    object to create a torch.utils.data.Dataloader which directly
    returns Meshes objects.
    TODO: Add support for textures.

    Args:
        batch: List of dictionaries containing information about objects
            in the dataset.

    Returns:
        collated_dict: Dictionary of collated lists. If batch contains both
            verts and faces, a collated mesh batch is also returned.
    """
    collated_dict = collate_batched_meshes(batch)

    def _stack_in_place(keys, suffix):
        # Replace each key's per-model list with one stacked tensor, key by
        # key. torch.stack raises RuntimeError when models have different
        # numbers of views; keys stacked before the failure stay stacked and
        # the rest remain lists, matching the historical behavior.
        try:
            for key in keys:
                collated_dict[key] = torch.stack(collated_dict[key])
        except RuntimeError:
            print(
                "Models don't have the same number of views. Now returning " + suffix
            )

    # Per-model view batches of shape (V, H, W, 3) -> one (N, V, H, W, 3) tensor.
    if "images" in collated_dict:
        _stack_in_place(["images"], "lists of images instead of batches.")

    # Calibration matrices -> (N, V, 3, 3), (N, V, 3) and (N, V, 4, 4).
    if all(key in collated_dict for key in ["R", "T", "K"]):
        _stack_in_place(
            ["R", "T", "K"],
            "lists of calibration matrices instead of a batched tensor.",
        )

    # Voxel grids -> (N, V, S, S, S), where S is the voxel size.
    if "voxels" in collated_dict:
        _stack_in_place(["voxels"], "lists of voxels instead of a batched tensor.")

    return collated_dict
101
+
102
+
103
def compute_extrinsic_matrix(
    azimuth: float, elevation: float, distance: float
):  # pragma: no cover
    """
    Copied from meshrcnn codebase:
    https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/coords.py#L96

    Compute 4x4 extrinsic matrix that converts from homogeneous world coordinates
    to homogeneous camera coordinates. We assume that the camera is looking at the
    origin.
    Used in R2N2 Dataset when computing calibration matrices.

    Args:
        azimuth: Rotation about the z-axis, in degrees.
        elevation: Rotation above the xy-plane, in degrees.
        distance: Distance from the origin.

    Returns:
        FloatTensor of shape (4, 4).
    """
    azimuth, elevation, distance = float(azimuth), float(elevation), float(distance)

    # Trig terms of the negated azimuth/elevation angles, converted to radians.
    sin_az = math.sin(-math.pi * azimuth / 180.0)
    cos_az = math.cos(-math.pi * azimuth / 180.0)
    sin_el = math.sin(-math.pi * elevation / 180.0)
    cos_el = math.cos(-math.pi * elevation / 180.0)

    # World -> object rotation built from azimuth and elevation, followed by a
    # fixed object -> camera axis permutation.
    R_world2obj = torch.tensor(
        [
            [cos_az * cos_el, sin_az * cos_el, -sin_el],
            [-sin_az, cos_az, 0],
            [cos_az * sin_el, sin_az * sin_el, cos_el],
        ]
    )
    R_obj2cam = torch.tensor([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
    R_world2cam = R_obj2cam.mm(R_world2obj)

    # Camera sits on the x-axis at the given distance, looking at the origin.
    cam_location = torch.tensor([[distance, 0, 0]]).t()
    T_world2cam = -(R_obj2cam.mm(cam_location))

    # Assemble the homogeneous [R|T; 0 0 0 1] matrix.
    RT = torch.cat([R_world2cam, T_world2cam], dim=1)
    RT = torch.cat([RT, torch.tensor([[0.0, 0, 0, 1]])])

    # Georgia: For some reason I cannot fathom, when Blender loads a .obj file it
    # rotates the model 90 degrees about the x axis. To compensate for this quirk we
    # roll that rotation into the extrinsic matrix here
    blender_obj_rot = torch.tensor(
        [[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]
    )
    return RT.mm(blender_obj_rot.to(RT))
148
+
149
+
150
def read_binvox_coords(
    f,
    integer_division: bool = True,
    dtype: torch.dtype = torch.float32,
):  # pragma: no cover
    """
    Copied from meshrcnn codebase:
    https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/binvox_torch.py#L5

    Read a binvox file and return the indices of all nonzero voxels.

    This matches the behavior of binvox_rw.read_as_coord_array
    (https://github.com/dimatura/binvox-rw-py/blob/public/binvox_rw.py#L153)
    but this implementation uses torch rather than numpy, and is more efficient
    due to improved vectorization.

    Georgia: I think that binvox_rw.read_as_coord_array actually has a bug; when converting
    linear indices into three-dimensional indices, they use floating-point
    division instead of integer division. We can reproduce their incorrect
    implementation by passing integer_division=False.

    Args:
        f (str): A file pointer to the binvox file to read
        integer_division (bool): If False, then match the buggy implementation from binvox_rw
        dtype: Datatype of the output tensor. Use float64 to match binvox_rw

    Returns:
        coords (tensor): A tensor of shape (N, 3) where N is the number of nonzero voxels,
            and coords[i] = (x, y, z) gives the index of the ith nonzero voxel. If the
            voxel grid has shape (V, V, V) then we have 0 <= x, y, z < V.
    """
    size, translation, scale = _read_binvox_header(f)
    # torch.frombuffer replaces the deprecated ByteStorage.from_buffer +
    # Tensor.set_(source=...) combination. It shares memory with (and
    # requires) a writable buffer, so copy the payload into a bytearray first.
    data = torch.frombuffer(bytearray(f.read()), dtype=torch.uint8)
    # The payload is run-length encoded as (value, count) byte pairs.
    vals, counts = data[::2], data[1::2]
    idxs = _compute_idxs(vals, counts)
    if not integer_division:
        idxs = idxs.to(dtype)
    # Unravel the linear indices into (x, z, y) order per the binvox layout.
    x_idxs = idxs // (size * size)
    zy_idxs = idxs % (size * size)
    z_idxs = zy_idxs // size
    y_idxs = zy_idxs % size
    coords = torch.stack([x_idxs, y_idxs, z_idxs], dim=1)
    return coords.to(dtype)
196
+
197
+
198
+ def _compute_idxs(vals, counts): # pragma: no cover
199
+ """
200
+ Copied from meshrcnn codebase:
201
+ https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/binvox_torch.py#L58
202
+
203
+ Fast vectorized version of index computation.
204
+
205
+ Args:
206
+ vals: tensor of binary values indicating voxel presence in a dense format.
207
+ counts: tensor of number of occurrence of each value in vals.
208
+
209
+ Returns:
210
+ idxs: A tensor of shape (N), where N is the number of nonzero voxels.
211
+ """
212
+ # Consider an example where:
213
+ # vals = [0, 1, 0, 1, 1]
214
+ # counts = [2, 3, 3, 2, 1]
215
+ #
216
+ # These values of counts and vals mean that the dense binary grid is:
217
+ # [0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]
218
+ #
219
+ # So the nonzero indices we want to return are:
220
+ # [2, 3, 4, 8, 9, 10]
221
+
222
+ # After the cumsum we will have:
223
+ # end_idxs = [2, 5, 8, 10, 11]
224
+ end_idxs = counts.cumsum(dim=0)
225
+
226
+ # After masking and computing start_idx we have:
227
+ # end_idxs = [5, 10, 11]
228
+ # counts = [3, 2, 1]
229
+ # start_idxs = [2, 8, 10]
230
+ mask = vals == 1
231
+ end_idxs = end_idxs[mask]
232
+ counts = counts[mask].to(end_idxs)
233
+ start_idxs = end_idxs - counts
234
+
235
+ # We initialize delta as:
236
+ # [2, 1, 1, 1, 1, 1]
237
+ delta = torch.ones(counts.sum().item(), dtype=torch.int64)
238
+ delta[0] = start_idxs[0]
239
+
240
+ # We compute pos = [3, 5], val = [3, 0]; then delta is
241
+ # [2, 1, 1, 4, 1, 1]
242
+ pos = counts.cumsum(dim=0)[:-1]
243
+ val = start_idxs[1:] - end_idxs[:-1]
244
+ delta[pos] += val
245
+
246
+ # A final cumsum gives the idx we want: [2, 3, 4, 8, 9, 10]
247
+ idxs = delta.cumsum(dim=0)
248
+ return idxs
249
+
250
+
251
+ def _read_binvox_header(f): # pragma: no cover
252
+ """
253
+ Copied from meshrcnn codebase:
254
+ https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/binvox_torch.py#L99
255
+
256
+ Read binvox header and extract information regarding voxel sizes and translations
257
+ to original voxel coordinates.
258
+
259
+ Args:
260
+ f (str): A file pointer to the binvox file to read.
261
+
262
+ Returns:
263
+ size (int): size of voxel.
264
+ translation (tuple(float)): translation to original voxel coordinates.
265
+ scale (float): scale to original voxel coordinates.
266
+ """
267
+ # First line of the header should be "#binvox 1"
268
+ line = f.readline().strip()
269
+ if line != b"#binvox 1":
270
+ raise ValueError("Invalid header (line 1)")
271
+
272
+ # Second line of the header should be "dim [int] [int] [int]"
273
+ # and all three int should be the same
274
+ line = f.readline().strip()
275
+ if not line.startswith(b"dim "):
276
+ raise ValueError("Invalid header (line 2)")
277
+ dims = line.split(b" ")
278
+ try:
279
+ dims = [int(d) for d in dims[1:]]
280
+ except ValueError:
281
+ raise ValueError("Invalid header (line 2)") from None
282
+ if len(dims) != 3 or dims[0] != dims[1] or dims[0] != dims[2]:
283
+ raise ValueError("Invalid header (line 2)")
284
+ size = dims[0]
285
+
286
+ # Third line of the header should be "translate [float] [float] [float]"
287
+ line = f.readline().strip()
288
+ if not line.startswith(b"translate "):
289
+ raise ValueError("Invalid header (line 3)")
290
+ translation = line.split(b" ")
291
+ if len(translation) != 4:
292
+ raise ValueError("Invalid header (line 3)")
293
+ try:
294
+ translation = tuple(float(t) for t in translation[1:])
295
+ except ValueError:
296
+ raise ValueError("Invalid header (line 3)") from None
297
+
298
+ # Fourth line of the header should be "scale [float]"
299
+ line = f.readline().strip()
300
+ if not line.startswith(b"scale "):
301
+ raise ValueError("Invalid header (line 4)")
302
+ line = line.split(b" ")
303
+ if not len(line) == 2:
304
+ raise ValueError("Invalid header (line 4)")
305
+ scale = float(line[1])
306
+
307
+ # Fifth line of the header should be "data"
308
+ line = f.readline().strip()
309
+ if not line == b"data":
310
+ raise ValueError("Invalid header (line 5)")
311
+
312
+ return size, translation, scale
313
+
314
+
315
def align_bbox(src, tgt):  # pragma: no cover
    """
    Copied from meshrcnn codebase:
    https://github.com/facebookresearch/meshrcnn/blob/main/tools/preprocess_shapenet.py#L263

    Return a copy of src points in the coordinate system of tgt by applying a
    scale and shift along each coordinate axis to make the min / max values align.

    Args:
        src, tgt: Torch Tensor of shape (N, 3)

    Returns:
        out: Torch Tensor of shape (N, 3)
    """
    if src.ndim != 2 or tgt.ndim != 2:
        raise ValueError("Both src and tgt need to have dimensions of 2.")
    if src.shape[-1] != 3 or tgt.shape[-1] != 3:
        raise ValueError(
            "Both src and tgt need to have sizes of 3 along the second dimension."
        )
    # Per-axis bounding boxes of both point sets.
    src_lo, src_hi = src.min(dim=0)[0], src.max(dim=0)[0]
    tgt_lo, tgt_hi = tgt.min(dim=0)[0], tgt.max(dim=0)[0]
    # Per-axis affine map sending [src_lo, src_hi] onto [tgt_lo, tgt_hi].
    scale = (tgt_hi - tgt_lo) / (src_hi - src_lo)
    shift = tgt_lo - scale * src_lo
    return scale * src + shift
343
+
344
+
345
def voxelize(voxel_coords, P, V):  # pragma: no cover
    """
    Copied from meshrcnn codebase:
    https://github.com/facebookresearch/meshrcnn/blob/main/tools/preprocess_shapenet.py#L284
    but changing flip y to flip x.

    Creating voxels of shape (D, D, D) from voxel_coords and projection matrix.

    Args:
        voxel_coords: FloatTensor of shape (V, 3) giving voxel's coordinates aligned to
            the vertices.
        P: FloatTensor of shape (4, 4) giving the projection matrix.
        V: Voxel size of the output.

    Returns:
        voxels: Tensor of shape (D, D, D) giving the voxelized result.
    """
    device = voxel_coords.device
    # project_verts returns a fresh tensor, so the in-place ops below do not
    # mutate the caller's voxel_coords.
    voxel_coords = project_verts(voxel_coords, P)

    # Using the actual zmin and zmax of the model is bad because we need them
    # to perform the inverse transform, which transform voxels back into world
    # space for refinement or evaluation. Instead we use an empirical min and
    # max over the dataset; that way it is consistent for all images.
    zmin = SHAPENET_MIN_ZMIN
    zmax = SHAPENET_MAX_ZMAX

    # Once we know zmin and zmax, we need to adjust the z coordinates so the
    # range [zmin, zmax] instead runs from [-1, 1]
    m = 2.0 / (zmax - zmin)
    b = -2.0 * zmin / (zmax - zmin) - 1
    voxel_coords[:, 2].mul_(m).add_(b)
    voxel_coords[:, 0].mul_(-1)  # Flip x

    # Now voxels are in [-1, 1]^3; map to [0, V-1)^3
    voxel_coords = 0.5 * (V - 1) * (voxel_coords + 1.0)
    voxel_coords = voxel_coords.round().to(torch.int64)
    # Keep only the coordinates that round inside the [0, V) grid.
    valid = (0 <= voxel_coords) * (voxel_coords < V)
    valid = valid[:, 0] * valid[:, 1] * valid[:, 2]
    x, y, z = voxel_coords.unbind(dim=1)
    x, y, z = x[valid], y[valid], z[valid]
    # Mark occupied cells; note the output grid is indexed in (z, y, x) order.
    voxels = torch.zeros(V, V, V, dtype=torch.uint8, device=device)
    voxels[z, y, x] = 1

    return voxels
390
+
391
+
392
def project_verts(verts, P, eps: float = 1e-1):  # pragma: no cover
    """
    Copied from meshrcnn codebase:
    https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/coords.py#L159

    Project vertices using a 4x4 transformation matrix.

    Args:
        verts: FloatTensor of shape (N, V, 3) giving a batch of vertex positions or of
            shape (V, 3) giving a single set of vertex positions.
        P: FloatTensor of shape (N, 4, 4) giving projection matrices or of shape (4, 4)
            giving a single projection matrix.

    Returns:
        verts_out: FloatTensor of shape (N, V, 3) giving vertex positions (x, y, z)
            where verts_out[i] is the result of transforming verts[i] by P[i].
    """
    # Promote unbatched (V, 3) / (4, 4) inputs to a batch of one.
    unbatched = verts.dim() == 2
    if unbatched:
        assert P.dim() == 2
        verts, P = verts[None], P[None]

    batch_size, num_verts = verts.shape[0], verts.shape[1]
    dtype, device = verts.dtype, verts.device

    # Lift to homogeneous coordinates by appending a column of ones, then
    # apply the projection as a batched matmul.
    ones = torch.ones(batch_size, num_verts, 1, dtype=dtype, device=device)
    verts_hom = torch.cat([verts, ones], dim=2)
    verts_cam_hom = torch.bmm(verts_hom, P.transpose(1, 2))

    # Perspective divide. Clamp |w| away from zero while preserving its sign
    # (with w == 0 treated as positive) to avoid division blow-ups.
    w = verts_cam_hom[:, :, 3:]
    w_sign = w.sign()
    w_sign[w == 0] = 1
    w = w_sign * w.abs().clamp(min=eps)

    verts_proj = verts_cam_hom[:, :, :3] / w

    return verts_proj[0] if unbatched else verts_proj
439
+
440
+
441
class BlenderCamera(CamerasBase):  # pragma: no cover
    """
    Camera for rendering objects with calibration matrices from the R2N2 dataset
    (which uses Blender for rendering the views for each model).
    """

    def __init__(self, R=r, T=t, K=k, device: Device = "cpu") -> None:
        """
        Args:
            R: Rotation matrix of shape (N, 3, 3).
            T: Translation matrix of shape (N, 3).
            K: Intrinsic matrix of shape (N, 4, 4).
            device: Device (as str or torch.device).
        """
        # Defaults are the module-level identity r, t, k arrays; they are
        # read-only here, so the shared mutable defaults are safe.
        # The initializer formats all inputs to torch tensors and broadcasts
        # all the inputs to have the same batch dimension where necessary.
        super().__init__(device=device, R=R, T=T, K=K)

    def get_projection_transform(self, **kwargs) -> Transform3d:
        """Return the projection transform built directly from K."""
        transform = Transform3d(device=self.device)
        # K is transposed — presumably to match Transform3d's row-vector
        # matrix convention; confirm against CamerasBase. Writing _matrix
        # directly bypasses Transform3d's constructor validation.
        transform._matrix = self.K.transpose(1, 2).contiguous()
        return transform

    def is_perspective(self):
        # The Blender intrinsics are applied as a full 4x4 matrix above rather
        # than through PyTorch3D's perspective-camera path.
        return False

    def in_ndc(self):
        return True
469
+
470
+
471
def render_cubified_voxels(
    voxels: torch.Tensor, shader_type=HardPhongShader, device: Device = "cpu", **kwargs
):  # pragma: no cover
    """
    Use the Cubify operator to convert inputs voxels to a mesh and then render that mesh.

    Args:
        voxels: FloatTensor of shape (N, D, D, D) where N is the batch size and
            D is the number of voxels along each dimension.
        shader_type: Shader to use for rendering. Examples include HardPhongShader
            (default), SoftPhongShader etc or any other type of valid Shader class.
        device: Device (as str or torch.device) on which the tensors should be located.
        **kwargs: Accepts any of the kwargs that the renderer supports.

    Returns:
        Batch of rendered images of shape (N, H, W, 3).
    """
    cubified_voxels = cubify(voxels, CUBIFY_THRESH).to(device)
    # Give every vertex a plain white texture so the shader has an albedo.
    cubified_voxels.textures = TexturesVertex(
        verts_features=torch.ones_like(cubified_voxels.verts_padded(), device=device)
    )
    # Default BlenderCamera (identity R/K, zero T); raster settings and lights
    # may be overridden through kwargs.
    cameras = BlenderCamera(device=device)
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=kwargs.get("raster_settings", RasterizationSettings()),
        ),
        shader=shader_type(
            device=device,
            cameras=cameras,
            lights=kwargs.get("lights", PointLights()).to(device),
        ),
    )
    return renderer(cubified_voxels)
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/shapenet_base.py ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import warnings
10
+ from typing import Dict, List, Optional, Tuple
11
+
12
+ import torch
13
+ from pytorch3d.common.datatypes import Device
14
+ from pytorch3d.io import load_obj
15
+ from pytorch3d.renderer import (
16
+ FoVPerspectiveCameras,
17
+ HardPhongShader,
18
+ MeshRasterizer,
19
+ MeshRenderer,
20
+ PointLights,
21
+ RasterizationSettings,
22
+ TexturesVertex,
23
+ )
24
+
25
+ from .utils import collate_batched_meshes
26
+
27
+
28
class ShapeNetBase(torch.utils.data.Dataset):  # pragma: no cover
    """
    'ShapeNetBase' implements a base Dataset for ShapeNet and R2N2 with helper methods.
    It is not intended to be used on its own as a Dataset for a Dataloader. Both __init__
    and __getitem__ need to be implemented.
    """

    def __init__(self) -> None:
        """
        Set up lists of synset_ids and model_ids.
        """
        # Parallel per-model lists: synset_ids[i] / model_ids[i] identify model i.
        self.synset_ids = []
        self.model_ids = []
        # NOTE(review): presumably maps synset label -> synset offset (inverse
        # of the offset->label dict); confirm against the subclasses.
        self.synset_inv = {}
        # Per-synset bookkeeping filled in by subclasses.
        self.synset_start_idxs = {}
        self.synset_num_models = {}
        # Data location; subclasses set this to the real dataset root.
        self.shapenet_dir = ""
        self.model_dir = "model.obj"
        # Texture loading options consumed by _load_mesh.
        self.load_textures = True
        self.texture_resolution = 4
48
+
49
+ def __len__(self) -> int:
50
+ """
51
+ Return number of total models in the loaded dataset.
52
+ """
53
+ return len(self.model_ids)
54
+
55
    def __getitem__(self, idx) -> Dict:
        """
        Read a model by the given index. Need to be implemented for every child class
        of ShapeNetBase.

        Args:
            idx: The idx of the model to be retrieved in the dataset.

        Returns:
            dictionary containing information about the model.
        """
        # Abstract: concrete subclasses supply the real loading logic.
        raise NotImplementedError(
            "__getitem__ should be implemented in the child class of ShapeNetBase"
        )
69
+
70
+ def _get_item_ids(self, idx) -> Dict:
71
+ """
72
+ Read a model by the given index.
73
+
74
+ Args:
75
+ idx: The idx of the model to be retrieved in the dataset.
76
+
77
+ Returns:
78
+ dictionary with following keys:
79
+ - synset_id (str): synset id
80
+ - model_id (str): model id
81
+ """
82
+ model = {}
83
+ model["synset_id"] = self.synset_ids[idx]
84
+ model["model_id"] = self.model_ids[idx]
85
+ return model
86
+
87
    def _load_mesh(self, model_path) -> Tuple:
        """
        Load the mesh at model_path with load_obj.

        Args:
            model_path: path to a .obj file.

        Returns:
            verts: tensor of vertex positions.
            faces: faces.verts_idx, LongTensor of face vertex indices.
            textures: per-face texture atlas of shape
                (F, texture_resolution, texture_resolution, 3), or None when
                load_textures is False.
        """
        # Atlas creation is tied to the same flag as texture loading so the
        # two options stay consistent.
        verts, faces, aux = load_obj(
            model_path,
            create_texture_atlas=self.load_textures,
            load_textures=self.load_textures,
            texture_atlas_size=self.texture_resolution,
        )
        if self.load_textures:
            textures = aux.texture_atlas
            # Some meshes don't have textures. In this case
            # create a white texture map
            if textures is None:
                textures = verts.new_ones(
                    faces.verts_idx.shape[0],
                    self.texture_resolution,
                    self.texture_resolution,
                    3,
                )
        else:
            textures = None

        return verts, faces.verts_idx, textures
109
+
110
    def render(
        self,
        model_ids: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        sample_nums: Optional[List[int]] = None,
        idxs: Optional[List[int]] = None,
        shader_type=HardPhongShader,
        device: Device = "cpu",
        **kwargs,
    ) -> torch.Tensor:
        """
        If a list of model_ids are supplied, render all the objects by the given model_ids.
        If no model_ids are supplied, but categories and sample_nums are specified, randomly
        select a number of objects (number specified in sample_nums) in the given categories
        and render these objects. If instead a list of idxs is specified, check if the idxs
        are all valid and render models by the given idxs. Otherwise, randomly select a number
        (first number in sample_nums, default is set to be 1) of models from the loaded dataset
        and render these models.

        Args:
            model_ids: List[str] of model_ids of models intended to be rendered.
            categories: List[str] of categories intended to be rendered. categories
                and sample_nums must be specified at the same time. categories can be given
                in the form of synset offsets or labels, or a combination of both.
            sample_nums: List[int] of number of models to be randomly sampled from
                each category. Could also contain one single integer, in which case it
                will be broadcasted for every category.
            idxs: List[int] of indices of models to be rendered in the dataset.
            shader_type: Select shading. Valid options include HardPhongShader (default),
                SoftPhongShader, HardGouraudShader, SoftGouraudShader, HardFlatShader,
                SoftSilhouetteShader.
            device: Device (as str or torch.device) on which the tensors should be located.
            **kwargs: Accepts any of the kwargs that the renderer supports.

        Returns:
            Batch of rendered images of shape (N, H, W, 3).
        """
        # Resolve whichever selection mechanism the caller used into dataset indices.
        idxs = self._handle_render_inputs(model_ids, categories, sample_nums, idxs)
        # Use the getitem method which loads mesh + texture
        models = [self[idx] for idx in idxs]
        meshes = collate_batched_meshes(models)["mesh"]
        if meshes.textures is None:
            # Untextured meshes get a constant white per-vertex texture so the
            # shader always has colors to interpolate.
            meshes.textures = TexturesVertex(
                verts_features=torch.ones_like(meshes.verts_padded(), device=device)
            )

        meshes = meshes.to(device)
        # Fall back to a default FoV camera when the caller did not supply one.
        cameras = kwargs.get("cameras", FoVPerspectiveCameras()).to(device)
        if len(cameras) != 1 and len(cameras) % len(meshes) != 0:
            raise ValueError("Mismatch between batch dims of cameras and meshes.")
        if len(cameras) > 1:
            # When rendering R2N2 models, if more than one views are provided, broadcast
            # the meshes so that each mesh can be rendered for each of the views.
            meshes = meshes.extend(len(cameras) // len(meshes))
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras,
                raster_settings=kwargs.get("raster_settings", RasterizationSettings()),
            ),
            shader=shader_type(
                device=device,
                cameras=cameras,
                lights=kwargs.get("lights", PointLights()).to(device),
            ),
        )
        return renderer(meshes)
    def _handle_render_inputs(
        self,
        model_ids: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        sample_nums: Optional[List[int]] = None,
        idxs: Optional[List[int]] = None,
    ) -> List[int]:
        """
        Helper function for converting user provided model_ids, categories and sample_nums
        to indices of models in the loaded dataset. If model idxs are provided, we check if
        the idxs are valid. If no models are specified, the first model in the loaded dataset
        is chosen.

        Args:
            model_ids: List[str] of model_ids of models to be rendered.
            categories: List[str] of categories to be rendered.
            sample_nums: List[int] of number of models to be randomly sampled from
                each category.
            idxs: List[int] of indices of models to be rendered in the dataset.

        Returns:
            List of indices (positions in the loaded dataset) of the models to be
            rendered. NOTE(review): the final fallback branch returns the raw
            tensor from _sample_idxs_from_category rather than a list; callers
            only iterate over the result, so both forms work.
        """
        # Get corresponding indices if model_ids are supplied.
        if model_ids is not None and len(model_ids) > 0:
            idxs = []
            for model_id in model_ids:
                if model_id not in self.model_ids:
                    raise ValueError(
                        "model_id %s not found in the loaded dataset." % model_id
                    )
                idxs.append(self.model_ids.index(model_id))
        # Sample random models if categories and sample_nums are supplied and get
        # the corresponding indices.
        elif categories is not None and len(categories) > 0:
            sample_nums = [1] if sample_nums is None else sample_nums
            if len(categories) != len(sample_nums) and len(sample_nums) != 1:
                raise ValueError(
                    "categories and sample_nums needs to be of the same length or "
                    "sample_nums needs to be of length 1."
                )

            idxs_tensor = torch.empty(0, dtype=torch.int32)
            for i in range(len(categories)):
                # Accept either a label (resolved via synset_inv) or a raw synset offset.
                category = self.synset_inv.get(categories[i], categories[i])
                if category not in self.synset_inv.values():
                    raise ValueError(
                        "Category %s is not in the loaded dataset." % category
                    )
                # Broadcast if sample_nums has length of 1.
                sample_num = sample_nums[i] if len(sample_nums) > 1 else sample_nums[0]
                sampled_idxs = self._sample_idxs_from_category(
                    sample_num=sample_num, category=category
                )
                # pyre-fixme[6]: For 1st param expected `Union[List[Tensor],
                #  typing.Tuple[Tensor, ...]]` but got `Tuple[Tensor, List[int]]`.
                idxs_tensor = torch.cat((idxs_tensor, sampled_idxs))
            idxs = idxs_tensor.tolist()
        # Check if the indices are valid if idxs are supplied.
        elif idxs is not None and len(idxs) > 0:
            if any(idx < 0 or idx >= len(self.model_ids) for idx in idxs):
                raise IndexError(
                    "One or more idx values are out of bounds. Indices need to be"
                    "between 0 and %s." % (len(self.model_ids) - 1)
                )
        # Check if sample_nums is specified, if so sample sample_nums[0] number
        # of indices from the entire loaded dataset. Otherwise randomly select one
        # index from the dataset.
        else:
            sample_nums = [1] if sample_nums is None else sample_nums
            if len(sample_nums) > 1:
                msg = (
                    "More than one sample sizes specified, now sampling "
                    "%d models from the dataset." % sample_nums[0]
                )
                warnings.warn(msg)
            idxs = self._sample_idxs_from_category(sample_nums[0])
        return idxs
    def _sample_idxs_from_category(
        self, sample_num: int = 1, category: Optional[str] = None
    ) -> List[int]:
        """
        Helper function for sampling a number of indices from the given category.

        Args:
            sample_num: number of indices to be sampled from the given category.
            category: category synset of the category to be sampled from. If not
                specified, sample from all models in the loaded dataset.

        Returns:
            1-D tensor of sampled dataset indices. NOTE(review): despite the
            List[int] annotation, a torch.Tensor is returned (see the pyre-fixme
            below); callers either torch.cat it or iterate over it, so both
            usages are served.
        """
        # Restrict sampling to the contiguous index range of `category`,
        # or to the whole dataset when no category is given.
        start = self.synset_start_idxs[category] if category is not None else 0
        range_len = (
            self.synset_num_models[category] if category is not None else self.__len__()
        )
        # Sample with replacement only when more samples than models are requested.
        replacement = sample_num > range_len
        # Uniform sampling over [0, range_len), shifted into the category's range.
        sampled_idxs = (
            torch.multinomial(
                torch.ones((range_len), dtype=torch.float32),
                sample_num,
                replacement=replacement,
            )
            + start
        )
        if replacement:
            msg = (
                "Sample size %d is larger than the number of objects in %s, "
                "values sampled with replacement."
            ) % (
                sample_num,
                "category " + category if category is not None else "all categories",
            )
            warnings.warn(msg)
        # pyre-fixme[7]: Expected `List[int]` but got `Tensor`.
        return sampled_idxs
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/datasets/utils.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ from typing import Dict, List
10
+
11
+ from pytorch3d.renderer.mesh import TexturesAtlas
12
+ from pytorch3d.structures import Meshes
13
+
14
+
15
+ def collate_batched_meshes(batch: List[Dict]): # pragma: no cover
16
+ """
17
+ Take a list of objects in the form of dictionaries and merge them
18
+ into a single dictionary. This function can be used with a Dataset
19
+ object to create a torch.utils.data.Dataloader which directly
20
+ returns Meshes objects.
21
+ TODO: Add support for textures.
22
+
23
+ Args:
24
+ batch: List of dictionaries containing information about objects
25
+ in the dataset.
26
+
27
+ Returns:
28
+ collated_dict: Dictionary of collated lists. If batch contains both
29
+ verts and faces, a collated mesh batch is also returned.
30
+ """
31
+ if batch is None or len(batch) == 0:
32
+ return None
33
+ collated_dict = {}
34
+ for k in batch[0].keys():
35
+ collated_dict[k] = [d[k] for d in batch]
36
+
37
+ collated_dict["mesh"] = None
38
+ if {"verts", "faces"}.issubset(collated_dict.keys()):
39
+
40
+ textures = None
41
+ if "textures" in collated_dict:
42
+ textures = TexturesAtlas(atlas=collated_dict["textures"])
43
+
44
+ collated_dict["mesh"] = Meshes(
45
+ verts=collated_dict["verts"],
46
+ faces=collated_dict["faces"],
47
+ textures=textures,
48
+ )
49
+
50
+ return collated_dict
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/eval_demo.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+
10
+ import dataclasses
11
+ import os
12
+ from enum import Enum
13
+ from typing import Any, cast, Dict, List, Optional, Tuple
14
+
15
+ import lpips
16
+ import torch
17
+ from pytorch3d.implicitron.dataset.data_source import ImplicitronDataSource
18
+ from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset
19
+ from pytorch3d.implicitron.dataset.json_index_dataset_map_provider import (
20
+ CO3D_CATEGORIES,
21
+ )
22
+ from pytorch3d.implicitron.evaluation.evaluate_new_view_synthesis import (
23
+ aggregate_nvs_results,
24
+ eval_batch,
25
+ pretty_print_nvs_metrics,
26
+ summarize_nvs_eval_results,
27
+ )
28
+ from pytorch3d.implicitron.models.model_dbir import ModelDBIR
29
+ from pytorch3d.implicitron.tools.utils import dataclass_to_cuda_
30
+ from tqdm import tqdm
31
+
32
+
33
class Task(Enum):
    """Evaluation regimes of the CO3D benchmark."""

    SINGLE_SEQUENCE = "singlesequence"
    MULTI_SEQUENCE = "multisequence"
def main() -> None:
    """
    Evaluates new view synthesis metrics of a simple depth-based image rendering
    (DBIR) model for multisequence/singlesequence tasks for several categories.

    The evaluation is conducted on the same data as in [1] and, hence, the results
    are directly comparable to the numbers reported in [1].

    References:
        [1] J. Reizenstein, R. Shapovalov, P. Henzler, L. Sbordone,
            P. Labatut, D. Novotny:
            Common Objects in 3D: Large-Scale Learning
            and Evaluation of Real-life 3D Category Reconstruction
    """

    task_results = {}
    for task in (Task.SINGLE_SEQUENCE, Task.MULTI_SEQUENCE):
        task_results[task] = []
        # 20 categories are evaluated for single-sequence, 10 for multi-sequence.
        for category in CO3D_CATEGORIES[: (20 if task == Task.SINGLE_SEQUENCE else 10)]:
            # Single-sequence evaluates sequences 0 and 1 per category;
            # multi-sequence runs once with no per-sequence restriction.
            for single_sequence_id in (
                (0, 1) if task == Task.SINGLE_SEQUENCE else (None,)
            ):
                category_result = evaluate_dbir_for_category(
                    category, task=task, single_sequence_id=single_sequence_id
                )
                print("")
                print(
                    f"Results for task={task}; category={category};"
                    + (
                        f" sequence={single_sequence_id}:"
                        if single_sequence_id is not None
                        else ":"
                    )
                )
                pretty_print_nvs_metrics(category_result)
                print("")

                task_results[task].append(category_result)
            # Running aggregate printed after each category finishes.
            _print_aggregate_results(task, task_results)

    # Final per-task summary over all evaluated categories.
    for task in task_results:
        _print_aggregate_results(task, task_results)
def evaluate_dbir_for_category(
    category: str,
    task: Task,
    bg_color: Tuple[float, float, float] = (0.0, 0.0, 0.0),
    single_sequence_id: Optional[int] = None,
    num_workers: int = 16,
):
    """
    Evaluates new view synthesis metrics of a simple depth-based image rendering
    (DBIR) model for a given task, category, and sequence (in case task=='singlesequence').

    Args:
        category: Object category.
        bg_color: Background color of the renders.
        task: Evaluation task. Either singlesequence or multisequence.
        single_sequence_id: The ID of the evaluation sequence for the singlesequence task.
        num_workers: The number of workers for the employed dataloaders.
            NOTE(review): this argument is not referenced in the function body
            below — TODO confirm whether it should be forwarded to the data source.

    Returns:
        category_result: A dictionary of quantitative metrics.
    """

    # -1 signals "no sequence restriction" to the dataset provider.
    single_sequence_id = single_sequence_id if single_sequence_id is not None else -1

    # Fixed seed so that the random aspects of evaluation are reproducible.
    torch.manual_seed(42)

    dataset_map_provider_args = {
        "category": category,
        "dataset_root": os.environ["CO3D_DATASET_ROOT"],
        "assert_single_seq": task == Task.SINGLE_SEQUENCE,
        "task_str": task.value,
        "test_on_train": False,
        "test_restrict_sequence_id": single_sequence_id,
        # Point clouds are needed as the geometry source for DBIR.
        "dataset_JsonIndexDataset_args": {"load_point_clouds": True},
    }
    data_source = ImplicitronDataSource(
        dataset_map_provider_JsonIndexDatasetMapProvider_args=dataset_map_provider_args
    )

    datasets, dataloaders = data_source.get_datasets_and_dataloaders()

    test_dataset = datasets.test
    test_dataloader = dataloaders.test
    if test_dataset is None or test_dataloader is None:
        raise ValueError("must have a test dataset.")

    image_size = cast(JsonIndexDataset, test_dataset).image_width

    if image_size is None:
        raise ValueError("Image size should be set in the dataset")

    # init the simple DBIR model
    model = ModelDBIR(
        render_image_width=image_size,
        render_image_height=image_size,
        bg_color=bg_color,
        max_points=int(1e5),
    )
    model.cuda()

    # init the lpips model for eval
    lpips_model = lpips.LPIPS(net="vgg")
    lpips_model = lpips_model.cuda()

    per_batch_eval_results = []
    print("Evaluating DBIR model ...")
    for frame_data in tqdm(test_dataloader):
        frame_data = dataclass_to_cuda_(frame_data)
        preds = model(**dataclasses.asdict(frame_data))
        per_batch_eval_results.append(
            eval_batch(
                frame_data,
                preds["implicitron_render"],
                bg_color=bg_color,
                lpips_model=lpips_model,
            )
        )

    # Aggregate per-batch metrics; the flat variant is unused here.
    category_result_flat, category_result = summarize_nvs_eval_results(
        per_batch_eval_results,
        is_multisequence=task != Task.SINGLE_SEQUENCE,
    )

    return category_result["results"]
def _print_aggregate_results(
    task: Task, task_results: Dict[Task, List[List[Dict[str, Any]]]]
) -> None:
    """
    Aggregate the per-category results collected so far for ``task`` and
    print them, framed by blank lines for readability.
    """
    aggregated = aggregate_nvs_results(task_results[task])
    print("")
    print(f"Aggregate results for task={task}:")
    pretty_print_nvs_metrics(aggregated)
    print("")
# Script entry point: run the full DBIR evaluation over all tasks/categories.
if __name__ == "__main__":
    main()
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/vis/__init__.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import warnings
10
+
11
+
12
# Plotly is an optional dependency: keep the rest of the visualization
# utilities importable when it is missing, but hint at how to install it.
try:
    from .plotly_vis import get_camera_wireframe, plot_batch_individually, plot_scene
except ModuleNotFoundError as err:
    if "plotly" in str(err):
        warnings.warn(
            "Cannot import plotly-based visualization code."
            " Please install plotly to enable (pip install plotly)."
        )
    else:
        # An unrelated import failure must not be masked.
        raise

from .texture_vis import texturesuv_image_matplotlib, texturesuv_image_PIL
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/vis/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (678 Bytes). View file
 
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/vis/__pycache__/plotly_vis.cpython-310.pyc ADDED
Binary file (30 kB). View file
 
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/vis/__pycache__/texture_vis.cpython-310.pyc ADDED
Binary file (3.57 kB). View file
 
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/vis/plotly_vis.py ADDED
@@ -0,0 +1,1057 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import warnings
10
+ from typing import Dict, List, NamedTuple, Optional, Tuple, Union
11
+
12
+ import plotly.graph_objects as go
13
+ import torch
14
+ from plotly.subplots import make_subplots
15
+ from pytorch3d.renderer import (
16
+ HeterogeneousRayBundle,
17
+ ray_bundle_to_ray_points,
18
+ RayBundle,
19
+ TexturesAtlas,
20
+ TexturesVertex,
21
+ )
22
+ from pytorch3d.renderer.camera_utils import camera_to_eye_at_up
23
+ from pytorch3d.renderer.cameras import CamerasBase
24
+ from pytorch3d.structures import join_meshes_as_scene, Meshes, Pointclouds
25
+
26
+
27
+ Struct = Union[CamerasBase, Meshes, Pointclouds, RayBundle, HeterogeneousRayBundle]
28
+
29
+
30
def _get_len(struct: Union[Struct, List[Struct]]) -> int:  # pragma: no cover
    """
    Returns the length (usually corresponds to the batch size) of the input structure.
    """
    # pyre-ignore[6]
    if _is_ray_bundle(struct):
        if _is_heterogeneous_ray_bundle(struct):
            # Heterogeneous bundles: one entry per source camera.
            # pyre-ignore[16]
            return len(struct.camera_counts)
        # pyre-ignore[16]
        return len(struct.directions)
    # Cameras / Meshes / Pointclouds implement __len__ directly.
    # pyre-ignore[6]
    return len(struct)
def _is_ray_bundle(struct: Struct) -> bool:
    """
    Duck-typed test for ray-bundle-like inputs.

    Args:
        struct: Struct object to test
    Returns:
        True if something is a RayBundle, HeterogeneousRayBundle or
        ImplicitronRayBundle, else False
    """
    is_bundle = hasattr(struct, "directions")
    return is_bundle
def _is_heterogeneous_ray_bundle(struct: Union[List[Struct], Struct]) -> bool:
    """
    Duck-typed test for heterogeneous ray bundles.

    Args:
        struct: object to test
    Returns:
        True if something is a HeterogeneousRayBundle or ImplicitronRayBundle
        that cannot be reduced to a plain RayBundle, else False
    """
    if not hasattr(struct, "camera_counts"):
        return False
    # pyre-ignore[16]
    return struct.camera_counts is not None
def get_camera_wireframe(scale: float = 0.3):  # pragma: no cover
    """
    Returns a wireframe of a 3D line-plot of a camera symbol.

    Args:
        scale: uniform scale factor applied to every vertex of the symbol.

    Returns:
        (15, 3) float tensor of vertices; consecutive rows are joined by line
        segments to trace the camera body, its "up" marker and optical axis.
    """
    # Key points of the symbol in camera-local coordinates.
    a = torch.tensor([-2.0, 1.5, 4.0]) / 2
    up1 = torch.tensor([0.0, 1.5, 4.0]) / 2
    up2 = torch.tensor([0.0, 2.0, 4.0]) / 2
    b = torch.tensor([2.0, 1.5, 4.0]) / 2
    c = torch.tensor([-2.0, -1.5, 4.0]) / 2
    d = torch.tensor([2.0, -1.5, 4.0]) / 2
    C = torch.zeros(3)
    F = torch.tensor([0.0, 0.0, 3.0])
    # The traversal order below determines which segments get drawn.
    path = [a, up1, up2, up1, b, d, c, a, C, b, d, C, c, C, F]
    return torch.stack([point.float() for point in path]) * scale
class AxisArgs(NamedTuple):  # pragma: no cover
    """Default Plotly styling applied to each of a subplot's x/y/z axes."""

    showgrid: bool = False
    zeroline: bool = False
    showline: bool = False
    ticks: str = ""
    showticklabels: bool = False
    backgroundcolor: str = "#fff"
    showaxeslabels: bool = False
class Lighting(NamedTuple):  # pragma: no cover
    """Default Plotly mesh lighting parameters (see plotly mesh3d.lighting)."""

    ambient: float = 0.8
    diffuse: float = 1.0
    fresnel: float = 0.0
    specular: float = 0.0
    roughness: float = 0.5
    facenormalsepsilon: float = 1e-6
    vertexnormalsepsilon: float = 1e-12
+ @torch.no_grad()
106
+ def plot_scene(
107
+ plots: Dict[str, Dict[str, Struct]],
108
+ *,
109
+ viewpoint_cameras: Optional[CamerasBase] = None,
110
+ ncols: int = 1,
111
+ camera_scale: float = 0.3,
112
+ pointcloud_max_points: int = 20000,
113
+ pointcloud_marker_size: int = 1,
114
+ raybundle_max_rays: int = 20000,
115
+ raybundle_max_points_per_ray: int = 1000,
116
+ raybundle_ray_point_marker_size: int = 1,
117
+ raybundle_ray_line_width: int = 1,
118
+ **kwargs,
119
+ ): # pragma: no cover
120
+ """
121
+ Main function to visualize Cameras, Meshes, Pointclouds, and RayBundle.
122
+ Plots input Cameras, Meshes, Pointclouds, and RayBundle data into named subplots,
123
+ with named traces based on the dictionary keys. Cameras are
124
+ rendered at the camera center location using a wireframe.
125
+
126
+ Args:
127
+ plots: A dict containing subplot and trace names,
128
+ as well as the Meshes, Cameras and Pointclouds objects to be rendered.
129
+ See below for examples of the format.
130
+ viewpoint_cameras: an instance of a Cameras object providing a location
131
+ to view the plotly plot from. If the batch size is equal
132
+ to the number of subplots, it is a one to one mapping.
133
+ If the batch size is 1, then that viewpoint will be used
134
+ for all the subplots will be viewed from that point.
135
+ Otherwise, the viewpoint_cameras will not be used.
136
+ ncols: the number of subplots per row
137
+ camera_scale: determines the size of the wireframe used to render cameras.
138
+ pointcloud_max_points: the maximum number of points to plot from
139
+ a pointcloud. If more are present, a random sample of size
140
+ pointcloud_max_points is used.
141
+ pointcloud_marker_size: the size of the points rendered by plotly
142
+ when plotting a pointcloud.
143
+ raybundle_max_rays: maximum number of rays of a RayBundle to visualize. Randomly
144
+ subsamples without replacement in case the number of rays is bigger than max_rays.
145
+ raybundle_max_points_per_ray: the maximum number of points per ray in RayBundle
146
+ to visualize. If more are present, a random sample of size
147
+ max_points_per_ray is used.
148
+ raybundle_ray_point_marker_size: the size of the ray points of a plotted RayBundle
149
+ raybundle_ray_line_width: the width of the plotted rays of a RayBundle
150
+ **kwargs: Accepts lighting (a Lighting object) and any of the args xaxis,
151
+ yaxis and zaxis which Plotly's scene accepts. Accepts axis_args,
152
+ which is an AxisArgs object that is applied to all 3 axes.
153
+ Example settings for axis_args and lighting are given at the
154
+ top of this file.
155
+
156
+ Example:
157
+
158
+ ..code-block::python
159
+
160
+ mesh = ...
161
+ point_cloud = ...
162
+ fig = plot_scene({
163
+ "subplot_title": {
164
+ "mesh_trace_title": mesh,
165
+ "pointcloud_trace_title": point_cloud
166
+ }
167
+ })
168
+ fig.show()
169
+
170
+ The above example will render one subplot which has both a mesh and pointcloud.
171
+
172
+ If the Meshes, Pointclouds, or Cameras objects are batched, then every object in that batch
173
+ will be plotted in a single trace.
174
+
175
+ ..code-block::python
176
+ mesh = ... # batch size 2
177
+ point_cloud = ... # batch size 2
178
+ fig = plot_scene({
179
+ "subplot_title": {
180
+ "mesh_trace_title": mesh,
181
+ "pointcloud_trace_title": point_cloud
182
+ }
183
+ })
184
+ fig.show()
185
+
186
+ The above example renders one subplot with 2 traces, each of which renders
187
+ both objects from their respective batched data.
188
+
189
+ Multiple subplots follow the same pattern:
190
+ ..code-block::python
191
+ mesh = ... # batch size 2
192
+ point_cloud = ... # batch size 2
193
+ fig = plot_scene({
194
+ "subplot1_title": {
195
+ "mesh_trace_title": mesh[0],
196
+ "pointcloud_trace_title": point_cloud[0]
197
+ },
198
+ "subplot2_title": {
199
+ "mesh_trace_title": mesh[1],
200
+ "pointcloud_trace_title": point_cloud[1]
201
+ }
202
+ },
203
+ ncols=2) # specify the number of subplots per row
204
+ fig.show()
205
+
206
+ The above example will render two subplots, each containing a mesh
207
+ and a pointcloud. The ncols argument will render two subplots in one row
208
+ instead of having them vertically stacked because the default is one subplot
209
+ per row.
210
+
211
+ To view plotly plots from a PyTorch3D camera's point of view, we can use
212
+ viewpoint_cameras:
213
+ ..code-block::python
214
+ mesh = ... # batch size 2
215
+ R, T = look_at_view_transform(2.7, 0, [0, 180]) # 2 camera angles, front and back
216
+ # Any instance of CamerasBase works, here we use FoVPerspectiveCameras
217
+ cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
218
+ fig = plot_scene({
219
+ "subplot1_title": {
220
+ "mesh_trace_title": mesh[0]
221
+ },
222
+ "subplot2_title": {
223
+ "mesh_trace_title": mesh[1]
224
+ }
225
+ },
226
+ viewpoint_cameras=cameras)
227
+ fig.show()
228
+
229
+ The above example will render the first subplot seen from the camera on the +z axis,
230
+ and the second subplot from the viewpoint of the camera on the -z axis.
231
+
232
+ We can visualize these cameras as well:
233
+ ..code-block::python
234
+ mesh = ...
235
+ R, T = look_at_view_transform(2.7, 0, [0, 180]) # 2 camera angles, front and back
236
+ # Any instance of CamerasBase works, here we use FoVPerspectiveCameras
237
+ cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
238
+ fig = plot_scene({
239
+ "subplot1_title": {
240
+ "mesh_trace_title": mesh,
241
+ "cameras_trace_title": cameras,
242
+ },
243
+ })
244
+ fig.show()
245
+
246
+ The above example will render one subplot with the mesh object
247
+ and two cameras.
248
+
249
+ RayBundle visualization is also supproted:
250
+ ..code-block::python
251
+ cameras = PerspectiveCameras(...)
252
+ ray_bundle = RayBundle(origins=..., lengths=..., directions=..., xys=...)
253
+ fig = plot_scene({
254
+ "subplot1_title": {
255
+ "ray_bundle_trace_title": ray_bundle,
256
+ "cameras_trace_title": cameras,
257
+ },
258
+ })
259
+ fig.show()
260
+
261
+ For an example of using kwargs, see below:
262
+ ..code-block::python
263
+ mesh = ...
264
+ point_cloud = ...
265
+ fig = plot_scene({
266
+ "subplot_title": {
267
+ "mesh_trace_title": mesh,
268
+ "pointcloud_trace_title": point_cloud
269
+ }
270
+ },
271
+ axis_args=AxisArgs(backgroundcolor="rgb(200,230,200)")) # kwarg axis_args
272
+ fig.show()
273
+
274
+ The above example will render each axis with the input background color.
275
+
276
+ See the tutorials in pytorch3d/docs/tutorials for more examples
277
+ (namely rendered_color_points.ipynb and rendered_textured_meshes.ipynb).
278
+ """
279
+
280
+ subplots = list(plots.keys())
281
+ fig = _gen_fig_with_subplots(len(subplots), ncols, subplots)
282
+ lighting = kwargs.get("lighting", Lighting())._asdict()
283
+ axis_args_dict = kwargs.get("axis_args", AxisArgs())._asdict()
284
+
285
+ # Set axis arguments to defaults defined at the top of this file
286
+ x_settings = {**axis_args_dict}
287
+ y_settings = {**axis_args_dict}
288
+ z_settings = {**axis_args_dict}
289
+
290
+ # Update the axes with any axis settings passed in as kwargs.
291
+ x_settings.update(**kwargs.get("xaxis", {}))
292
+ y_settings.update(**kwargs.get("yaxis", {}))
293
+ z_settings.update(**kwargs.get("zaxis", {}))
294
+
295
+ camera = {
296
+ "up": {
297
+ "x": 0.0,
298
+ "y": 1.0,
299
+ "z": 0.0,
300
+ } # set the up vector to match PyTorch3D world coordinates conventions
301
+ }
302
+ viewpoints_eye_at_up_world = None
303
+ if viewpoint_cameras:
304
+ n_viewpoint_cameras = len(viewpoint_cameras)
305
+ if n_viewpoint_cameras == len(subplots) or n_viewpoint_cameras == 1:
306
+ # Calculate the vectors eye, at, up in world space
307
+ # to initialize the position of the camera in
308
+ # the plotly figure
309
+ viewpoints_eye_at_up_world = camera_to_eye_at_up(
310
+ viewpoint_cameras.get_world_to_view_transform().cpu()
311
+ )
312
+ else:
313
+ msg = "Invalid number {} of viewpoint cameras were provided. Either 1 \
314
+ or {} cameras are required".format(
315
+ len(viewpoint_cameras), len(subplots)
316
+ )
317
+ warnings.warn(msg)
318
+
319
+ for subplot_idx in range(len(subplots)):
320
+ subplot_name = subplots[subplot_idx]
321
+ traces = plots[subplot_name]
322
+ for trace_name, struct in traces.items():
323
+ if isinstance(struct, Meshes):
324
+ _add_mesh_trace(fig, struct, trace_name, subplot_idx, ncols, lighting)
325
+ elif isinstance(struct, Pointclouds):
326
+ _add_pointcloud_trace(
327
+ fig,
328
+ struct,
329
+ trace_name,
330
+ subplot_idx,
331
+ ncols,
332
+ pointcloud_max_points,
333
+ pointcloud_marker_size,
334
+ )
335
+ elif isinstance(struct, CamerasBase):
336
+ _add_camera_trace(
337
+ fig, struct, trace_name, subplot_idx, ncols, camera_scale
338
+ )
339
+ elif _is_ray_bundle(struct):
340
+ _add_ray_bundle_trace(
341
+ fig,
342
+ struct,
343
+ trace_name,
344
+ subplot_idx,
345
+ ncols,
346
+ raybundle_max_rays,
347
+ raybundle_max_points_per_ray,
348
+ raybundle_ray_point_marker_size,
349
+ raybundle_ray_line_width,
350
+ )
351
+ else:
352
+ raise ValueError(
353
+ "struct {} is not a Cameras, Meshes, Pointclouds,".format(struct)
354
+ + " , RayBundle or HeterogeneousRayBundle object."
355
+ )
356
+
357
+ # Ensure update for every subplot.
358
+ plot_scene = "scene" + str(subplot_idx + 1)
359
+ current_layout = fig["layout"][plot_scene]
360
+ xaxis = current_layout["xaxis"]
361
+ yaxis = current_layout["yaxis"]
362
+ zaxis = current_layout["zaxis"]
363
+
364
+ # Update the axes with our above default and provided settings.
365
+ xaxis.update(**x_settings)
366
+ yaxis.update(**y_settings)
367
+ zaxis.update(**z_settings)
368
+
369
+ # update camera viewpoint if provided
370
+ if viewpoints_eye_at_up_world is not None:
371
+ # Use camera params for batch index or the first camera if only one provided.
372
+ # pyre-fixme[61]: `n_viewpoint_cameras` is undefined, or not always defined.
373
+ viewpoint_idx = min(n_viewpoint_cameras - 1, subplot_idx)
374
+
375
+ eye, at, up = (i[viewpoint_idx] for i in viewpoints_eye_at_up_world)
376
+ eye_x, eye_y, eye_z = eye.tolist()
377
+ at_x, at_y, at_z = at.tolist()
378
+ up_x, up_y, up_z = up.tolist()
379
+
380
+ # scale camera eye to plotly [-1, 1] ranges
381
+ x_range = xaxis["range"]
382
+ y_range = yaxis["range"]
383
+ z_range = zaxis["range"]
384
+
385
+ eye_x = _scale_camera_to_bounds(eye_x, x_range, True)
386
+ eye_y = _scale_camera_to_bounds(eye_y, y_range, True)
387
+ eye_z = _scale_camera_to_bounds(eye_z, z_range, True)
388
+
389
+ at_x = _scale_camera_to_bounds(at_x, x_range, True)
390
+ at_y = _scale_camera_to_bounds(at_y, y_range, True)
391
+ at_z = _scale_camera_to_bounds(at_z, z_range, True)
392
+
393
+ up_x = _scale_camera_to_bounds(up_x, x_range, False)
394
+ up_y = _scale_camera_to_bounds(up_y, y_range, False)
395
+ up_z = _scale_camera_to_bounds(up_z, z_range, False)
396
+
397
+ camera["eye"] = {"x": eye_x, "y": eye_y, "z": eye_z}
398
+ camera["center"] = {"x": at_x, "y": at_y, "z": at_z}
399
+ camera["up"] = {"x": up_x, "y": up_y, "z": up_z}
400
+
401
+ current_layout.update(
402
+ {
403
+ "xaxis": xaxis,
404
+ "yaxis": yaxis,
405
+ "zaxis": zaxis,
406
+ "aspectmode": "cube",
407
+ "camera": camera,
408
+ }
409
+ )
410
+
411
+ return fig
412
+
413
+
414
@torch.no_grad()
def plot_batch_individually(
    batched_structs: Union[
        List[Struct],
        Struct,
    ],
    *,
    viewpoint_cameras: Optional[CamerasBase] = None,
    ncols: int = 1,
    extend_struct: bool = True,
    subplot_titles: Optional[List[str]] = None,
    **kwargs,
):  # pragma: no cover
    """
    This is a higher level plotting function than plot_scene, for plotting
    Cameras, Meshes, Pointclouds, and RayBundle in simple cases. The simplest use
    is to plot a single Cameras, Meshes, Pointclouds, or a RayBundle object,
    where you just pass it in as a one element list. This will plot each batch
    element in a separate subplot.

    More generally, you can supply multiple Cameras, Meshes, Pointclouds, or RayBundle
    having the same batch size `n`. In this case, there will be `n` subplots,
    each depicting the corresponding batch element of all the inputs.

    In addition, you can include Cameras, Meshes, Pointclouds, or RayBundle of size 1 in
    the input. These will either be rendered in the first subplot
    (if extend_struct is False), or in every subplot.
    RayBundle includes ImplicitronRayBundle and HeterogeneousRaybundle.

    Args:
        batched_structs: a list of Cameras, Meshes, Pointclouds and RayBundle to be
            rendered. Each structure's corresponding batch element will be plotted in a
            single subplot, resulting in n subplots for a batch of size n. Every struct
            should either have the same batch size or be of batch size 1. See extend_struct
            and the description above for how batch size 1 structs are handled. Also accepts
            a single Cameras, Meshes, Pointclouds, and RayBundle object, which will have
            each individual element plotted in its own subplot.
        viewpoint_cameras: an instance of a Cameras object providing a location
            to view the plotly plot from. If the batch size is equal
            to the number of subplots, it is a one to one mapping.
            If the batch size is 1, then that viewpoint will be used
            for all the subplots will be viewed from that point.
            Otherwise, the viewpoint_cameras will not be used.
        ncols: the number of subplots per row
        extend_struct: if True, indicates that structs of batch size 1
            should be plotted in every subplot.
        subplot_titles: strings to name each subplot
        **kwargs: keyword arguments which are passed to plot_scene.
            See plot_scene documentation for details.

    Example:

    ..code-block::python

        mesh = ... # mesh of batch size 2
        point_cloud = ... # point_cloud of batch size 2
        fig = plot_batch_individually([mesh, point_cloud], subplot_titles=["plot1", "plot2"])
        fig.show()

        # this is equivalent to the below figure
        fig = plot_scene({
            "plot1": {
                "trace1-1": mesh[0],
                "trace1-2": point_cloud[0]
            },
            "plot2":{
                "trace2-1": mesh[1],
                "trace2-2": point_cloud[1]
            }
        })
        fig.show()

    The above example will render two subplots which each have both a mesh and pointcloud.
    For more examples look at the pytorch3d tutorials at `pytorch3d/docs/tutorials`,
    in particular the files rendered_color_points.ipynb and rendered_textured_meshes.ipynb.
    """

    # check that every batch is the same size or is size 1
    if _get_len(batched_structs) == 0:
        msg = "No structs to plot"
        warnings.warn(msg)
        return
    max_size = 0
    if isinstance(batched_structs, list):
        max_size = max(_get_len(s) for s in batched_structs)
        for struct in batched_structs:
            struct_len = _get_len(struct)
            if struct_len not in (1, max_size):
                msg = "invalid batch size {} provided: {}".format(struct_len, struct)
                raise ValueError(msg)
    else:
        max_size = _get_len(batched_structs)

    if max_size == 0:
        msg = "No data is provided with at least one element"
        raise ValueError(msg)

    if subplot_titles:
        if len(subplot_titles) != max_size:
            msg = "invalid number of subplot titles"
            raise ValueError(msg)

    # if we are dealing with HeterogeneousRayBundle of ImplicitronRayBundle create
    # first indexes (cumulative per-camera ray offsets) for faster slicing
    first_idxs = None
    if _is_heterogeneous_ray_bundle(batched_structs):
        # pyre-ignore[16]
        cumsum = batched_structs.camera_counts.cumsum(dim=0)
        first_idxs = torch.cat((cumsum.new_zeros((1,)), cumsum))

    scene_dictionary = {}
    # construct the scene dictionary
    for scene_num in range(max_size):
        subplot_title = (
            subplot_titles[scene_num]
            if subplot_titles
            else "subplot " + str(scene_num + 1)
        )
        scene_dictionary[subplot_title] = {}

        if isinstance(batched_structs, list):
            for i, batched_struct in enumerate(batched_structs):
                first_idxs = None
                if _is_heterogeneous_ray_bundle(batched_structs[i]):
                    # pyre-ignore[16]
                    cumsum = batched_struct.camera_counts.cumsum(dim=0)
                    first_idxs = torch.cat((cumsum.new_zeros((1,)), cumsum))
                # check for whether this struct needs to be extended
                batched_struct_len = _get_len(batched_struct)
                # BUGFIX: skip based on the subplot index (scene_num), not the
                # struct's position in the input list (i). A batch-size-1
                # struct must appear in the first subplot regardless of where
                # it sits in the list, and only be extended to later subplots
                # when extend_struct is True (see the docstring above).
                if scene_num >= batched_struct_len and not extend_struct:
                    continue
                _add_struct_from_batch(
                    batched_struct,
                    scene_num,
                    subplot_title,
                    scene_dictionary,
                    i + 1,
                    first_idxs=first_idxs,
                )
        else:  # batched_structs is a single struct
            _add_struct_from_batch(
                batched_structs,
                scene_num,
                subplot_title,
                scene_dictionary,
                first_idxs=first_idxs,
            )

    return plot_scene(
        scene_dictionary, viewpoint_cameras=viewpoint_cameras, ncols=ncols, **kwargs
    )
565
+
566
+
567
def _add_struct_from_batch(
    batched_struct: Struct,
    scene_num: int,
    subplot_title: str,
    scene_dictionary: Dict[str, Dict[str, Struct]],
    trace_idx: int = 1,
    first_idxs: Optional[torch.Tensor] = None,
) -> None:  # pragma: no cover
    """
    Adds the struct corresponding to the given scene_num index to
    a provided scene_dictionary to be passed in to plot_scene

    Args:
        batched_struct: the batched data structure to add to the dict
        scene_num: the subplot from plot_batch_individually which this struct
            should be added to
        subplot_title: the title of the subplot
        scene_dictionary: the dictionary to add the indexed struct to
        trace_idx: the trace number, starting at 1 for this struct's trace
        first_idxs: for a heterogeneous ray bundle, cumulative per-camera ray
            offsets (length n_cameras + 1) used to slice out the rays of the
            scene_num-th camera; None for all other struct types.
    """
    struct = None
    if isinstance(batched_struct, CamerasBase):
        # we can't index directly into camera batches
        R, T = batched_struct.R, batched_struct.T
        # clamp to the last element so batch-size-1 structs can be reused
        # for every subplot
        r_idx = min(scene_num, len(R) - 1)
        t_idx = min(scene_num, len(T) - 1)
        R = R[r_idx].unsqueeze(0)
        T = T[t_idx].unsqueeze(0)
        # NOTE(review): this rebuilds a plain CamerasBase from R/T only, so any
        # subclass-specific parameters (e.g. focal length) are dropped — only
        # the camera pose wireframe is plotted downstream; confirm intended.
        struct = CamerasBase(device=batched_struct.device, R=R, T=T)
    elif _is_ray_bundle(batched_struct) and not _is_heterogeneous_ray_bundle(
        batched_struct
    ):
        # for RayBundle we treat the camera count as the batch index
        struct_idx = min(scene_num, _get_len(batched_struct) - 1)

        struct = RayBundle(
            **{
                attr: getattr(batched_struct, attr)[struct_idx]
                for attr in ["origins", "directions", "lengths", "xys"]
            }
        )
    elif _is_heterogeneous_ray_bundle(batched_struct):
        # for RayBundle we treat the camera count as the batch index;
        # rays of one camera are stored contiguously, so slice them out
        # with the precomputed per-camera offsets in first_idxs
        struct_idx = min(scene_num, _get_len(batched_struct) - 1)

        struct = RayBundle(
            **{
                attr: getattr(batched_struct, attr)[
                    # pyre-ignore[16]
                    first_idxs[struct_idx] : first_idxs[struct_idx + 1]
                ]
                for attr in ["origins", "directions", "lengths", "xys"]
            }
        )

    else:  # batched meshes and pointclouds are indexable
        struct_idx = min(scene_num, _get_len(batched_struct) - 1)
        # pyre-ignore[16]
        struct = batched_struct[struct_idx]
    trace_name = "trace{}-{}".format(scene_num + 1, trace_idx)
    scene_dictionary[subplot_title][trace_name] = struct
628
+
629
+
630
def _add_mesh_trace(
    fig: go.Figure,
    meshes: Meshes,
    trace_name: str,
    subplot_idx: int,
    ncols: int,
    lighting: Lighting,
) -> None:  # pragma: no cover
    """
    Adds a trace rendering a Meshes object to the passed in figure, with
    a given name and in a specific subplot.

    Args:
        fig: plotly figure to add the trace within.
        meshes: Meshes object to render. It can be batched.
        trace_name: name to label the trace with.
        subplot_idx: identifies the subplot, with 0 being the top left.
        ncols: the number of subplots per row.
        lighting: a Lighting object that specifies the Mesh3D lighting.
            NOTE(review): plot_scene actually passes Lighting()._asdict(),
            i.e. a plain dict, which go.Mesh3d also accepts — confirm before
            relying on this annotation.
    """

    # collapse the batch into one scene-level mesh so a single trace is added
    mesh = join_meshes_as_scene(meshes)
    mesh = mesh.detach().cpu()
    verts = mesh.verts_packed()
    faces = mesh.faces_packed()
    # If mesh has vertex colors or face colors, use them
    # for figure, otherwise use plotly's default colors.
    verts_rgb = None
    faces_rgb = None
    if isinstance(mesh.textures, TexturesVertex):
        verts_rgb = mesh.textures.verts_features_packed()
        # NOTE(review): clamp_ is in-place and detach()/cpu() may share storage
        # with the caller's textures when the mesh is already on CPU, so this
        # can mutate the input's feature values — confirm this is acceptable.
        verts_rgb.clamp_(min=0.0, max=1.0)
        # scale the [0, 1] features into the 0-255 color range
        verts_rgb = torch.tensor(255.0) * verts_rgb
    if isinstance(mesh.textures, TexturesAtlas):
        atlas = mesh.textures.atlas_packed()
        # If K==1, the atlas holds a single RGB color per face: use it directly
        if atlas.shape[1] == 1 and atlas.shape[3] == 3:
            faces_rgb = atlas[:, 0, 0]

    # Reposition the unused vertices to be "inside" the object
    # (i.e. they won't be visible in the plot).
    verts_used = torch.zeros((verts.shape[0],), dtype=torch.bool)
    verts_used[torch.unique(faces)] = True
    verts_center = verts[verts_used].mean(0)
    verts[~verts_used] = verts_center

    # map the flat subplot index to the 1-based (row, col) plotly expects
    row, col = subplot_idx // ncols + 1, subplot_idx % ncols + 1
    # pyre-fixme[16]: `Figure` has no attribute `add_trace`.
    fig.add_trace(
        go.Mesh3d(
            x=verts[:, 0],
            y=verts[:, 1],
            z=verts[:, 2],
            vertexcolor=verts_rgb,
            facecolor=faces_rgb,
            i=faces[:, 0],
            j=faces[:, 1],
            k=faces[:, 2],
            lighting=lighting,
            name=trace_name,
        ),
        row=row,
        col=col,
    )

    # Access the current subplot's scene configuration
    plot_scene = "scene" + str(subplot_idx + 1)
    current_layout = fig["layout"][plot_scene]

    # update the bounds of the axes for the current trace
    max_expand = (verts.max(0)[0] - verts.min(0)[0]).max()
    _update_axes_bounds(verts_center, max_expand, current_layout)
702
+
703
+
704
def _add_pointcloud_trace(
    fig: go.Figure,
    pointclouds: Pointclouds,
    trace_name: str,
    subplot_idx: int,
    ncols: int,
    max_points_per_pointcloud: int,
    marker_size: int,
) -> None:  # pragma: no cover
    """
    Adds a trace rendering a Pointclouds object to the passed in figure, with
    a given name and in a specific subplot.

    Args:
        fig: plotly figure to add the trace within.
        pointclouds: Pointclouds object to render. It can be batched.
        trace_name: name to label the trace with.
        subplot_idx: identifies the subplot, with 0 being the top left.
        ncols: the number of subplots per row.
        max_points_per_pointcloud: the number of points to render, which are randomly sampled.
        marker_size: the size of the rendered points
    """
    pointclouds = pointclouds.detach().cpu().subsample(max_points_per_pointcloud)
    verts = pointclouds.points_packed()
    features = pointclouds.features_packed()

    # Build per-point CSS color strings from the features when present;
    # otherwise leave color as None and let plotly pick defaults.
    color = None
    if features is not None:
        if features.shape[1] == 4:  # rgba
            # BUGFIX: a 4-component color needs the "rgba(...)" CSS function;
            # the previous "rgb(%d, %d, %d, %f)" template produced invalid
            # color strings and the alpha channel was never applied.
            template = "rgba(%d, %d, %d, %f)"
            rgb = (features[:, :3].clamp(0.0, 1.0) * 255).int()
            color = [template % (*rgb_, a_) for rgb_, a_ in zip(rgb, features[:, 3])]
        elif features.shape[1] == 3:
            template = "rgb(%d, %d, %d)"
            rgb = (features.clamp(0.0, 1.0) * 255).int()
            color = [template % (r, g, b) for r, g, b in rgb]

    # map the flat subplot index to the 1-based (row, col) plotly expects
    row = subplot_idx // ncols + 1
    col = subplot_idx % ncols + 1
    # pyre-fixme[16]: `Figure` has no attribute `add_trace`.
    fig.add_trace(
        go.Scatter3d(
            x=verts[:, 0],
            y=verts[:, 1],
            z=verts[:, 2],
            marker={"color": color, "size": marker_size},
            mode="markers",
            name=trace_name,
        ),
        row=row,
        col=col,
    )

    # Access the current subplot's scene configuration
    plot_scene = "scene" + str(subplot_idx + 1)
    current_layout = fig["layout"][plot_scene]

    # update the bounds of the axes for the current trace
    verts_center = verts.mean(0)
    max_expand = (verts.max(0)[0] - verts.min(0)[0]).max()
    _update_axes_bounds(verts_center, max_expand, current_layout)
766
+
767
+
768
def _add_camera_trace(
    fig: go.Figure,
    cameras: CamerasBase,
    trace_name: str,
    subplot_idx: int,
    ncols: int,
    camera_scale: float,
) -> None:  # pragma: no cover
    """
    Render a Cameras object as a wireframe trace in one subplot of the figure.

    Args:
        fig: plotly figure to add the trace within.
        cameras: the Cameras object to render. It can be batched.
        trace_name: name to label the trace with.
        subplot_idx: identifies the subplot, with 0 being the top left.
        ncols: the number of subplots per row.
        camera_scale: the size of the wireframe used to render the Cameras object.
    """
    wireframe = get_camera_wireframe(camera_scale).to(cameras.device)
    cam_to_world = cameras.get_world_to_view_transform().inverse()
    cam_wires_trans = cam_to_world.transform_points(wireframe).detach().cpu()
    # a single camera comes back without a batch dimension; add one
    if cam_wires_trans.dim() < 3:
        cam_wires_trans = cam_wires_trans.unsqueeze(0)

    # Concatenate every camera's wireframe points into one tensor, with a NaN
    # row between cameras so plotly breaks the polyline instead of connecting
    # points that belong to different cameras.
    nan_row = torch.Tensor([[float("NaN")] * 3])
    pieces = [cam_wires_trans[0]]
    for wire in cam_wires_trans[1:]:
        pieces.append(nan_row)
        pieces.append(wire)
    combined_wires = torch.cat(pieces)
    x, y, z = combined_wires.detach().cpu().numpy().T.astype(float)

    # map the flat subplot index to the 1-based (row, col) plotly expects
    row, col = subplot_idx // ncols + 1, subplot_idx % ncols + 1
    # pyre-fixme[16]: `Figure` has no attribute `add_trace`.
    fig.add_trace(
        go.Scatter3d(x=x, y=y, z=z, marker={"size": 1}, name=trace_name),
        row=row,
        col=col,
    )

    # grow this subplot's axis bounds to contain the camera wireframes
    scene_key = "scene" + str(subplot_idx + 1)
    scene_layout = fig["layout"][scene_key]
    flattened = cam_wires_trans.flatten(0, 1)
    wires_center = flattened.mean(0)
    spread = (flattened.max(0)[0] - flattened.min(0)[0]).max()
    _update_axes_bounds(wires_center, spread, scene_layout)
822
+
823
+
824
def _add_ray_bundle_trace(
    fig: go.Figure,
    ray_bundle: Union[RayBundle, HeterogeneousRayBundle],
    trace_name: str,
    subplot_idx: int,
    ncols: int,
    max_rays: int,
    max_points_per_ray: int,
    marker_size: int,
    line_width: int,
) -> None:  # pragma: no cover
    """
    Adds a trace rendering a ray bundle object
    to the passed in figure, with a given name and in a specific subplot.

    Args:
        fig: plotly figure to add the trace within.
        ray_bundle: the RayBundle, ImplicitronRayBundle or HeterogeneousRaybundle to render.
            It can be batched.
        trace_name: name to label the trace with.
        subplot_idx: identifies the subplot, with 0 being the top left.
        ncols: the number of subplots per row.
        max_rays: maximum number of plotted rays in total. Randomly subsamples
            without replacement in case the number of rays is bigger than max_rays.
        max_points_per_ray: maximum number of points plotted per ray.
        marker_size: the size of the ray point markers.
        line_width: the width of the ray lines.
    """

    n_pts_per_ray = ray_bundle.lengths.shape[-1]
    n_rays = ray_bundle.lengths.shape[:-1].numel()

    # flatten all batches of rays into a single big bundle
    ray_bundle_flat = RayBundle(
        **{
            attr: torch.flatten(getattr(ray_bundle, attr), start_dim=0, end_dim=-2)
            for attr in ["origins", "directions", "lengths", "xys"]
        }
    )

    # subsample the rays (if needed)
    if n_rays > max_rays:
        indices_rays = torch.randperm(n_rays)[:max_rays]
        ray_bundle_flat = RayBundle(
            **{
                attr: getattr(ray_bundle_flat, attr)[indices_rays]
                for attr in ["origins", "directions", "lengths", "xys"]
            }
        )

    # make ray line endpoints: each ray is drawn from its nearest to its
    # farthest sampled depth
    min_max_ray_depth = torch.stack(
        [
            ray_bundle_flat.lengths.min(dim=1).values,
            ray_bundle_flat.lengths.max(dim=1).values,
        ],
        dim=-1,
    )
    ray_lines_endpoints = ray_bundle_to_ray_points(
        ray_bundle_flat._replace(lengths=min_max_ray_depth)
    )

    # make the ray lines for plotly plotting
    nan_tensor = torch.tensor(
        [[float("NaN")] * 3],
        device=ray_lines_endpoints.device,
        dtype=ray_lines_endpoints.dtype,
    )
    # BUGFIX: the accumulator previously started as torch.empty((1, 3)) — an
    # uninitialized row of arbitrary memory that was then plotted as a stray
    # marker at a random location. Start from a NaN row instead, which plotly
    # treats as a line break and never draws.
    ray_lines = nan_tensor.clone()
    for ray_line in ray_lines_endpoints:
        # We combine the ray lines into a single tensor to plot them in a
        # single trace. The NaNs are inserted between sets of ray lines
        # so that the lines drawn by Plotly are not drawn between
        # lines that belong to different rays.
        ray_lines = torch.cat((ray_lines, nan_tensor, ray_line))
    x, y, z = ray_lines.detach().cpu().numpy().T.astype(float)
    # map the flat subplot index to the 1-based (row, col) plotly expects
    row, col = subplot_idx // ncols + 1, subplot_idx % ncols + 1
    # pyre-fixme[16]: `Figure` has no attribute `add_trace`.
    fig.add_trace(
        go.Scatter3d(
            x=x,
            y=y,
            z=z,
            marker={"size": 0.1},
            line={"width": line_width},
            name=trace_name,
        ),
        row=row,
        col=col,
    )

    # subsample the ray points (if needed), independently per ray
    if n_pts_per_ray > max_points_per_ray:
        indices_ray_pts = torch.cat(
            [
                torch.randperm(n_pts_per_ray)[:max_points_per_ray] + ri * n_pts_per_ray
                for ri in range(ray_bundle_flat.lengths.shape[0])
            ]
        )
        ray_bundle_flat = ray_bundle_flat._replace(
            lengths=ray_bundle_flat.lengths.reshape(-1)[indices_ray_pts].reshape(
                ray_bundle_flat.lengths.shape[0], -1
            )
        )

    # plot the ray points
    ray_points = (
        ray_bundle_to_ray_points(ray_bundle_flat)
        .view(-1, 3)
        .detach()
        .cpu()
        .numpy()
        .astype(float)
    )
    fig.add_trace(
        go.Scatter3d(
            x=ray_points[:, 0],
            y=ray_points[:, 1],
            z=ray_points[:, 2],
            mode="markers",
            name=trace_name + "_points",
            marker={"size": marker_size},
        ),
        row=row,
        col=col,
    )

    # Access the current subplot's scene configuration
    plot_scene = "scene" + str(subplot_idx + 1)
    current_layout = fig["layout"][plot_scene]

    # update the bounds of the axes for the current trace; computed from the
    # full, unsubsampled bundle so the bounds don't depend on random sampling
    all_ray_points = ray_bundle_to_ray_points(ray_bundle).reshape(-1, 3)
    ray_points_center = all_ray_points.mean(dim=0)
    max_expand = (all_ray_points.max(0)[0] - all_ray_points.min(0)[0]).max().item()
    _update_axes_bounds(ray_points_center, float(max_expand), current_layout)
960
+
961
+
962
def _gen_fig_with_subplots(
    batch_size: int, ncols: int, subplot_titles: List[str]
):  # pragma: no cover
    """
    Create a plotly figure laid out as a grid of 3D ("scene") subplots.

    Args:
        batch_size: the number of elements in the batch of objects to be visualized,
            i.e. the total number of subplots.
        ncols: number of subplots in the same row.
        subplot_titles: titles for the subplot(s). list of strings of length batch_size.

    Returns:
        Plotly figure with ncols subplots per row, and batch_size subplots.
    """
    # ceil(batch_size / ncols) rows, allowing a partially filled last row
    n_rows = -(-batch_size // ncols)
    # every cell hosts a 3D scene
    specs = [[{"type": "scene"} for _ in range(ncols)] for _ in range(n_rows)]
    # subplot_titles must have one title per subplot
    return make_subplots(
        rows=n_rows,
        cols=ncols,
        specs=specs,
        subplot_titles=subplot_titles,
        column_widths=[1.0] * ncols,
    )
991
+
992
+
993
def _update_axes_bounds(
    verts_center: torch.Tensor,
    max_expand: float,
    current_layout: go.Scene,
) -> None:  # pragma: no cover
    """
    Expand a subplot's x/y/z axis ranges so they contain a new trace.

    Given a trace's center point and maximum spread, grow the ranges stored on
    the scene layout so that, within a subplot, the bounds capture all traces
    added so far.

    Args:
        verts_center: tensor of size (3) corresponding to a trace's vertices' center point.
        max_expand: the maximum spread in any dimension of the trace's vertices.
        current_layout: the plotly figure layout scene corresponding to the referenced trace.
    """
    verts_center = verts_center.detach().cpu()
    lower = verts_center - max_expand
    upper = verts_center + max_expand
    # transpose to one (min, max) row per axis
    per_axis_bounds = torch.t(torch.stack((lower, upper)))

    updated_axes = {}
    for axis_name, axis_bounds in zip(("xaxis", "yaxis", "zaxis"), per_axis_bounds):
        # merge with any range already recorded by earlier traces
        # pyre-fixme[16]: `Scene` has no attribute `__getitem__`.
        previous_range = current_layout[axis_name]["range"]
        if previous_range is not None:
            axis_bounds[0] = min(axis_bounds[0], previous_range[0])
            axis_bounds[1] = max(axis_bounds[1], previous_range[1])
        updated_axes[axis_name] = {"range": axis_bounds}

    # pyre-fixme[16]: `Scene` has no attribute `update`.
    current_layout.update(updated_axes)
1034
+
1035
+
1036
+ def _scale_camera_to_bounds(
1037
+ coordinate: float, axis_bounds: Tuple[float, float], is_position: bool
1038
+ ) -> float: # pragma: no cover
1039
+ """
1040
+ We set our plotly plot's axes' bounding box to [-1,1]x[-1,1]x[-1,1]. As such,
1041
+ the plotly camera location has to be scaled accordingly to have its world coordinates
1042
+ correspond to its relative plotted coordinates for viewing the plotly plot.
1043
+ This function does the scaling and offset to transform the coordinates.
1044
+
1045
+ Args:
1046
+ coordinate: the float value to be transformed
1047
+ axis_bounds: the bounds of the plotly plot for the axis which
1048
+ the coordinate argument refers to
1049
+ is_position: If true, the float value is the coordinate of a position, and so must
1050
+ be moved in to [-1,1]. Otherwise it is a component of a direction, and so needs only
1051
+ to be scaled.
1052
+ """
1053
+ scale = (axis_bounds[1] - axis_bounds[0]) / 2
1054
+ if not is_position:
1055
+ return coordinate / scale
1056
+ offset = (axis_bounds[1] / scale) - 1
1057
+ return coordinate / scale - offset