Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_base.yaml +79 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_base.yaml +42 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_nerf_blender.yaml +56 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_base.yaml +80 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_base.yaml +38 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_ad.yaml +12 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer.yaml +18 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer_angle_w.yaml +7 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet_noharm.yaml +11 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce.yaml +31 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerf_wce.yaml +4 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerformer.yaml +4 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_ad_hypernet.yaml +4 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_wce.yaml +4 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_co3dv2_base.yaml +8 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_idr.yaml +57 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf.yaml +3 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn.yaml +29 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce.yaml +30 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce_noharm.yaml +11 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerf.yaml +4 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/__pycache__/__init__.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend.cu +216 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend.h +103 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend_cpu.cpp +129 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/ext.cpp +193 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals.cu +301 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals.h +84 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals_cpu.cpp +215 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter.cu +91 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/knn/knn_cpu.cpp +128 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.cu +565 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.h +62 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_cpu.cpp +111 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_utils.h +145 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/tables.h +294 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/mesh_normal_consistency/mesh_normal_consistency.h +30 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/mesh_normal_consistency/mesh_normal_consistency_cpu.cpp +53 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.cu +241 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor_cpu.cpp +70 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/constants.h +19 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/README.md +5 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/commands.h +505 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.backward.gpu.cu +9 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.backward_dbg.gpu.cu +9 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.calc_gradients.gpu.cu +9 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.calc_signature.gpu.cu +9 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.construct.gpu.cu +9 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.create_selector.gpu.cu +9 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.destruct.gpu.cu +9 -0
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_base.yaml
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- default_config
|
| 3 |
+
- _self_
|
| 4 |
+
exp_dir: ./data/exps/overfit_base/
|
| 5 |
+
training_loop_ImplicitronTrainingLoop_args:
|
| 6 |
+
visdom_port: 8097
|
| 7 |
+
visualize_interval: 0
|
| 8 |
+
max_epochs: 1000
|
| 9 |
+
data_source_ImplicitronDataSource_args:
|
| 10 |
+
data_loader_map_provider_class_type: SequenceDataLoaderMapProvider
|
| 11 |
+
dataset_map_provider_class_type: JsonIndexDatasetMapProvider
|
| 12 |
+
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
| 13 |
+
dataset_length_train: 1000
|
| 14 |
+
dataset_length_val: 1
|
| 15 |
+
num_workers: 8
|
| 16 |
+
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
| 17 |
+
dataset_root: ${oc.env:CO3D_DATASET_ROOT}
|
| 18 |
+
n_frames_per_sequence: -1
|
| 19 |
+
test_on_train: true
|
| 20 |
+
test_restrict_sequence_id: 0
|
| 21 |
+
dataset_JsonIndexDataset_args:
|
| 22 |
+
load_point_clouds: false
|
| 23 |
+
mask_depths: false
|
| 24 |
+
mask_images: false
|
| 25 |
+
model_factory_ImplicitronModelFactory_args:
|
| 26 |
+
model_class_type: "OverfitModel"
|
| 27 |
+
model_OverfitModel_args:
|
| 28 |
+
loss_weights:
|
| 29 |
+
loss_mask_bce: 1.0
|
| 30 |
+
loss_prev_stage_mask_bce: 1.0
|
| 31 |
+
loss_autodecoder_norm: 0.01
|
| 32 |
+
loss_rgb_mse: 1.0
|
| 33 |
+
loss_prev_stage_rgb_mse: 1.0
|
| 34 |
+
output_rasterized_mc: false
|
| 35 |
+
chunk_size_grid: 102400
|
| 36 |
+
render_image_height: 400
|
| 37 |
+
render_image_width: 400
|
| 38 |
+
share_implicit_function_across_passes: false
|
| 39 |
+
implicit_function_class_type: "NeuralRadianceFieldImplicitFunction"
|
| 40 |
+
implicit_function_NeuralRadianceFieldImplicitFunction_args:
|
| 41 |
+
n_harmonic_functions_xyz: 10
|
| 42 |
+
n_harmonic_functions_dir: 4
|
| 43 |
+
n_hidden_neurons_xyz: 256
|
| 44 |
+
n_hidden_neurons_dir: 128
|
| 45 |
+
n_layers_xyz: 8
|
| 46 |
+
append_xyz:
|
| 47 |
+
- 5
|
| 48 |
+
coarse_implicit_function_class_type: "NeuralRadianceFieldImplicitFunction"
|
| 49 |
+
coarse_implicit_function_NeuralRadianceFieldImplicitFunction_args:
|
| 50 |
+
n_harmonic_functions_xyz: 10
|
| 51 |
+
n_harmonic_functions_dir: 4
|
| 52 |
+
n_hidden_neurons_xyz: 256
|
| 53 |
+
n_hidden_neurons_dir: 128
|
| 54 |
+
n_layers_xyz: 8
|
| 55 |
+
append_xyz:
|
| 56 |
+
- 5
|
| 57 |
+
raysampler_AdaptiveRaySampler_args:
|
| 58 |
+
n_rays_per_image_sampled_from_mask: 1024
|
| 59 |
+
scene_extent: 8.0
|
| 60 |
+
n_pts_per_ray_training: 64
|
| 61 |
+
n_pts_per_ray_evaluation: 64
|
| 62 |
+
stratified_point_sampling_training: true
|
| 63 |
+
stratified_point_sampling_evaluation: false
|
| 64 |
+
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
| 65 |
+
n_pts_per_ray_fine_training: 64
|
| 66 |
+
n_pts_per_ray_fine_evaluation: 64
|
| 67 |
+
append_coarse_samples_to_fine: true
|
| 68 |
+
density_noise_std_train: 1.0
|
| 69 |
+
optimizer_factory_ImplicitronOptimizerFactory_args:
|
| 70 |
+
breed: Adam
|
| 71 |
+
weight_decay: 0.0
|
| 72 |
+
lr_policy: MultiStepLR
|
| 73 |
+
multistep_lr_milestones: []
|
| 74 |
+
lr: 0.0005
|
| 75 |
+
gamma: 0.1
|
| 76 |
+
momentum: 0.9
|
| 77 |
+
betas:
|
| 78 |
+
- 0.9
|
| 79 |
+
- 0.999
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_base.yaml
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- overfit_base
|
| 3 |
+
- _self_
|
| 4 |
+
data_source_ImplicitronDataSource_args:
|
| 5 |
+
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
| 6 |
+
batch_size: 1
|
| 7 |
+
dataset_length_train: 1000
|
| 8 |
+
dataset_length_val: 1
|
| 9 |
+
num_workers: 8
|
| 10 |
+
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
| 11 |
+
assert_single_seq: true
|
| 12 |
+
n_frames_per_sequence: -1
|
| 13 |
+
test_restrict_sequence_id: 0
|
| 14 |
+
test_on_train: false
|
| 15 |
+
model_factory_ImplicitronModelFactory_args:
|
| 16 |
+
model_class_type: "OverfitModel"
|
| 17 |
+
model_OverfitModel_args:
|
| 18 |
+
render_image_height: 800
|
| 19 |
+
render_image_width: 800
|
| 20 |
+
log_vars:
|
| 21 |
+
- loss_rgb_psnr_fg
|
| 22 |
+
- loss_rgb_psnr
|
| 23 |
+
- loss_eikonal
|
| 24 |
+
- loss_prev_stage_rgb_psnr
|
| 25 |
+
- loss_mask_bce
|
| 26 |
+
- loss_prev_stage_mask_bce
|
| 27 |
+
- loss_rgb_mse
|
| 28 |
+
- loss_prev_stage_rgb_mse
|
| 29 |
+
- loss_depth_abs
|
| 30 |
+
- loss_depth_abs_fg
|
| 31 |
+
- loss_kl
|
| 32 |
+
- loss_mask_neg_iou
|
| 33 |
+
- objective
|
| 34 |
+
- epoch
|
| 35 |
+
- sec/it
|
| 36 |
+
optimizer_factory_ImplicitronOptimizerFactory_args:
|
| 37 |
+
lr: 0.0005
|
| 38 |
+
multistep_lr_milestones:
|
| 39 |
+
- 200
|
| 40 |
+
- 300
|
| 41 |
+
training_loop_ImplicitronTrainingLoop_args:
|
| 42 |
+
max_epochs: 400
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_nerf_blender.yaml
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- overfit_singleseq_base
|
| 3 |
+
- _self_
|
| 4 |
+
exp_dir: "./data/overfit_nerf_blender_repro/${oc.env:BLENDER_SINGLESEQ_CLASS}"
|
| 5 |
+
data_source_ImplicitronDataSource_args:
|
| 6 |
+
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
| 7 |
+
dataset_length_train: 100
|
| 8 |
+
dataset_map_provider_class_type: BlenderDatasetMapProvider
|
| 9 |
+
dataset_map_provider_BlenderDatasetMapProvider_args:
|
| 10 |
+
base_dir: ${oc.env:BLENDER_DATASET_ROOT}/${oc.env:BLENDER_SINGLESEQ_CLASS}
|
| 11 |
+
n_known_frames_for_test: null
|
| 12 |
+
object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS}
|
| 13 |
+
path_manager_factory_class_type: PathManagerFactory
|
| 14 |
+
path_manager_factory_PathManagerFactory_args:
|
| 15 |
+
silence_logs: true
|
| 16 |
+
|
| 17 |
+
model_factory_ImplicitronModelFactory_args:
|
| 18 |
+
model_class_type: "OverfitModel"
|
| 19 |
+
model_OverfitModel_args:
|
| 20 |
+
mask_images: false
|
| 21 |
+
raysampler_class_type: AdaptiveRaySampler
|
| 22 |
+
raysampler_AdaptiveRaySampler_args:
|
| 23 |
+
n_pts_per_ray_training: 64
|
| 24 |
+
n_pts_per_ray_evaluation: 64
|
| 25 |
+
n_rays_per_image_sampled_from_mask: 4096
|
| 26 |
+
stratified_point_sampling_training: true
|
| 27 |
+
stratified_point_sampling_evaluation: false
|
| 28 |
+
scene_extent: 2.0
|
| 29 |
+
scene_center:
|
| 30 |
+
- 0.0
|
| 31 |
+
- 0.0
|
| 32 |
+
- 0.0
|
| 33 |
+
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
| 34 |
+
density_noise_std_train: 0.0
|
| 35 |
+
n_pts_per_ray_fine_training: 128
|
| 36 |
+
n_pts_per_ray_fine_evaluation: 128
|
| 37 |
+
raymarcher_EmissionAbsorptionRaymarcher_args:
|
| 38 |
+
blend_output: false
|
| 39 |
+
loss_weights:
|
| 40 |
+
loss_rgb_mse: 1.0
|
| 41 |
+
loss_prev_stage_rgb_mse: 1.0
|
| 42 |
+
loss_mask_bce: 0.0
|
| 43 |
+
loss_prev_stage_mask_bce: 0.0
|
| 44 |
+
loss_autodecoder_norm: 0.00
|
| 45 |
+
|
| 46 |
+
optimizer_factory_ImplicitronOptimizerFactory_args:
|
| 47 |
+
exponential_lr_step_size: 3001
|
| 48 |
+
lr_policy: LinearExponential
|
| 49 |
+
linear_exponential_lr_milestone: 200
|
| 50 |
+
|
| 51 |
+
training_loop_ImplicitronTrainingLoop_args:
|
| 52 |
+
max_epochs: 6000
|
| 53 |
+
metric_print_interval: 10
|
| 54 |
+
store_checkpoints_purge: 3
|
| 55 |
+
test_when_finished: true
|
| 56 |
+
validation_interval: 100
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_base.yaml
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- default_config
|
| 3 |
+
- _self_
|
| 4 |
+
exp_dir: ./data/exps/base/
|
| 5 |
+
training_loop_ImplicitronTrainingLoop_args:
|
| 6 |
+
visdom_port: 8097
|
| 7 |
+
visualize_interval: 0
|
| 8 |
+
max_epochs: 1000
|
| 9 |
+
data_source_ImplicitronDataSource_args:
|
| 10 |
+
data_loader_map_provider_class_type: SequenceDataLoaderMapProvider
|
| 11 |
+
dataset_map_provider_class_type: JsonIndexDatasetMapProvider
|
| 12 |
+
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
| 13 |
+
dataset_length_train: 1000
|
| 14 |
+
dataset_length_val: 1
|
| 15 |
+
num_workers: 8
|
| 16 |
+
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
| 17 |
+
dataset_root: ${oc.env:CO3D_DATASET_ROOT}
|
| 18 |
+
n_frames_per_sequence: -1
|
| 19 |
+
test_on_train: true
|
| 20 |
+
test_restrict_sequence_id: 0
|
| 21 |
+
dataset_JsonIndexDataset_args:
|
| 22 |
+
load_point_clouds: false
|
| 23 |
+
mask_depths: false
|
| 24 |
+
mask_images: false
|
| 25 |
+
model_factory_ImplicitronModelFactory_args:
|
| 26 |
+
model_GenericModel_args:
|
| 27 |
+
loss_weights:
|
| 28 |
+
loss_mask_bce: 1.0
|
| 29 |
+
loss_prev_stage_mask_bce: 1.0
|
| 30 |
+
loss_autodecoder_norm: 0.01
|
| 31 |
+
loss_rgb_mse: 1.0
|
| 32 |
+
loss_prev_stage_rgb_mse: 1.0
|
| 33 |
+
output_rasterized_mc: false
|
| 34 |
+
chunk_size_grid: 102400
|
| 35 |
+
render_image_height: 400
|
| 36 |
+
render_image_width: 400
|
| 37 |
+
num_passes: 2
|
| 38 |
+
implicit_function_NeuralRadianceFieldImplicitFunction_args:
|
| 39 |
+
n_harmonic_functions_xyz: 10
|
| 40 |
+
n_harmonic_functions_dir: 4
|
| 41 |
+
n_hidden_neurons_xyz: 256
|
| 42 |
+
n_hidden_neurons_dir: 128
|
| 43 |
+
n_layers_xyz: 8
|
| 44 |
+
append_xyz:
|
| 45 |
+
- 5
|
| 46 |
+
raysampler_AdaptiveRaySampler_args:
|
| 47 |
+
n_rays_per_image_sampled_from_mask: 1024
|
| 48 |
+
scene_extent: 8.0
|
| 49 |
+
n_pts_per_ray_training: 64
|
| 50 |
+
n_pts_per_ray_evaluation: 64
|
| 51 |
+
stratified_point_sampling_training: true
|
| 52 |
+
stratified_point_sampling_evaluation: false
|
| 53 |
+
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
| 54 |
+
n_pts_per_ray_fine_training: 64
|
| 55 |
+
n_pts_per_ray_fine_evaluation: 64
|
| 56 |
+
append_coarse_samples_to_fine: true
|
| 57 |
+
density_noise_std_train: 1.0
|
| 58 |
+
view_pooler_args:
|
| 59 |
+
view_sampler_args:
|
| 60 |
+
masked_sampling: false
|
| 61 |
+
image_feature_extractor_ResNetFeatureExtractor_args:
|
| 62 |
+
stages:
|
| 63 |
+
- 1
|
| 64 |
+
- 2
|
| 65 |
+
- 3
|
| 66 |
+
- 4
|
| 67 |
+
proj_dim: 16
|
| 68 |
+
image_rescale: 0.32
|
| 69 |
+
first_max_pool: false
|
| 70 |
+
optimizer_factory_ImplicitronOptimizerFactory_args:
|
| 71 |
+
breed: Adam
|
| 72 |
+
weight_decay: 0.0
|
| 73 |
+
lr_policy: MultiStepLR
|
| 74 |
+
multistep_lr_milestones: []
|
| 75 |
+
lr: 0.0005
|
| 76 |
+
gamma: 0.1
|
| 77 |
+
momentum: 0.9
|
| 78 |
+
betas:
|
| 79 |
+
- 0.9
|
| 80 |
+
- 0.999
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_base.yaml
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_base.yaml
|
| 3 |
+
- _self_
|
| 4 |
+
data_source_ImplicitronDataSource_args:
|
| 5 |
+
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
| 6 |
+
batch_size: 10
|
| 7 |
+
dataset_length_train: 1000
|
| 8 |
+
dataset_length_val: 1
|
| 9 |
+
num_workers: 8
|
| 10 |
+
train_conditioning_type: SAME
|
| 11 |
+
val_conditioning_type: SAME
|
| 12 |
+
test_conditioning_type: SAME
|
| 13 |
+
images_per_seq_options:
|
| 14 |
+
- 2
|
| 15 |
+
- 3
|
| 16 |
+
- 4
|
| 17 |
+
- 5
|
| 18 |
+
- 6
|
| 19 |
+
- 7
|
| 20 |
+
- 8
|
| 21 |
+
- 9
|
| 22 |
+
- 10
|
| 23 |
+
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
| 24 |
+
assert_single_seq: false
|
| 25 |
+
task_str: multisequence
|
| 26 |
+
n_frames_per_sequence: -1
|
| 27 |
+
test_on_train: true
|
| 28 |
+
test_restrict_sequence_id: 0
|
| 29 |
+
optimizer_factory_ImplicitronOptimizerFactory_args:
|
| 30 |
+
multistep_lr_milestones:
|
| 31 |
+
- 1000
|
| 32 |
+
training_loop_ImplicitronTrainingLoop_args:
|
| 33 |
+
max_epochs: 3000
|
| 34 |
+
evaluator_ImplicitronEvaluator_args:
|
| 35 |
+
camera_difficulty_bin_breaks:
|
| 36 |
+
- 0.666667
|
| 37 |
+
- 0.833334
|
| 38 |
+
is_multisequence: true
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_ad.yaml
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_base.yaml
|
| 3 |
+
- _self_
|
| 4 |
+
model_factory_ImplicitronModelFactory_args:
|
| 5 |
+
model_GenericModel_args:
|
| 6 |
+
chunk_size_grid: 16000
|
| 7 |
+
view_pooler_enabled: false
|
| 8 |
+
global_encoder_class_type: SequenceAutodecoder
|
| 9 |
+
global_encoder_SequenceAutodecoder_args:
|
| 10 |
+
autodecoder_args:
|
| 11 |
+
n_instances: 20000
|
| 12 |
+
encoding_dim: 256
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer.yaml
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_base.yaml
|
| 3 |
+
- repro_feat_extractor_transformer.yaml
|
| 4 |
+
- _self_
|
| 5 |
+
model_factory_ImplicitronModelFactory_args:
|
| 6 |
+
model_GenericModel_args:
|
| 7 |
+
chunk_size_grid: 16000
|
| 8 |
+
raysampler_AdaptiveRaySampler_args:
|
| 9 |
+
n_rays_per_image_sampled_from_mask: 800
|
| 10 |
+
n_pts_per_ray_training: 32
|
| 11 |
+
n_pts_per_ray_evaluation: 32
|
| 12 |
+
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
| 13 |
+
n_pts_per_ray_fine_training: 16
|
| 14 |
+
n_pts_per_ray_fine_evaluation: 16
|
| 15 |
+
implicit_function_class_type: NeRFormerImplicitFunction
|
| 16 |
+
view_pooler_enabled: true
|
| 17 |
+
view_pooler_args:
|
| 18 |
+
feature_aggregator_class_type: IdentityFeatureAggregator
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer_angle_w.yaml
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_nerformer.yaml
|
| 3 |
+
- _self_
|
| 4 |
+
model_factory_ImplicitronModelFactory_args:
|
| 5 |
+
model_GenericModel_args:
|
| 6 |
+
view_pooler_args:
|
| 7 |
+
feature_aggregator_class_type: AngleWeightedIdentityFeatureAggregator
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet_noharm.yaml
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_srn_ad_hypernet.yaml
|
| 3 |
+
- _self_
|
| 4 |
+
model_factory_ImplicitronModelFactory_args:
|
| 5 |
+
model_GenericModel_args:
|
| 6 |
+
num_passes: 1
|
| 7 |
+
implicit_function_SRNHyperNetImplicitFunction_args:
|
| 8 |
+
pixel_generator_args:
|
| 9 |
+
n_harmonic_functions: 0
|
| 10 |
+
hypernet_args:
|
| 11 |
+
n_harmonic_functions: 0
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce.yaml
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_base.yaml
|
| 3 |
+
- repro_feat_extractor_normed.yaml
|
| 4 |
+
- _self_
|
| 5 |
+
model_factory_ImplicitronModelFactory_args:
|
| 6 |
+
model_GenericModel_args:
|
| 7 |
+
chunk_size_grid: 32000
|
| 8 |
+
num_passes: 1
|
| 9 |
+
n_train_target_views: -1
|
| 10 |
+
loss_weights:
|
| 11 |
+
loss_rgb_mse: 200.0
|
| 12 |
+
loss_prev_stage_rgb_mse: 0.0
|
| 13 |
+
loss_mask_bce: 1.0
|
| 14 |
+
loss_prev_stage_mask_bce: 0.0
|
| 15 |
+
loss_autodecoder_norm: 0.0
|
| 16 |
+
depth_neg_penalty: 10000.0
|
| 17 |
+
raysampler_class_type: NearFarRaySampler
|
| 18 |
+
raysampler_NearFarRaySampler_args:
|
| 19 |
+
n_rays_per_image_sampled_from_mask: 2048
|
| 20 |
+
min_depth: 0.05
|
| 21 |
+
max_depth: 0.05
|
| 22 |
+
n_pts_per_ray_training: 1
|
| 23 |
+
n_pts_per_ray_evaluation: 1
|
| 24 |
+
stratified_point_sampling_training: false
|
| 25 |
+
stratified_point_sampling_evaluation: false
|
| 26 |
+
renderer_class_type: LSTMRenderer
|
| 27 |
+
implicit_function_class_type: SRNImplicitFunction
|
| 28 |
+
view_pooler_enabled: true
|
| 29 |
+
optimizer_factory_ImplicitronOptimizerFactory_args:
|
| 30 |
+
breed: Adam
|
| 31 |
+
lr: 5.0e-05
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerf_wce.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_nerf_wce.yaml
|
| 3 |
+
- repro_multiseq_co3dv2_base.yaml
|
| 4 |
+
- _self_
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerformer.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_nerformer.yaml
|
| 3 |
+
- repro_multiseq_co3dv2_base.yaml
|
| 4 |
+
- _self_
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_ad_hypernet.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_srn_ad_hypernet.yaml
|
| 3 |
+
- repro_multiseq_co3dv2_base.yaml
|
| 4 |
+
- _self_
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_wce.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_multiseq_srn_wce.yaml
|
| 3 |
+
- repro_multiseq_co3dv2_base.yaml
|
| 4 |
+
- _self_
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_co3dv2_base.yaml
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
data_source_ImplicitronDataSource_args:
|
| 2 |
+
dataset_map_provider_class_type: JsonIndexDatasetMapProviderV2
|
| 3 |
+
dataset_map_provider_JsonIndexDatasetMapProviderV2_args:
|
| 4 |
+
category: teddybear
|
| 5 |
+
subset_name: manyview_dev_0
|
| 6 |
+
training_loop_ImplicitronTrainingLoop_args:
|
| 7 |
+
evaluator_ImplicitronEvaluator_args:
|
| 8 |
+
is_multisequence: false
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_idr.yaml
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_base
|
| 3 |
+
- _self_
|
| 4 |
+
model_factory_ImplicitronModelFactory_args:
|
| 5 |
+
model_GenericModel_args:
|
| 6 |
+
loss_weights:
|
| 7 |
+
loss_mask_bce: 100.0
|
| 8 |
+
loss_kl: 0.0
|
| 9 |
+
loss_rgb_mse: 1.0
|
| 10 |
+
loss_eikonal: 0.1
|
| 11 |
+
chunk_size_grid: 65536
|
| 12 |
+
num_passes: 1
|
| 13 |
+
view_pooler_enabled: false
|
| 14 |
+
implicit_function_IdrFeatureField_args:
|
| 15 |
+
n_harmonic_functions_xyz: 6
|
| 16 |
+
bias: 0.6
|
| 17 |
+
d_in: 3
|
| 18 |
+
d_out: 1
|
| 19 |
+
dims:
|
| 20 |
+
- 512
|
| 21 |
+
- 512
|
| 22 |
+
- 512
|
| 23 |
+
- 512
|
| 24 |
+
- 512
|
| 25 |
+
- 512
|
| 26 |
+
- 512
|
| 27 |
+
- 512
|
| 28 |
+
geometric_init: true
|
| 29 |
+
pooled_feature_dim: 0
|
| 30 |
+
skip_in:
|
| 31 |
+
- 6
|
| 32 |
+
weight_norm: true
|
| 33 |
+
renderer_SignedDistanceFunctionRenderer_args:
|
| 34 |
+
ray_tracer_args:
|
| 35 |
+
line_search_step: 0.5
|
| 36 |
+
line_step_iters: 3
|
| 37 |
+
n_secant_steps: 8
|
| 38 |
+
n_steps: 100
|
| 39 |
+
sdf_threshold: 5.0e-05
|
| 40 |
+
ray_normal_coloring_network_args:
|
| 41 |
+
d_in: 9
|
| 42 |
+
d_out: 3
|
| 43 |
+
dims:
|
| 44 |
+
- 512
|
| 45 |
+
- 512
|
| 46 |
+
- 512
|
| 47 |
+
- 512
|
| 48 |
+
mode: idr
|
| 49 |
+
n_harmonic_functions_dir: 4
|
| 50 |
+
pooled_feature_dim: 0
|
| 51 |
+
weight_norm: true
|
| 52 |
+
raysampler_AdaptiveRaySampler_args:
|
| 53 |
+
n_rays_per_image_sampled_from_mask: 1024
|
| 54 |
+
n_pts_per_ray_training: 0
|
| 55 |
+
n_pts_per_ray_evaluation: 0
|
| 56 |
+
renderer_class_type: SignedDistanceFunctionRenderer
|
| 57 |
+
implicit_function_class_type: IdrFeatureField
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_base
|
| 3 |
+
- _self_
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn.yaml
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_base.yaml
|
| 3 |
+
- _self_
|
| 4 |
+
model_factory_ImplicitronModelFactory_args:
|
| 5 |
+
model_GenericModel_args:
|
| 6 |
+
num_passes: 1
|
| 7 |
+
chunk_size_grid: 32000
|
| 8 |
+
view_pooler_enabled: false
|
| 9 |
+
loss_weights:
|
| 10 |
+
loss_rgb_mse: 200.0
|
| 11 |
+
loss_prev_stage_rgb_mse: 0.0
|
| 12 |
+
loss_mask_bce: 1.0
|
| 13 |
+
loss_prev_stage_mask_bce: 0.0
|
| 14 |
+
loss_autodecoder_norm: 0.0
|
| 15 |
+
depth_neg_penalty: 10000.0
|
| 16 |
+
raysampler_class_type: NearFarRaySampler
|
| 17 |
+
raysampler_NearFarRaySampler_args:
|
| 18 |
+
n_rays_per_image_sampled_from_mask: 2048
|
| 19 |
+
min_depth: 0.05
|
| 20 |
+
max_depth: 0.05
|
| 21 |
+
n_pts_per_ray_training: 1
|
| 22 |
+
n_pts_per_ray_evaluation: 1
|
| 23 |
+
stratified_point_sampling_training: false
|
| 24 |
+
stratified_point_sampling_evaluation: false
|
| 25 |
+
renderer_class_type: LSTMRenderer
|
| 26 |
+
implicit_function_class_type: SRNImplicitFunction
|
| 27 |
+
optimizer_factory_ImplicitronOptimizerFactory_args:
|
| 28 |
+
breed: Adam
|
| 29 |
+
lr: 5.0e-05
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce.yaml
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_wce_base
|
| 3 |
+
- repro_feat_extractor_normed.yaml
|
| 4 |
+
- _self_
|
| 5 |
+
model_factory_ImplicitronModelFactory_args:
|
| 6 |
+
model_GenericModel_args:
|
| 7 |
+
num_passes: 1
|
| 8 |
+
chunk_size_grid: 32000
|
| 9 |
+
view_pooler_enabled: true
|
| 10 |
+
loss_weights:
|
| 11 |
+
loss_rgb_mse: 200.0
|
| 12 |
+
loss_prev_stage_rgb_mse: 0.0
|
| 13 |
+
loss_mask_bce: 1.0
|
| 14 |
+
loss_prev_stage_mask_bce: 0.0
|
| 15 |
+
loss_autodecoder_norm: 0.0
|
| 16 |
+
depth_neg_penalty: 10000.0
|
| 17 |
+
raysampler_class_type: NearFarRaySampler
|
| 18 |
+
raysampler_NearFarRaySampler_args:
|
| 19 |
+
n_rays_per_image_sampled_from_mask: 2048
|
| 20 |
+
min_depth: 0.05
|
| 21 |
+
max_depth: 0.05
|
| 22 |
+
n_pts_per_ray_training: 1
|
| 23 |
+
n_pts_per_ray_evaluation: 1
|
| 24 |
+
stratified_point_sampling_training: false
|
| 25 |
+
stratified_point_sampling_evaluation: false
|
| 26 |
+
renderer_class_type: LSTMRenderer
|
| 27 |
+
implicit_function_class_type: SRNImplicitFunction
|
| 28 |
+
optimizer_factory_ImplicitronOptimizerFactory_args:
|
| 29 |
+
breed: Adam
|
| 30 |
+
lr: 5.0e-05
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce_noharm.yaml
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_srn_wce.yaml
|
| 3 |
+
- _self_
|
| 4 |
+
model_factory_ImplicitronModelFactory_args:
|
| 5 |
+
model_GenericModel_args:
|
| 6 |
+
num_passes: 1
|
| 7 |
+
implicit_function_SRNImplicitFunction_args:
|
| 8 |
+
pixel_generator_args:
|
| 9 |
+
n_harmonic_functions: 0
|
| 10 |
+
raymarch_function_args:
|
| 11 |
+
n_harmonic_functions: 0
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerf.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
- repro_singleseq_nerf.yaml
|
| 3 |
+
- repro_singleseq_co3dv2_base.yaml
|
| 4 |
+
- _self_
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (231 Bytes). View file
|
|
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend.cu
ADDED
|
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include <ATen/ATen.h>
|
| 10 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 11 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 12 |
+
#include <cmath>
|
| 13 |
+
#include <vector>
|
| 14 |
+
|
| 15 |
+
template <typename scalar_t>
|
| 16 |
+
__global__ void SigmoidAlphaBlendForwardKernel(
|
| 17 |
+
// clang-format off
|
| 18 |
+
const at::PackedTensorAccessor64<scalar_t, 4, at::RestrictPtrTraits> distances, // (N, H, W, K)
|
| 19 |
+
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> pix_to_face, // (N, H, W, K)
|
| 20 |
+
at::PackedTensorAccessor64<scalar_t, 3, at::RestrictPtrTraits> alphas, // (N, H, W)
|
| 21 |
+
// clang-format on
|
| 22 |
+
const scalar_t sigma,
|
| 23 |
+
const int N,
|
| 24 |
+
const int H,
|
| 25 |
+
const int W,
|
| 26 |
+
const int K) {
|
| 27 |
+
// Parallelize over each pixel in images of
|
| 28 |
+
// size H * W, for each image in the batch of size N.
|
| 29 |
+
const int num_threads = gridDim.x * blockDim.x;
|
| 30 |
+
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
|
| 31 |
+
|
| 32 |
+
// TODO: revisit performance of this kernel with shared memory usage
|
| 33 |
+
|
| 34 |
+
for (int t_i = tid; t_i < N * H * W; t_i += num_threads) {
|
| 35 |
+
// Convert linear index to 3D index
|
| 36 |
+
const int n = t_i / (H * W); // batch index.
|
| 37 |
+
const int pix_idx = t_i % (H * W);
|
| 38 |
+
|
| 39 |
+
// TODO: fix index calculation for non square images.
|
| 40 |
+
const int yi = pix_idx / W;
|
| 41 |
+
const int xi = pix_idx % W;
|
| 42 |
+
scalar_t alpha = 1.0;
|
| 43 |
+
|
| 44 |
+
// Loop over all the faces for this pixel.
|
| 45 |
+
for (int k = 0; k < K; k++) {
|
| 46 |
+
// Index into (N, H, W, K) tensors
|
| 47 |
+
const int f = pix_to_face[n][yi][xi][k];
|
| 48 |
+
if (f < 0) {
|
| 49 |
+
// Sentinel value is -1 indicating no face overlaps the pixel.
|
| 50 |
+
continue;
|
| 51 |
+
}
|
| 52 |
+
// The distance is negative if a pixel is inside a face and positive
|
| 53 |
+
// outside the face. Therefore use -1.0 * the distance to get the
|
| 54 |
+
// correct sign.
|
| 55 |
+
scalar_t dist = -1.0 * distances[n][yi][xi][k];
|
| 56 |
+
|
| 57 |
+
// Calculate the sigmoid probability.
|
| 58 |
+
scalar_t prob = 1. / (1. + exp(-dist / sigma));
|
| 59 |
+
|
| 60 |
+
// The cumulative product ensures that alpha will be 0.0 if at least 1
|
| 61 |
+
// face fully covers the pixel as for that face, prob will be 1.0.
|
| 62 |
+
// This results in a multiplication by 0.0 because of the (1.0 - prob)
|
| 63 |
+
// term. Therefore the final result of (1.0 - alpha) will be 1.0.
|
| 64 |
+
alpha *= (1.0 - prob);
|
| 65 |
+
}
|
| 66 |
+
alphas[n][yi][xi] = 1.0 - alpha;
|
| 67 |
+
}
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
at::Tensor SigmoidAlphaBlendForwardCuda(
|
| 71 |
+
const at::Tensor& distances, // (N, H, W, K)
|
| 72 |
+
const at::Tensor& pix_to_face, // (N, H, W, K)
|
| 73 |
+
const float sigma) {
|
| 74 |
+
const int N = distances.size(0);
|
| 75 |
+
const int H = distances.size(1);
|
| 76 |
+
const int W = distances.size(2);
|
| 77 |
+
const int K = distances.size(3);
|
| 78 |
+
|
| 79 |
+
at::Tensor alphas = at::zeros({N, H, W}, distances.options());
|
| 80 |
+
const size_t blocks = 1024;
|
| 81 |
+
const size_t threads = 128;
|
| 82 |
+
|
| 83 |
+
// Check inputs are on the same device
|
| 84 |
+
at::TensorArg distances_t{distances, "distances", 1},
|
| 85 |
+
pix_to_face_t{pix_to_face, "pix_to_face", 2};
|
| 86 |
+
at::CheckedFrom c = "SigmoidAlphaBlendForwardCuda";
|
| 87 |
+
at::checkAllSameGPU(c, {distances_t, pix_to_face_t});
|
| 88 |
+
|
| 89 |
+
// Set the device for the kernel launch based on the device of distances
|
| 90 |
+
at::cuda::CUDAGuard device_guard(distances.device());
|
| 91 |
+
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
|
| 92 |
+
|
| 93 |
+
if (distances.numel() == 0) {
|
| 94 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 95 |
+
return alphas;
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
AT_DISPATCH_FLOATING_TYPES(
|
| 99 |
+
distances.scalar_type(), "sigmoid_alpha_blend_kernel", ([&] {
|
| 100 |
+
// clang-format off
|
| 101 |
+
SigmoidAlphaBlendForwardKernel<scalar_t><<<blocks, threads, 0, stream>>>(
|
| 102 |
+
distances.packed_accessor64<scalar_t, 4, at::RestrictPtrTraits>(),
|
| 103 |
+
pix_to_face.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>(),
|
| 104 |
+
alphas.packed_accessor64<scalar_t, 3, at::RestrictPtrTraits>(),
|
| 105 |
+
sigma,
|
| 106 |
+
N,
|
| 107 |
+
H,
|
| 108 |
+
W,
|
| 109 |
+
K);
|
| 110 |
+
// clang-format on
|
| 111 |
+
}));
|
| 112 |
+
|
| 113 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 114 |
+
return alphas;
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
template <typename scalar_t>
|
| 118 |
+
__global__ void SigmoidAlphaBlendBackwardKernel(
|
| 119 |
+
// clang-format off
|
| 120 |
+
const at::PackedTensorAccessor64<scalar_t, 3, at::RestrictPtrTraits> grad_alphas, // (N, H, W)
|
| 121 |
+
const at::PackedTensorAccessor64<scalar_t, 3, at::RestrictPtrTraits> alphas, // (N, H, W)
|
| 122 |
+
const at::PackedTensorAccessor64<scalar_t, 4, at::RestrictPtrTraits> distances, // (N, H, W, K)
|
| 123 |
+
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> pix_to_face, // (N, H, W, K)
|
| 124 |
+
at::PackedTensorAccessor64<scalar_t, 4, at::RestrictPtrTraits> grad_distances, // (N, H, W)
|
| 125 |
+
// clang-format on
|
| 126 |
+
const scalar_t sigma,
|
| 127 |
+
const int N,
|
| 128 |
+
const int H,
|
| 129 |
+
const int W,
|
| 130 |
+
const int K) {
|
| 131 |
+
// Parallelize over each of the top K faces for each pixel in images of
|
| 132 |
+
// size H * W * K, for each image in the batch of size N.
|
| 133 |
+
|
| 134 |
+
// Get block and thread index.
|
| 135 |
+
const int n = blockIdx.x;
|
| 136 |
+
const int num_pixels = H * W * K;
|
| 137 |
+
const int num_threads = gridDim.y * blockDim.x;
|
| 138 |
+
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
|
| 139 |
+
|
| 140 |
+
for (int t_i = tid; t_i < num_pixels; t_i += num_threads) {
|
| 141 |
+
// Convert linear index to 3D index.
|
| 142 |
+
int yi = t_i / (W * K);
|
| 143 |
+
int xi = (t_i % (W * K)) / K;
|
| 144 |
+
int k = (t_i % (W * K)) % K;
|
| 145 |
+
|
| 146 |
+
const scalar_t alpha = 1.0 - alphas[n][yi][xi];
|
| 147 |
+
const scalar_t grad_alpha = grad_alphas[n][yi][xi];
|
| 148 |
+
const int f = pix_to_face[n][yi][xi][k];
|
| 149 |
+
|
| 150 |
+
// Sentinel value is -1 indicating no face overlaps the pixel.
|
| 151 |
+
if (f >= 0) {
|
| 152 |
+
// The distance is negative if a pixel is inside a face and positive
|
| 153 |
+
// outside the face. Therefore use -1.0 * the distance to get the
|
| 154 |
+
// correct sign.
|
| 155 |
+
scalar_t dist = -1.0 * distances[n][yi][xi][k];
|
| 156 |
+
|
| 157 |
+
// Calculate the sigmoid probability.
|
| 158 |
+
scalar_t prob = 1. / (1. + exp(-dist / sigma));
|
| 159 |
+
|
| 160 |
+
grad_distances[n][yi][xi][k] = grad_alpha * (-1.0 / sigma) * prob * alpha;
|
| 161 |
+
}
|
| 162 |
+
}
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
at::Tensor SigmoidAlphaBlendBackwardCuda(
|
| 166 |
+
const at::Tensor& grad_alphas, // (N, H, W)
|
| 167 |
+
const at::Tensor& alphas, // (N, H, W)
|
| 168 |
+
const at::Tensor& distances, // (N, H, W, K)
|
| 169 |
+
const at::Tensor& pix_to_face, // (N, H, W, K)
|
| 170 |
+
float sigma) {
|
| 171 |
+
const int N = distances.size(0);
|
| 172 |
+
const int H = distances.size(1);
|
| 173 |
+
const int W = distances.size(2);
|
| 174 |
+
const int K = distances.size(3);
|
| 175 |
+
|
| 176 |
+
at::Tensor grad_distances = at::zeros({N, H, W, K}, distances.options());
|
| 177 |
+
|
| 178 |
+
const dim3 threads(512);
|
| 179 |
+
const dim3 blocks(N, 1024 / N + 1);
|
| 180 |
+
|
| 181 |
+
at::TensorArg grad_alphas_t{grad_alphas, "grad_alphas", 1},
|
| 182 |
+
alphas_t{alphas, "alphas", 2}, distances_t{distances, "distances", 3},
|
| 183 |
+
pix_to_face_t{pix_to_face, "pix_to_face", 4};
|
| 184 |
+
at::CheckedFrom c = "SigmoidAlphaBlendBackwardCuda";
|
| 185 |
+
at::checkAllSameGPU(c, {grad_alphas_t, alphas_t, distances_t, pix_to_face_t});
|
| 186 |
+
|
| 187 |
+
// Set the device for the kernel launch based on the device of distances
|
| 188 |
+
at::cuda::CUDAGuard device_guard(alphas.device());
|
| 189 |
+
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
|
| 190 |
+
|
| 191 |
+
if (alphas.numel() == 0) {
|
| 192 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 193 |
+
return grad_alphas;
|
| 194 |
+
}
|
| 195 |
+
|
| 196 |
+
AT_DISPATCH_FLOATING_TYPES(
|
| 197 |
+
distances.scalar_type(), "sigmoid_alpha_blend_backward_kernel", ([&] {
|
| 198 |
+
SigmoidAlphaBlendBackwardKernel<
|
| 199 |
+
scalar_t><<<blocks, threads, 0, stream>>>(
|
| 200 |
+
// clang-format off
|
| 201 |
+
grad_alphas.packed_accessor64<scalar_t, 3,at::RestrictPtrTraits>(),
|
| 202 |
+
alphas.packed_accessor64<scalar_t, 3, at::RestrictPtrTraits>(),
|
| 203 |
+
distances.packed_accessor64<scalar_t, 4, at::RestrictPtrTraits>(),
|
| 204 |
+
pix_to_face.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>(),
|
| 205 |
+
grad_distances.packed_accessor64<scalar_t, 4, at::RestrictPtrTraits>(),
|
| 206 |
+
// clang-format on
|
| 207 |
+
sigma,
|
| 208 |
+
N,
|
| 209 |
+
H,
|
| 210 |
+
W,
|
| 211 |
+
K);
|
| 212 |
+
}));
|
| 213 |
+
|
| 214 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 215 |
+
return grad_distances;
|
| 216 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend.h
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#pragma once
|
| 10 |
+
#include <torch/extension.h>
|
| 11 |
+
#include <tuple>
|
| 12 |
+
|
| 13 |
+
// clang-format off
|
| 14 |
+
// Function to blend the top K faces per pixel based on the 2d euclidean distance
|
| 15 |
+
// from the center of the pixel to the face. This method is adapted from [1].
|
| 16 |
+
// The output can be used to set the alpha value in an RGBA image.
|
| 17 |
+
// Args:
|
| 18 |
+
// pix_to_face: LongTensor of shape (N, H, W, K), indices of faces overlapping
|
| 19 |
+
// with each pixel, where N is the batch size, H, W are the dimensions of the
|
| 20 |
+
// image and K is the number of faces rasterized per pixel.
|
| 21 |
+
// distances: FloatTensor of shape (N, H, W, K), 2d euclidean distance of each pixel
|
| 22 |
+
// relative to the faces in pix_to_face
|
| 23 |
+
// sigma: float, parameter which controls the width of the sigmoid for blending
|
| 24 |
+
// Returns:
|
| 25 |
+
// alphas: FloatTensor of shape (N, H, W), the blended values for each pixel
|
| 26 |
+
// in the image.
|
| 27 |
+
//
|
| 28 |
+
// [1] Shichen Liu et al, 'Soft Rasterizer: A Differentiable Renderer for
|
| 29 |
+
// Image-based 3D Reasoning'
|
| 30 |
+
// clang-format on
|
| 31 |
+
at::Tensor SigmoidAlphaBlendForwardCpu(
|
| 32 |
+
const at::Tensor& distances,
|
| 33 |
+
const at::Tensor& pix_to_face,
|
| 34 |
+
const float sigma);
|
| 35 |
+
|
| 36 |
+
#ifdef WITH_CUDA
|
| 37 |
+
at::Tensor SigmoidAlphaBlendForwardCuda(
|
| 38 |
+
const at::Tensor& distances,
|
| 39 |
+
const at::Tensor& pix_to_face,
|
| 40 |
+
const float sigma);
|
| 41 |
+
#endif
|
| 42 |
+
|
| 43 |
+
// clang-format off
|
| 44 |
+
// Args:
|
| 45 |
+
// grad_alphas: FloatTensor of shape (N, H, W), upstream gradients for alphas
|
| 46 |
+
// alphas: FloatTensor of shape (N, H, W), the alpha values from the forward pass
|
| 47 |
+
// pix_to_face: LongTensor of shape (N, H, W, K), indices of faces overlapping
|
| 48 |
+
// with each pixel, where N is the batch size, H, W are the dimensions of the
|
| 49 |
+
// image, and K is the number of faces rasterized per pixel
|
| 50 |
+
// distances: FloatTensor of shape (N, H, W, K), 2d euclidean distance of each pixel
|
| 51 |
+
// to the corresponding faces in pix_to_face
|
| 52 |
+
// sigma: float, parameter which controls the width of the sigmoid for blending
|
| 53 |
+
// Returns:
|
| 54 |
+
// grad_distances: FloatTensor of shape (N, H, W, K)
|
| 55 |
+
// clang-format on
|
| 56 |
+
at::Tensor SigmoidAlphaBlendBackwardCpu(
|
| 57 |
+
const at::Tensor& grad_alphas,
|
| 58 |
+
const at::Tensor& alphas,
|
| 59 |
+
const at::Tensor& distances,
|
| 60 |
+
const at::Tensor& pix_to_face,
|
| 61 |
+
const float sigma);
|
| 62 |
+
|
| 63 |
+
#ifdef WITH_CUDA
|
| 64 |
+
at::Tensor SigmoidAlphaBlendBackwardCuda(
|
| 65 |
+
const at::Tensor& grad_alphas,
|
| 66 |
+
const at::Tensor& alphas,
|
| 67 |
+
const at::Tensor& distances,
|
| 68 |
+
const at::Tensor& pix_to_face,
|
| 69 |
+
const float sigma);
|
| 70 |
+
#endif
|
| 71 |
+
|
| 72 |
+
// Implementation which is exposed.
|
| 73 |
+
at::Tensor
|
| 74 |
+
SigmoidAlphaBlend(at::Tensor& distances, at::Tensor& pix_to_face, float sigma) {
|
| 75 |
+
if (distances.is_cuda() && pix_to_face.is_cuda()) {
|
| 76 |
+
#ifdef WITH_CUDA
|
| 77 |
+
return SigmoidAlphaBlendForwardCuda(distances, pix_to_face, sigma);
|
| 78 |
+
#else
|
| 79 |
+
AT_ERROR("Not compiled with GPU support.");
|
| 80 |
+
#endif
|
| 81 |
+
}
|
| 82 |
+
return SigmoidAlphaBlendForwardCpu(distances, pix_to_face, sigma);
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
// Implementation which is exposed.
|
| 86 |
+
at::Tensor SigmoidAlphaBlendBackward(
|
| 87 |
+
const at::Tensor& grad_alphas,
|
| 88 |
+
const at::Tensor& alphas,
|
| 89 |
+
const at::Tensor& distances,
|
| 90 |
+
const at::Tensor& pix_to_face,
|
| 91 |
+
const float sigma) {
|
| 92 |
+
if (distances.is_cuda() && pix_to_face.is_cuda() && alphas.is_cuda() &&
|
| 93 |
+
grad_alphas.is_cuda()) {
|
| 94 |
+
#ifdef WITH_CUDA
|
| 95 |
+
return SigmoidAlphaBlendBackwardCuda(
|
| 96 |
+
grad_alphas, alphas, distances, pix_to_face, sigma);
|
| 97 |
+
#else
|
| 98 |
+
AT_ERROR("Not compiled with GPU support.");
|
| 99 |
+
#endif
|
| 100 |
+
}
|
| 101 |
+
return SigmoidAlphaBlendBackwardCpu(
|
| 102 |
+
grad_alphas, alphas, distances, pix_to_face, sigma);
|
| 103 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend_cpu.cpp
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include <torch/extension.h>
|
| 10 |
+
#include <cmath>
|
| 11 |
+
#include <vector>
|
| 12 |
+
|
| 13 |
+
at::Tensor SigmoidAlphaBlendForwardCpu(
|
| 14 |
+
const at::Tensor& distances, // (N, H, W, K)
|
| 15 |
+
const at::Tensor& pix_to_face, // (N, H, W, K)
|
| 16 |
+
const float sigma) {
|
| 17 |
+
const int N = distances.size(0);
|
| 18 |
+
const int H = distances.size(1);
|
| 19 |
+
const int W = distances.size(2);
|
| 20 |
+
const int K = distances.size(3);
|
| 21 |
+
|
| 22 |
+
torch::Tensor out = torch::empty({N, H, W}, distances.options());
|
| 23 |
+
|
| 24 |
+
auto distances_a = distances.accessor<float, 4>();
|
| 25 |
+
auto pix_to_face_a = pix_to_face.accessor<int64_t, 4>();
|
| 26 |
+
auto out_a = out.accessor<float, 3>();
|
| 27 |
+
|
| 28 |
+
// Iterate over the images in the batch.
|
| 29 |
+
for (int n = 0; n < N; ++n) {
|
| 30 |
+
// Iterate through the horizontal lines of the image from top to bottom.
|
| 31 |
+
for (int h = 0; h < H; ++h) {
|
| 32 |
+
// Iterate over the pixels on this horizontal line, left to right.
|
| 33 |
+
for (int w = 0; w < W; ++w) {
|
| 34 |
+
float alpha = 1.0;
|
| 35 |
+
|
| 36 |
+
// Loop through the top K faces for each pixel.
|
| 37 |
+
for (int k = 0; k < K; ++k) {
|
| 38 |
+
const int f = pix_to_face_a[n][h][w][k];
|
| 39 |
+
if (f < 0) {
|
| 40 |
+
// Sentinel value is -1 indicating no face overlaps the pixel.
|
| 41 |
+
continue;
|
| 42 |
+
}
|
| 43 |
+
// The distance is negative if a pixel is inside a face and positive
|
| 44 |
+
// outside the face. Therefore use -1.0 * the distance to get the
|
| 45 |
+
// correct sign.
|
| 46 |
+
float dist = -1.0 * distances_a[n][h][w][k];
|
| 47 |
+
|
| 48 |
+
// Calculate the sigmoid probability.
|
| 49 |
+
float prob = 1. / (1. + exp(-dist / sigma));
|
| 50 |
+
|
| 51 |
+
// The product ensures that alpha will be 0.0 if at least 1
|
| 52 |
+
// face fully covers the pixel as for that face, prob will be 1.0.
|
| 53 |
+
// This results in a multiplication by 0.0 because of the (1.0 - prob)
|
| 54 |
+
// term. Therefore 1.0 - alpha will be 1.0.
|
| 55 |
+
alpha *= 1.0 - prob;
|
| 56 |
+
}
|
| 57 |
+
out_a[n][h][w] = 1.0 - alpha;
|
| 58 |
+
}
|
| 59 |
+
}
|
| 60 |
+
}
|
| 61 |
+
return out;
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
at::Tensor SigmoidAlphaBlendBackwardCpu(
|
| 65 |
+
const at::Tensor& grad_alphas, // (N, H, W)
|
| 66 |
+
const at::Tensor& alphas, // (N, H, W)
|
| 67 |
+
const at::Tensor& distances, // (N, H, W, K)
|
| 68 |
+
const at::Tensor& pix_to_face, // (N, H, W, K)
|
| 69 |
+
const float sigma) {
|
| 70 |
+
const int N = distances.size(0);
|
| 71 |
+
const int H = distances.size(1);
|
| 72 |
+
const int W = distances.size(2);
|
| 73 |
+
const int K = distances.size(3);
|
| 74 |
+
|
| 75 |
+
auto distances_a = distances.accessor<float, 4>();
|
| 76 |
+
auto pix_to_face_a = pix_to_face.accessor<int64_t, 4>();
|
| 77 |
+
auto alphas_a = alphas.accessor<float, 3>();
|
| 78 |
+
auto grad_alphas_a = grad_alphas.accessor<float, 3>();
|
| 79 |
+
|
| 80 |
+
torch::Tensor grad_distances =
|
| 81 |
+
torch::zeros({N, H, W, K}, distances.options());
|
| 82 |
+
auto grad_distances_a = grad_distances.accessor<float, 4>();
|
| 83 |
+
|
| 84 |
+
// Iterate over the images in the batch.
|
| 85 |
+
for (int n = 0; n < N; ++n) {
|
| 86 |
+
// Iterate through the horizontal lines of the image from top to bottom.
|
| 87 |
+
for (int h = 0; h < H; ++h) {
|
| 88 |
+
// Iterate over the pixels on this horizontal line, left to right.
|
| 89 |
+
for (int w = 0; w < W; ++w) {
|
| 90 |
+
// Get the alpha value from the forward pass and the
|
| 91 |
+
// upstream gradient.
|
| 92 |
+
const float alpha = 1.0 - alphas_a[n][h][w];
|
| 93 |
+
const float grad_alpha = grad_alphas_a[n][h][w];
|
| 94 |
+
|
| 95 |
+
// Loop through the top K faces for each pixel.
|
| 96 |
+
for (int k = 0; k < K; ++k) {
|
| 97 |
+
const int f = pix_to_face_a[n][h][w][k];
|
| 98 |
+
if (f < 0) {
|
| 99 |
+
// Sentinel value is -1 indicating no face overlaps the pixel
|
| 100 |
+
continue;
|
| 101 |
+
}
|
| 102 |
+
// The distance is negative if a pixel is inside a face and positive
|
| 103 |
+
// outside the face. Therefore use -1.0 * distance to get the
|
| 104 |
+
// correct sign.
|
| 105 |
+
float dist = -1.0 * distances_a[n][h][w][k];
|
| 106 |
+
|
| 107 |
+
// Calculate the sigmoid probability.
|
| 108 |
+
float prob = 1. / (1. + exp(-dist / sigma));
|
| 109 |
+
|
| 110 |
+
// clang-format off
|
| 111 |
+
// We need to take the derivative of alpha w.r.t to the distance.
|
| 112 |
+
// alpha = 1.0 - (1.0- sigmoid(-x)) * (1.0 - sigmoid(-x2)) * ... * (1.0 - sigmoid(-xn))
|
| 113 |
+
//
|
| 114 |
+
// Note that d/dx sigmoid(x) = sigmoid(x) * (1.0 - sigmoid(x))
|
| 115 |
+
//
|
| 116 |
+
// This gives:
|
| 117 |
+
// d_alpha/d_dist = -1.0 * -1.0 * sigmoid(-x)(1. - sigmoid(-x)) * (-1.0/sigma)
|
| 118 |
+
// * ((1.0 - sigmoid(-x2) * ... * (1.0 - sigmoid(-xn))
|
| 119 |
+
// = (-1.0/sigma) * prob * (1.0 - prob) * alpha/(1.0 - prob)
|
| 120 |
+
// = (-1.0/sigma) * prob * alpha
|
| 121 |
+
// clang-format on
|
| 122 |
+
grad_distances_a[n][h][w][k] =
|
| 123 |
+
grad_alpha * (-1.0 / sigma) * prob * alpha;
|
| 124 |
+
}
|
| 125 |
+
}
|
| 126 |
+
}
|
| 127 |
+
}
|
| 128 |
+
return grad_distances;
|
| 129 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/ext.cpp
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
// clang-format off
|
| 10 |
+
#if !defined(USE_ROCM)
|
| 11 |
+
#include "./pulsar/global.h" // Include before <torch/extension.h>.
|
| 12 |
+
#endif
|
| 13 |
+
#include <torch/extension.h>
|
| 14 |
+
// clang-format on
|
| 15 |
+
#if !defined(USE_ROCM)
|
| 16 |
+
#include "./pulsar/pytorch/renderer.h"
|
| 17 |
+
#include "./pulsar/pytorch/tensor_util.h"
|
| 18 |
+
#endif
|
| 19 |
+
#include "ball_query/ball_query.h"
|
| 20 |
+
#include "blending/sigmoid_alpha_blend.h"
|
| 21 |
+
#include "compositing/alpha_composite.h"
|
| 22 |
+
#include "compositing/norm_weighted_sum.h"
|
| 23 |
+
#include "compositing/weighted_sum.h"
|
| 24 |
+
#include "face_areas_normals/face_areas_normals.h"
|
| 25 |
+
#include "gather_scatter/gather_scatter.h"
|
| 26 |
+
#include "interp_face_attrs/interp_face_attrs.h"
|
| 27 |
+
#include "iou_box3d/iou_box3d.h"
|
| 28 |
+
#include "knn/knn.h"
|
| 29 |
+
#include "marching_cubes/marching_cubes.h"
|
| 30 |
+
#include "mesh_normal_consistency/mesh_normal_consistency.h"
|
| 31 |
+
#include "packed_to_padded_tensor/packed_to_padded_tensor.h"
|
| 32 |
+
#include "point_mesh/point_mesh_cuda.h"
|
| 33 |
+
#include "points_to_volumes/points_to_volumes.h"
|
| 34 |
+
#include "rasterize_meshes/rasterize_meshes.h"
|
| 35 |
+
#include "rasterize_points/rasterize_points.h"
|
| 36 |
+
#include "sample_farthest_points/sample_farthest_points.h"
|
| 37 |
+
#include "sample_pdf/sample_pdf.h"
|
| 38 |
+
|
| 39 |
+
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
|
| 40 |
+
m.def("face_areas_normals_forward", &FaceAreasNormalsForward);
|
| 41 |
+
m.def("face_areas_normals_backward", &FaceAreasNormalsBackward);
|
| 42 |
+
m.def("packed_to_padded", &PackedToPadded);
|
| 43 |
+
m.def("padded_to_packed", &PaddedToPacked);
|
| 44 |
+
m.def("interp_face_attrs_forward", &InterpFaceAttrsForward);
|
| 45 |
+
m.def("interp_face_attrs_backward", &InterpFaceAttrsBackward);
|
| 46 |
+
#ifdef WITH_CUDA
|
| 47 |
+
m.def("knn_check_version", &KnnCheckVersion);
|
| 48 |
+
#endif
|
| 49 |
+
m.def("knn_points_idx", &KNearestNeighborIdx);
|
| 50 |
+
m.def("knn_points_backward", &KNearestNeighborBackward);
|
| 51 |
+
m.def("ball_query", &BallQuery);
|
| 52 |
+
m.def("sample_farthest_points", &FarthestPointSampling);
|
| 53 |
+
m.def(
|
| 54 |
+
"mesh_normal_consistency_find_verts", &MeshNormalConsistencyFindVertices);
|
| 55 |
+
m.def("gather_scatter", &GatherScatter);
|
| 56 |
+
m.def("points_to_volumes_forward", PointsToVolumesForward);
|
| 57 |
+
m.def("points_to_volumes_backward", PointsToVolumesBackward);
|
| 58 |
+
m.def("rasterize_points", &RasterizePoints);
|
| 59 |
+
m.def("rasterize_points_backward", &RasterizePointsBackward);
|
| 60 |
+
m.def("rasterize_meshes_backward", &RasterizeMeshesBackward);
|
| 61 |
+
m.def("rasterize_meshes", &RasterizeMeshes);
|
| 62 |
+
m.def("sigmoid_alpha_blend", &SigmoidAlphaBlend);
|
| 63 |
+
m.def("sigmoid_alpha_blend_backward", &SigmoidAlphaBlendBackward);
|
| 64 |
+
|
| 65 |
+
// Accumulation functions
|
| 66 |
+
m.def("accum_weightedsumnorm", &weightedSumNormForward);
|
| 67 |
+
m.def("accum_weightedsum", &weightedSumForward);
|
| 68 |
+
m.def("accum_alphacomposite", &alphaCompositeForward);
|
| 69 |
+
m.def("accum_weightedsumnorm_backward", &weightedSumNormBackward);
|
| 70 |
+
m.def("accum_weightedsum_backward", &weightedSumBackward);
|
| 71 |
+
m.def("accum_alphacomposite_backward", &alphaCompositeBackward);
|
| 72 |
+
|
| 73 |
+
// These are only visible for testing; users should not call them directly
|
| 74 |
+
m.def("_rasterize_points_coarse", &RasterizePointsCoarse);
|
| 75 |
+
m.def("_rasterize_points_naive", &RasterizePointsNaive);
|
| 76 |
+
m.def("_rasterize_meshes_naive", &RasterizeMeshesNaive);
|
| 77 |
+
m.def("_rasterize_meshes_coarse", &RasterizeMeshesCoarse);
|
| 78 |
+
m.def("_rasterize_meshes_fine", &RasterizeMeshesFine);
|
| 79 |
+
|
| 80 |
+
// PointEdge distance functions
|
| 81 |
+
m.def("point_edge_dist_forward", &PointEdgeDistanceForward);
|
| 82 |
+
m.def("point_edge_dist_backward", &PointEdgeDistanceBackward);
|
| 83 |
+
m.def("edge_point_dist_forward", &EdgePointDistanceForward);
|
| 84 |
+
m.def("edge_point_dist_backward", &EdgePointDistanceBackward);
|
| 85 |
+
m.def("point_edge_array_dist_forward", &PointEdgeArrayDistanceForward);
|
| 86 |
+
m.def("point_edge_array_dist_backward", &PointEdgeArrayDistanceBackward);
|
| 87 |
+
|
| 88 |
+
// PointFace distance functions
|
| 89 |
+
m.def("point_face_dist_forward", &PointFaceDistanceForward);
|
| 90 |
+
m.def("point_face_dist_backward", &PointFaceDistanceBackward);
|
| 91 |
+
m.def("face_point_dist_forward", &FacePointDistanceForward);
|
| 92 |
+
m.def("face_point_dist_backward", &FacePointDistanceBackward);
|
| 93 |
+
m.def("point_face_array_dist_forward", &PointFaceArrayDistanceForward);
|
| 94 |
+
m.def("point_face_array_dist_backward", &PointFaceArrayDistanceBackward);
|
| 95 |
+
|
| 96 |
+
// Sample PDF
|
| 97 |
+
m.def("sample_pdf", &SamplePdf);
|
| 98 |
+
|
| 99 |
+
// 3D IoU
|
| 100 |
+
m.def("iou_box3d", &IoUBox3D);
|
| 101 |
+
|
| 102 |
+
// Marching cubes
|
| 103 |
+
m.def("marching_cubes", &MarchingCubes);
|
| 104 |
+
|
| 105 |
+
// Pulsar.
|
| 106 |
+
// Pulsar not enabled on AMD.
|
| 107 |
+
#if !defined(USE_ROCM)
|
| 108 |
+
#ifdef PULSAR_LOGGING_ENABLED
|
| 109 |
+
c10::ShowLogInfoToStderr();
|
| 110 |
+
#endif
|
| 111 |
+
py::class_<
|
| 112 |
+
pulsar::pytorch::Renderer,
|
| 113 |
+
std::shared_ptr<pulsar::pytorch::Renderer>>(m, "PulsarRenderer")
|
| 114 |
+
.def(py::init<
|
| 115 |
+
const uint&,
|
| 116 |
+
const uint&,
|
| 117 |
+
const uint&,
|
| 118 |
+
const bool&,
|
| 119 |
+
const bool&,
|
| 120 |
+
const float&,
|
| 121 |
+
const uint&,
|
| 122 |
+
const uint&>())
|
| 123 |
+
.def(
|
| 124 |
+
"__eq__",
|
| 125 |
+
[](const pulsar::pytorch::Renderer& a,
|
| 126 |
+
const pulsar::pytorch::Renderer& b) { return a == b; },
|
| 127 |
+
py::is_operator())
|
| 128 |
+
.def(
|
| 129 |
+
"__ne__",
|
| 130 |
+
[](const pulsar::pytorch::Renderer& a,
|
| 131 |
+
const pulsar::pytorch::Renderer& b) { return !(a == b); },
|
| 132 |
+
py::is_operator())
|
| 133 |
+
.def(
|
| 134 |
+
"__repr__",
|
| 135 |
+
[](const pulsar::pytorch::Renderer& self) {
|
| 136 |
+
std::stringstream ss;
|
| 137 |
+
ss << self;
|
| 138 |
+
return ss.str();
|
| 139 |
+
})
|
| 140 |
+
.def(
|
| 141 |
+
"forward",
|
| 142 |
+
&pulsar::pytorch::Renderer::forward,
|
| 143 |
+
py::arg("vert_pos"),
|
| 144 |
+
py::arg("vert_col"),
|
| 145 |
+
py::arg("vert_radii"),
|
| 146 |
+
|
| 147 |
+
py::arg("cam_pos"),
|
| 148 |
+
py::arg("pixel_0_0_center"),
|
| 149 |
+
py::arg("pixel_vec_x"),
|
| 150 |
+
py::arg("pixel_vec_y"),
|
| 151 |
+
py::arg("focal_length"),
|
| 152 |
+
py::arg("principal_point_offsets"),
|
| 153 |
+
|
| 154 |
+
py::arg("gamma"),
|
| 155 |
+
py::arg("max_depth"),
|
| 156 |
+
py::arg("min_depth") /* = 0.f*/,
|
| 157 |
+
py::arg(
|
| 158 |
+
"bg_col") /* = at::nullopt not exposed properly in pytorch 1.1. */
|
| 159 |
+
,
|
| 160 |
+
py::arg("opacity") /* = at::nullopt ... */,
|
| 161 |
+
py::arg("percent_allowed_difference") = 0.01f,
|
| 162 |
+
py::arg("max_n_hits") = MAX_UINT,
|
| 163 |
+
py::arg("mode") = 0)
|
| 164 |
+
.def("backward", &pulsar::pytorch::Renderer::backward)
|
| 165 |
+
.def_property(
|
| 166 |
+
"device_tracker",
|
| 167 |
+
[](const pulsar::pytorch::Renderer& self) {
|
| 168 |
+
return self.device_tracker;
|
| 169 |
+
},
|
| 170 |
+
[](pulsar::pytorch::Renderer& self, const torch::Tensor& val) {
|
| 171 |
+
self.device_tracker = val;
|
| 172 |
+
})
|
| 173 |
+
.def_property_readonly("width", &pulsar::pytorch::Renderer::width)
|
| 174 |
+
.def_property_readonly("height", &pulsar::pytorch::Renderer::height)
|
| 175 |
+
.def_property_readonly(
|
| 176 |
+
"max_num_balls", &pulsar::pytorch::Renderer::max_num_balls)
|
| 177 |
+
.def_property_readonly(
|
| 178 |
+
"orthogonal", &pulsar::pytorch::Renderer::orthogonal)
|
| 179 |
+
.def_property_readonly(
|
| 180 |
+
"right_handed", &pulsar::pytorch::Renderer::right_handed)
|
| 181 |
+
.def_property_readonly("n_track", &pulsar::pytorch::Renderer::n_track);
|
| 182 |
+
m.def(
|
| 183 |
+
"pulsar_sphere_ids_from_result_info_nograd",
|
| 184 |
+
&pulsar::pytorch::sphere_ids_from_result_info_nograd);
|
| 185 |
+
// Constants.
|
| 186 |
+
m.attr("EPS") = py::float_(EPS);
|
| 187 |
+
m.attr("MAX_FLOAT") = py::float_(MAX_FLOAT);
|
| 188 |
+
m.attr("MAX_INT") = py::int_(MAX_INT);
|
| 189 |
+
m.attr("MAX_UINT") = py::int_(MAX_UINT);
|
| 190 |
+
m.attr("MAX_USHORT") = py::int_(MAX_USHORT);
|
| 191 |
+
m.attr("PULSAR_MAX_GRAD_SPHERES") = py::int_(MAX_GRAD_SPHERES);
|
| 192 |
+
#endif
|
| 193 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals.cu
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include <ATen/ATen.h>
|
| 10 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 11 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 12 |
+
#include <tuple>
|
| 13 |
+
|
| 14 |
+
// Computes, for every mesh face, its area and its unit normal.
//
// verts:        packed (V, 3) vertex positions.
// faces:        packed (F, 3) int64 vertex indices.
// face_areas:   output (F,) face areas.
// face_normals: output (F, 3) unit face normals.
//
// Faces are split evenly over the threads of the grid via a grid-stride
// loop; each thread writes the area and normal of the faces it owns.
template <typename scalar_t>
__global__ void FaceAreasNormalsForwardKernel(
    const scalar_t* __restrict__ verts,
    const int64_t* __restrict__ faces,
    scalar_t* __restrict__ face_areas,
    scalar_t* __restrict__ face_normals,
    const size_t V,
    const size_t F) {
  const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  const size_t stride = gridDim.x * blockDim.x;

  for (size_t f = tid; f < F; f += stride) {
    // Indices of the face's three corner vertices.
    const int64_t idx0 = faces[3 * f + 0];
    const int64_t idx1 = faces[3 * f + 1];
    const int64_t idx2 = faces[3 * f + 2];

    // Edge vectors e1 = v1 - v0 and e2 = v2 - v0.
    const scalar_t e1x = verts[3 * idx1 + 0] - verts[3 * idx0 + 0];
    const scalar_t e1y = verts[3 * idx1 + 1] - verts[3 * idx0 + 1];
    const scalar_t e1z = verts[3 * idx1 + 2] - verts[3 * idx0 + 2];

    const scalar_t e2x = verts[3 * idx2 + 0] - verts[3 * idx0 + 0];
    const scalar_t e2y = verts[3 * idx2 + 1] - verts[3 * idx0 + 1];
    const scalar_t e2z = verts[3 * idx2 + 2] - verts[3 * idx0 + 2];

    // Cross product n = e1 x e2: its magnitude is twice the face area and
    // its direction is the (unnormalized) face normal.
    const scalar_t nx = e1y * e2z - e1z * e2y;
    const scalar_t ny = e1z * e2x - e1x * e2z;
    const scalar_t nz = e1x * e2y - e1y * e2x;

    scalar_t mag = sqrt(nx * nx + ny * ny + nz * nz);
    face_areas[f] = mag / 2.0;
    // Clamp the divisor so degenerate (zero-area) faces do not divide by ~0.
    if (mag < 1e-6) {
      mag = 1e-6;
    }
    face_normals[3 * f + 0] = nx / mag;
    face_normals[3 * f + 1] = ny / mag;
    face_normals[3 * f + 2] = nz / mag;
  }
}
|
| 65 |
+
|
| 66 |
+
// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.
//
// Backward pass of FaceAreasNormalsForwardKernel.
//
// grad_areas:   (F,) upstream gradient w.r.t. the face areas.
// grad_normals: (F, 3) upstream gradient w.r.t. the face normals.
// verts:        (V, 3) vertex positions used in the forward pass.
// faces:        (F, 3) int64 vertex indices per face.
// grad_verts:   (V, 3) output gradient w.r.t. the vertices; accumulated
//               with atomicAdd because neighboring faces share vertices
//               (hence the result is nondeterministic in summation order).
__global__ void FaceAreasNormalsBackwardKernel(
    const float* __restrict__ grad_areas,
    const float* __restrict__ grad_normals,
    const float* __restrict__ verts,
    const int64_t* __restrict__ faces,
    float* __restrict__ grad_verts,
    const size_t V,
    const size_t F) {
  const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  const size_t stride = gridDim.x * blockDim.x;

  // Faces split evenly over the number of threads in the grid.
  // Each thread computes the vertex gradients of its respective faces and
  // scatters them into the global grad_verts tensor.
  for (size_t f = tid; f < F; f += stride) {
    const int64_t i0 = faces[3 * f + 0];
    const int64_t i1 = faces[3 * f + 1];
    const int64_t i2 = faces[3 * f + 2];

    const float v0_x = verts[3 * i0 + 0];
    const float v0_y = verts[3 * i0 + 1];
    const float v0_z = verts[3 * i0 + 2];

    const float v1_x = verts[3 * i1 + 0];
    const float v1_y = verts[3 * i1 + 1];
    const float v1_z = verts[3 * i1 + 2];

    const float v2_x = verts[3 * i2 + 0];
    const float v2_y = verts[3 * i2 + 1];
    const float v2_z = verts[3 * i2 + 2];

    // Edge vectors a = v1 - v0, b = v2 - v0, and cross product c = a x b.
    const float ax = v1_x - v0_x;
    const float ay = v1_y - v0_y;
    const float az = v1_z - v0_z;

    const float bx = v2_x - v0_x;
    const float by = v2_y - v0_y;
    const float bz = v2_z - v0_z;

    const float cx = ay * bz - az * by;
    const float cy = az * bx - ax * bz;
    const float cz = ax * by - ay * bx;

    float norm = sqrt(cx * cx + cy * cy + cz * cz);
    norm = (norm < 1e-6) ? 1e-6 : norm; // max(norm, 1e-6)
    float inv_norm = 1. / norm;
    float inv_norm_2 = pow(inv_norm, 2.0f);
    float inv_norm_3 = pow(inv_norm, 3.0f);

    // We compute gradients with respect to the input vertices.
    // For each vertex, gradients come from grad_areas and grad_normals.
    // eg, grad_v0_x = (d / d v0_x)
    //    = \sum_f (d / d areas[f]) * (d areas[f] / d v0_x)
    //           + (d / d normals[f, 0]) * (d normals[f, 0] / d v0_x)
    //           + (d / d normals[f, 1]) * (d normals[f, 1] / d v0_x)
    //           + (d / d normals[f, 2]) * (d normals[f, 2] / d v0_x)
    // with (d / d areas[f]) = grad_areas[f] and
    //      (d / d normals[f, j]) = grad_normals[f][j].
    // Each term follows the quotient rule for n = c / |c|:
    //   d(n_j)/dv = (dc_j - n_j * d|c|) / |c|
    // so the normal-j term always subtracts a projection along c_j.

    // grad v0 coming from grad areas and grad normals
    const float grad_v0_x =
        ((-az + bz) * cy + (-by + ay) * cz) / 2.0 * inv_norm * grad_areas[f] +
        -cx * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_3 *
            grad_normals[3 * f + 0] +
        ((-az + bz) - cy * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_2) *
            inv_norm * grad_normals[3 * f + 1] +
        ((-by + ay) - cz * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_2) *
            inv_norm * grad_normals[3 * f + 2];
    atomicAdd(grad_verts + 3 * i0 + 0, grad_v0_x);

    const float grad_v0_y =
        ((-bz + az) * cx + (-ax + bx) * cz) / 2.0 * inv_norm * grad_areas[f] +
        ((-bz + az) - cx * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_2) *
            inv_norm * grad_normals[3 * f + 0] +
        -cy * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_3 *
            grad_normals[3 * f + 1] +
        ((-ax + bx) - cz * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_2) *
            inv_norm * grad_normals[3 * f + 2];
    atomicAdd(grad_verts + 3 * i0 + 1, grad_v0_y);

    const float grad_v0_z =
        ((-ay + by) * cx + (-bx + ax) * cy) / 2.0 * inv_norm * grad_areas[f] +
        ((-ay + by) - cx * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_2) *
            inv_norm * grad_normals[3 * f + 0] +
        ((-bx + ax) - cy * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_2) *
            inv_norm * grad_normals[3 * f + 1] +
        -cz * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_3 *
            grad_normals[3 * f + 2];
    atomicAdd(grad_verts + 3 * i0 + 2, grad_v0_z);

    // grad v1 coming from grad areas and grad normals
    const float grad_v1_x =
        (by * cz - bz * cy) / 2.0 * inv_norm * grad_areas[f] +
        -cx * (by * cz - bz * cy) * inv_norm_3 * grad_normals[3 * f + 0] +
        (-bz - cy * (by * cz - bz * cy) * inv_norm_2) * inv_norm *
            grad_normals[3 * f + 1] +
        (by - cz * (by * cz - bz * cy) * inv_norm_2) * inv_norm *
            grad_normals[3 * f + 2];
    atomicAdd(grad_verts + 3 * i1 + 0, grad_v1_x);

    const float grad_v1_y =
        (bz * cx - bx * cz) / 2.0 * inv_norm * grad_areas[f] +
        (bz - cx * (bz * cx - bx * cz) * inv_norm_2) * inv_norm *
            grad_normals[3 * f + 0] +
        -cy * (bz * cx - bx * cz) * inv_norm_3 * grad_normals[3 * f + 1] +
        (-bx - cz * (bz * cx - bx * cz) * inv_norm_2) * inv_norm *
            grad_normals[3 * f + 2];
    atomicAdd(grad_verts + 3 * i1 + 1, grad_v1_y);

    const float grad_v1_z =
        (bx * cy - by * cx) / 2.0 * inv_norm * grad_areas[f] +
        (-by - cx * (bx * cy - by * cx) * inv_norm_2) * inv_norm *
            grad_normals[3 * f + 0] +
        // BUGFIX: the normal-y term must subtract the projection along cy
        // (the original used cx here). The pattern of every other term is
        // d(n_j)/dv = (dc_j - c_j * dnorm * inv_norm_2) * inv_norm, and
        // for n_y the projection coefficient is cy.
        (bx - cy * (bx * cy - by * cx) * inv_norm_2) * inv_norm *
            grad_normals[3 * f + 1] +
        -cz * (bx * cy - by * cx) * inv_norm_3 * grad_normals[3 * f + 2];
    atomicAdd(grad_verts + 3 * i1 + 2, grad_v1_z);

    // grad v2 coming from grad areas
    const float grad_v2_x =
        (az * cy - ay * cz) / 2.0 * inv_norm * grad_areas[f] +
        -cx * (az * cy - ay * cz) * inv_norm_3 * grad_normals[3 * f + 0] +
        (az - cy * (az * cy - ay * cz) * inv_norm_2) * inv_norm *
            grad_normals[3 * f + 1] +
        (-ay - cz * (az * cy - ay * cz) * inv_norm_2) * inv_norm *
            grad_normals[3 * f + 2];
    atomicAdd(grad_verts + 3 * i2 + 0, grad_v2_x);

    const float grad_v2_y =
        (ax * cz - az * cx) / 2.0 * inv_norm * grad_areas[f] +
        (-az - cx * (ax * cz - az * cx) * inv_norm_2) * inv_norm *
            grad_normals[3 * f + 0] +
        -cy * (ax * cz - az * cx) * inv_norm_3 * grad_normals[3 * f + 1] +
        (ax - cz * (ax * cz - az * cx) * inv_norm_2) * inv_norm *
            grad_normals[3 * f + 2];
    atomicAdd(grad_verts + 3 * i2 + 1, grad_v2_y);

    const float grad_v2_z =
        (ay * cx - ax * cy) / 2.0 * inv_norm * grad_areas[f] +
        (ay - cx * (ay * cx - ax * cy) * inv_norm_2) * inv_norm *
            grad_normals[3 * f + 0] +
        (-ax - cy * (ay * cx - ax * cy) * inv_norm_2) * inv_norm *
            grad_normals[3 * f + 1] +
        -cz * (ay * cx - ax * cy) * inv_norm_3 * grad_normals[3 * f + 2];
    atomicAdd(grad_verts + 3 * i2 + 2, grad_v2_z);
  }
}
|
| 217 |
+
|
| 218 |
+
std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForwardCuda(
|
| 219 |
+
const at::Tensor verts,
|
| 220 |
+
const at::Tensor faces) {
|
| 221 |
+
const auto V = verts.size(0);
|
| 222 |
+
const auto F = faces.size(0);
|
| 223 |
+
|
| 224 |
+
// Check inputs are on the same device
|
| 225 |
+
at::TensorArg verts_t{verts, "verts", 1}, faces_t{faces, "faces", 2};
|
| 226 |
+
at::CheckedFrom c = "FaceAreasNormalsForwardCuda";
|
| 227 |
+
at::checkAllSameGPU(c, {verts_t, faces_t});
|
| 228 |
+
|
| 229 |
+
// Set the device for the kernel launch based on the device of verts
|
| 230 |
+
at::cuda::CUDAGuard device_guard(verts.device());
|
| 231 |
+
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
|
| 232 |
+
|
| 233 |
+
at::Tensor areas = at::empty({F}, verts.options());
|
| 234 |
+
at::Tensor normals = at::empty({F, 3}, verts.options());
|
| 235 |
+
|
| 236 |
+
if (areas.numel() == 0) {
|
| 237 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 238 |
+
return std::make_tuple(areas, normals);
|
| 239 |
+
}
|
| 240 |
+
|
| 241 |
+
const int blocks = 64;
|
| 242 |
+
const int threads = 512;
|
| 243 |
+
|
| 244 |
+
AT_DISPATCH_FLOATING_TYPES(
|
| 245 |
+
verts.scalar_type(), "face_areas_normals_forward_cuda", ([&] {
|
| 246 |
+
FaceAreasNormalsForwardKernel<scalar_t><<<blocks, threads, 0, stream>>>(
|
| 247 |
+
verts.contiguous().data_ptr<scalar_t>(),
|
| 248 |
+
faces.contiguous().data_ptr<int64_t>(),
|
| 249 |
+
areas.data_ptr<scalar_t>(),
|
| 250 |
+
normals.data_ptr<scalar_t>(),
|
| 251 |
+
V,
|
| 252 |
+
F);
|
| 253 |
+
}));
|
| 254 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 255 |
+
return std::make_tuple(areas, normals);
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
// Backward pass on the GPU: scatters the upstream gradients of per-face
// areas and normals back onto the shared vertices.
//
// Args:
//   grad_areas: (F,) upstream gradient w.r.t. the face areas.
//   grad_normals: (F, 3) upstream gradient w.r.t. the face normals.
//   verts: (V, 3) vertex positions used in the forward pass.
//   faces: (F, 3) int64 vertex indices per face.
// Returns:
//   grad_verts: (V, 3) gradient w.r.t. the vertex positions.
at::Tensor FaceAreasNormalsBackwardCuda(
    const at::Tensor grad_areas,
    const at::Tensor grad_normals,
    const at::Tensor verts,
    const at::Tensor faces) {
  // All four tensors must live on the same GPU.
  at::TensorArg verts_t{verts, "verts", 1}, faces_t{faces, "faces", 2},
      grad_areas_t{grad_areas, "grad_areas", 3},
      grad_normals_t{grad_normals, "grad_normals", 4};
  at::CheckedFrom c = "FaceAreasNormalsBackwardCuda";
  at::checkAllSameGPU(c, {verts_t, faces_t, grad_areas_t, grad_normals_t});
  // This is nondeterministic because atomicAdd
  at::globalContext().alertNotDeterministic("FaceAreasNormalsBackwardCuda");

  // Launch on the device that holds verts.
  at::cuda::CUDAGuard device_guard(verts.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  const auto num_verts = verts.size(0);
  const auto num_faces = faces.size(0);

  at::Tensor grad_verts = at::zeros({num_verts, 3}, grad_areas.options());

  // Nothing to compute for an empty mesh.
  if (grad_verts.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return grad_verts;
  }

  const int num_blocks = 64;
  const int threads_per_block = 512;
  // TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
  // doubles. Currently, support is for floats only.
  FaceAreasNormalsBackwardKernel<<<num_blocks, threads_per_block, 0, stream>>>(
      grad_areas.contiguous().data_ptr<float>(),
      grad_normals.contiguous().data_ptr<float>(),
      verts.contiguous().data_ptr<float>(),
      faces.contiguous().data_ptr<int64_t>(),
      grad_verts.data_ptr<float>(),
      num_verts,
      num_faces);

  AT_CUDA_CHECK(cudaGetLastError());
  return grad_verts;
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals.h
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#pragma once
|
| 10 |
+
#include <torch/extension.h>
|
| 11 |
+
#include <tuple>
|
| 12 |
+
#include "utils/pytorch3d_cutils.h"
|
| 13 |
+
|
| 14 |
+
// Compute areas of mesh faces using packed representation.
|
| 15 |
+
//
|
| 16 |
+
// Inputs:
|
| 17 |
+
// verts: FloatTensor of shape (V, 3) giving vertex positions.
|
| 18 |
+
// faces: LongTensor of shape (F, 3) giving faces.
|
| 19 |
+
//
|
| 20 |
+
// Returns:
|
| 21 |
+
// areas: FloatTensor of shape (F,) where areas[f] is the area of faces[f].
|
| 22 |
+
// normals: FloatTensor of shape (F, 3) where normals[f] is the normal of
|
| 23 |
+
// faces[f]
|
| 24 |
+
//
|
| 25 |
+
|
| 26 |
+
// Cpu implementation.
|
| 27 |
+
std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForwardCpu(
|
| 28 |
+
const at::Tensor verts,
|
| 29 |
+
const at::Tensor faces);
|
| 30 |
+
// Cpu implementation
|
| 31 |
+
at::Tensor FaceAreasNormalsBackwardCpu(
|
| 32 |
+
const at::Tensor grad_areas,
|
| 33 |
+
const at::Tensor grad_normals,
|
| 34 |
+
const at::Tensor verts,
|
| 35 |
+
const at::Tensor faces);
|
| 36 |
+
|
| 37 |
+
#ifdef WITH_CUDA
|
| 38 |
+
// Cuda implementation.
|
| 39 |
+
std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForwardCuda(
|
| 40 |
+
const at::Tensor verts,
|
| 41 |
+
const at::Tensor faces);
|
| 42 |
+
// Cuda implementation.
|
| 43 |
+
at::Tensor FaceAreasNormalsBackwardCuda(
|
| 44 |
+
const at::Tensor grad_areas,
|
| 45 |
+
const at::Tensor grad_normals,
|
| 46 |
+
const at::Tensor verts,
|
| 47 |
+
const at::Tensor faces);
|
| 48 |
+
#endif
|
| 49 |
+
|
| 50 |
+
// Implementation which is exposed.
|
| 51 |
+
std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForward(
|
| 52 |
+
const at::Tensor verts,
|
| 53 |
+
const at::Tensor faces) {
|
| 54 |
+
if (verts.is_cuda() && faces.is_cuda()) {
|
| 55 |
+
#ifdef WITH_CUDA
|
| 56 |
+
CHECK_CUDA(verts);
|
| 57 |
+
CHECK_CUDA(faces);
|
| 58 |
+
return FaceAreasNormalsForwardCuda(verts, faces);
|
| 59 |
+
#else
|
| 60 |
+
AT_ERROR("Not compiled with GPU support.");
|
| 61 |
+
#endif
|
| 62 |
+
}
|
| 63 |
+
return FaceAreasNormalsForwardCpu(verts, faces);
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
// Implementation which is exposed.
|
| 67 |
+
at::Tensor FaceAreasNormalsBackward(
|
| 68 |
+
const at::Tensor grad_areas,
|
| 69 |
+
const at::Tensor grad_normals,
|
| 70 |
+
const at::Tensor verts,
|
| 71 |
+
const at::Tensor faces) {
|
| 72 |
+
if (verts.is_cuda() && faces.is_cuda()) {
|
| 73 |
+
#ifdef WITH_CUDA
|
| 74 |
+
CHECK_CUDA(verts);
|
| 75 |
+
CHECK_CUDA(faces);
|
| 76 |
+
CHECK_CUDA(grad_areas);
|
| 77 |
+
CHECK_CUDA(grad_normals);
|
| 78 |
+
return FaceAreasNormalsBackwardCuda(grad_areas, grad_normals, verts, faces);
|
| 79 |
+
#else
|
| 80 |
+
AT_ERROR("Not compiled with GPU support.");
|
| 81 |
+
#endif
|
| 82 |
+
}
|
| 83 |
+
return FaceAreasNormalsBackwardCpu(grad_areas, grad_normals, verts, faces);
|
| 84 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals_cpu.cpp
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include <torch/extension.h>
|
| 10 |
+
#include <tuple>
|
| 11 |
+
|
| 12 |
+
std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForwardCpu(
|
| 13 |
+
const at::Tensor verts,
|
| 14 |
+
const at::Tensor faces) {
|
| 15 |
+
const int F = faces.size(0);
|
| 16 |
+
|
| 17 |
+
at::Tensor areas = at::empty({F}, verts.options());
|
| 18 |
+
at::Tensor normals = at::empty({F, 3}, verts.options());
|
| 19 |
+
|
| 20 |
+
auto verts_a = verts.accessor<float, 2>();
|
| 21 |
+
auto faces_a = faces.accessor<int64_t, 2>();
|
| 22 |
+
auto areas_a = areas.accessor<float, 1>();
|
| 23 |
+
auto normals_a = normals.accessor<float, 2>();
|
| 24 |
+
|
| 25 |
+
for (int f = 0; f < F; ++f) {
|
| 26 |
+
const int64_t i0 = faces_a[f][0];
|
| 27 |
+
const int64_t i1 = faces_a[f][1];
|
| 28 |
+
const int64_t i2 = faces_a[f][2];
|
| 29 |
+
|
| 30 |
+
const float v0_x = verts_a[i0][0];
|
| 31 |
+
const float v0_y = verts_a[i0][1];
|
| 32 |
+
const float v0_z = verts_a[i0][2];
|
| 33 |
+
|
| 34 |
+
const float v1_x = verts_a[i1][0];
|
| 35 |
+
const float v1_y = verts_a[i1][1];
|
| 36 |
+
const float v1_z = verts_a[i1][2];
|
| 37 |
+
|
| 38 |
+
const float v2_x = verts_a[i2][0];
|
| 39 |
+
const float v2_y = verts_a[i2][1];
|
| 40 |
+
const float v2_z = verts_a[i2][2];
|
| 41 |
+
|
| 42 |
+
const float ax = v1_x - v0_x;
|
| 43 |
+
const float ay = v1_y - v0_y;
|
| 44 |
+
const float az = v1_z - v0_z;
|
| 45 |
+
|
| 46 |
+
const float bx = v2_x - v0_x;
|
| 47 |
+
const float by = v2_y - v0_y;
|
| 48 |
+
const float bz = v2_z - v0_z;
|
| 49 |
+
|
| 50 |
+
const float cx = ay * bz - az * by;
|
| 51 |
+
const float cy = az * bx - ax * bz;
|
| 52 |
+
const float cz = ax * by - ay * bx;
|
| 53 |
+
|
| 54 |
+
float norm = sqrt(cx * cx + cy * cy + cz * cz);
|
| 55 |
+
areas_a[f] = norm / 2.0;
|
| 56 |
+
norm = (norm < 1e-6) ? 1e-6 : norm; // max(norm, 1e-6)
|
| 57 |
+
normals_a[f][0] = cx / norm;
|
| 58 |
+
normals_a[f][1] = cy / norm;
|
| 59 |
+
normals_a[f][2] = cz / norm;
|
| 60 |
+
}
|
| 61 |
+
return std::make_tuple(areas, normals);
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
// CPU backward pass of FaceAreasNormalsForwardCpu.
//
// Args:
//   grad_areas: (F,) upstream gradient w.r.t. the face areas.
//   grad_normals: (F, 3) upstream gradient w.r.t. the face normals.
//   verts: (V, 3) float vertex positions used in the forward pass.
//   faces: (F, 3) int64 vertex indices per face.
// Returns:
//   grad_verts: (V, 3) gradient w.r.t. the vertex positions.
at::Tensor FaceAreasNormalsBackwardCpu(
    const at::Tensor grad_areas,
    const at::Tensor grad_normals,
    const at::Tensor verts,
    const at::Tensor faces) {
  const int V = verts.size(0);
  const int F = faces.size(0);

  at::Tensor grad_verts = at::zeros({V, 3}, grad_areas.options());

  auto grad_areas_a = grad_areas.accessor<float, 1>();
  auto grad_normals_a = grad_normals.accessor<float, 2>();
  auto verts_a = verts.accessor<float, 2>();
  auto faces_a = faces.accessor<int64_t, 2>();
  auto grad_verts_a = grad_verts.accessor<float, 2>();

  for (int f = 0; f < F; ++f) {
    const int64_t i0 = faces_a[f][0];
    const int64_t i1 = faces_a[f][1];
    const int64_t i2 = faces_a[f][2];

    const float v0_x = verts_a[i0][0];
    const float v0_y = verts_a[i0][1];
    const float v0_z = verts_a[i0][2];

    const float v1_x = verts_a[i1][0];
    const float v1_y = verts_a[i1][1];
    const float v1_z = verts_a[i1][2];

    const float v2_x = verts_a[i2][0];
    const float v2_y = verts_a[i2][1];
    const float v2_z = verts_a[i2][2];

    // Edge vectors a = v1 - v0, b = v2 - v0, and cross product c = a x b.
    const float ax = v1_x - v0_x;
    const float ay = v1_y - v0_y;
    const float az = v1_z - v0_z;

    const float bx = v2_x - v0_x;
    const float by = v2_y - v0_y;
    const float bz = v2_z - v0_z;

    const float cx = ay * bz - az * by;
    const float cy = az * bx - ax * bz;
    const float cz = ax * by - ay * bx;

    float norm = sqrt(cx * cx + cy * cy + cz * cz);
    norm = (norm < 1e-6) ? 1e-6 : norm; // max(norm, 1e-6)
    float inv_norm = 1. / norm;
    float inv_norm_2 = pow(inv_norm, 2.0f);
    float inv_norm_3 = pow(inv_norm, 3.0f);

    // We compute gradients with respect to the input vertices.
    // For each vertex, gradients come from grad_areas and grad_normals.
    // eg, grad_v0_x = (d / d v0_x)
    //    = \sum_f (d / d areas[f]) * (d areas[f] / d v0_x)
    //           + (d / d normals[f, 0]) * (d normals[f, 0] / d v0_x)
    //           + (d / d normals[f, 1]) * (d normals[f, 1] / d v0_x)
    //           + (d / d normals[f, 2]) * (d normals[f, 2] / d v0_x)
    // with (d / d areas[f]) = grad_areas[f] and
    //      (d / d normals[f, j]) = grad_normals[f][j].
    // Each normal term follows the quotient rule for n = c / |c|:
    //   d(n_j)/dv = (dc_j - c_j * d|c| / |c|^2) / |c|.
    //
    // FIX: every accumulation below uses the fast accessor grad_verts_a.
    // The original mixed grad_verts_a with grad_verts[i][j]; at::Tensor
    // operator[] materializes a temporary tensor per element, which is
    // very slow in this inner loop and inconsistent with the rest of the
    // function (and with the CUDA implementation).

    // grad v0 coming from grad areas and grad normals
    const float grad_v0_x =
        ((-az + bz) * cy + (-by + ay) * cz) / 2.0 * inv_norm * grad_areas_a[f] +
        -cx * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_3 *
            grad_normals_a[f][0] +
        ((-az + bz) - cy * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_2) *
            inv_norm * grad_normals_a[f][1] +
        ((-by + ay) - cz * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_2) *
            inv_norm * grad_normals_a[f][2];
    grad_verts_a[i0][0] += grad_v0_x;

    const float grad_v0_y =
        ((-bz + az) * cx + (-ax + bx) * cz) / 2.0 * inv_norm * grad_areas_a[f] +
        ((-bz + az) - cx * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_2) *
            inv_norm * grad_normals_a[f][0] +
        -cy * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_3 *
            grad_normals_a[f][1] +
        ((-ax + bx) - cz * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_2) *
            inv_norm * grad_normals_a[f][2];
    grad_verts_a[i0][1] += grad_v0_y;

    const float grad_v0_z =
        ((-ay + by) * cx + (-bx + ax) * cy) / 2.0 * inv_norm * grad_areas_a[f] +
        ((-ay + by) - cx * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_2) *
            inv_norm * grad_normals_a[f][0] +
        ((-bx + ax) - cy * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_2) *
            inv_norm * grad_normals_a[f][1] +
        -cz * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_3 *
            grad_normals_a[f][2];
    grad_verts_a[i0][2] += grad_v0_z;

    // grad v1 coming from grad areas and grad normals
    const float grad_v1_x =
        (by * cz - bz * cy) / 2.0 * inv_norm * grad_areas_a[f] +
        -cx * (by * cz - bz * cy) * inv_norm_3 * grad_normals_a[f][0] +
        (-bz - cy * (by * cz - bz * cy) * inv_norm_2) * inv_norm *
            grad_normals_a[f][1] +
        (by - cz * (by * cz - bz * cy) * inv_norm_2) * inv_norm *
            grad_normals_a[f][2];
    grad_verts_a[i1][0] += grad_v1_x;

    const float grad_v1_y =
        (bz * cx - bx * cz) / 2.0 * inv_norm * grad_areas_a[f] +
        (bz - cx * (bz * cx - bx * cz) * inv_norm_2) * inv_norm *
            grad_normals_a[f][0] +
        -cy * (bz * cx - bx * cz) * inv_norm_3 * grad_normals_a[f][1] +
        (-bx - cz * (bz * cx - bx * cz) * inv_norm_2) * inv_norm *
            grad_normals_a[f][2];
    grad_verts_a[i1][1] += grad_v1_y;

    const float grad_v1_z =
        (bx * cy - by * cx) / 2.0 * inv_norm * grad_areas_a[f] +
        (-by - cx * (bx * cy - by * cx) * inv_norm_2) * inv_norm *
            grad_normals_a[f][0] +
        // BUGFIX: the normal-y term must subtract the projection along cy
        // (the original used cx here), matching the quotient-rule pattern
        // of every other term.
        (bx - cy * (bx * cy - by * cx) * inv_norm_2) * inv_norm *
            grad_normals_a[f][1] +
        -cz * (bx * cy - by * cx) * inv_norm_3 * grad_normals_a[f][2];
    grad_verts_a[i1][2] += grad_v1_z;

    // grad v2 coming from grad areas
    const float grad_v2_x =
        (az * cy - ay * cz) / 2.0 * inv_norm * grad_areas_a[f] +
        -cx * (az * cy - ay * cz) * inv_norm_3 * grad_normals_a[f][0] +
        (az - cy * (az * cy - ay * cz) * inv_norm_2) * inv_norm *
            grad_normals_a[f][1] +
        (-ay - cz * (az * cy - ay * cz) * inv_norm_2) * inv_norm *
            grad_normals_a[f][2];
    grad_verts_a[i2][0] += grad_v2_x;

    const float grad_v2_y =
        (ax * cz - az * cx) / 2.0 * inv_norm * grad_areas_a[f] +
        (-az - cx * (ax * cz - az * cx) * inv_norm_2) * inv_norm *
            grad_normals_a[f][0] +
        -cy * (ax * cz - az * cx) * inv_norm_3 * grad_normals_a[f][1] +
        (ax - cz * (ax * cz - az * cx) * inv_norm_2) * inv_norm *
            grad_normals_a[f][2];
    grad_verts_a[i2][1] += grad_v2_y;

    const float grad_v2_z =
        (ay * cx - ax * cy) / 2.0 * inv_norm * grad_areas_a[f] +
        (ay - cx * (ay * cx - ax * cy) * inv_norm_2) * inv_norm *
            grad_normals_a[f][0] +
        (-ax - cy * (ay * cx - ax * cy) * inv_norm_2) * inv_norm *
            grad_normals_a[f][1] +
        -cz * (ay * cx - ax * cy) * inv_norm_3 * grad_normals_a[f][2];
    grad_verts_a[i2][2] += grad_v2_z;
  }
  return grad_verts;
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter.cu
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include <ATen/ATen.h>
|
| 10 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 11 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 12 |
+
|
| 13 |
+
// TODO(T47953967) to make this cuda kernel support all datatypes.
|
| 14 |
+
// Fused gather/scatter over graph edges: for every edge (v0, v1) the
// feature row of v1 is atomically accumulated into the output row of v0;
// undirected graphs additionally accumulate in the opposite direction.
// TODO(T47953967) to make this cuda kernel support all datatypes.
__global__ void GatherScatterCudaKernel(
    const float* __restrict__ input,
    const int64_t* __restrict__ edges,
    float* __restrict__ output,
    bool directed,
    bool backward,
    const size_t V,
    const size_t D,
    const size_t E) {
  const int feat = threadIdx.x;

  // The backward pass swaps the roles of the two edge endpoints.
  const int src_slot = backward ? 1 : 0;
  const int dst_slot = backward ? 0 : 1;

  // Each block strides over a subset of the edges.
  for (int e = blockIdx.x; e < E; e += gridDim.x) {
    // Endpoint indices of the current edge.
    const int64_t v0 = edges[2 * e + src_slot];
    const int64_t v1 = edges[2 * e + dst_slot];

    // Threads of the block stride over the feature dimension.
    // This is quite wasteful when D < blockDim.x since many threads idle.
    for (int d = feat; d < D; d += blockDim.x) {
      atomicAdd(output + v0 * D + d, input[v1 * D + d]);
      if (!directed) {
        // Undirected: also scatter v0's features into v1's row.
        atomicAdd(output + v1 * D + d, input[v0 * D + d]);
      }
    }
    __syncthreads();
  }
}
|
| 51 |
+
|
| 52 |
+
// Host-side launcher for GatherScatterCudaKernel.
//
// Args:
//   input: (V, D) float tensor of per-vertex features.
//   edges: (E, 2) int64 tensor of vertex-index pairs.
//   directed: if false, features are scattered in both edge directions.
//   backward: if true, the edge endpoints swap roles (backward pass).
//
// Returns:
//   (V, D) float tensor where each row accumulates neighbor features.
at::Tensor GatherScatterCuda(
    const at::Tensor& input,
    const at::Tensor& edges,
    bool directed,
    bool backward) {
  // Both tensors must live on the same GPU.
  at::TensorArg input_t{input, "input", 1}, edges_t{edges, "edges", 2};
  at::CheckedFrom c = "GatherScatterCuda";
  at::checkAllSameGPU(c, {input_t, edges_t});

  // Launch on the device owning the input, on the current stream.
  at::cuda::CUDAGuard device_guard(input.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  const auto num_vertices = input.size(0);
  const auto input_feature_dim = input.size(1);
  const auto num_edges = edges.size(0);

  auto output = at::zeros({num_vertices, input_feature_dim}, input.options());

  // Cap the grid size and let the kernel grid-stride over the rest.
  const size_t threads = 128;
  const size_t max_blocks = 1920;
  const size_t blocks = num_edges < max_blocks ? num_edges : max_blocks;

  // Nothing to do for an empty output.
  if (output.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return output;
  }

  // Named locals keep the contiguous copies alive across the launch.
  auto input_c = input.contiguous();
  auto edges_c = edges.contiguous();
  GatherScatterCudaKernel<<<blocks, threads, 0, stream>>>(
      input_c.data_ptr<float>(),
      edges_c.data_ptr<int64_t>(),
      output.data_ptr<float>(),
      directed,
      backward,
      num_vertices,
      input_feature_dim,
      num_edges);
  AT_CUDA_CHECK(cudaGetLastError());
  return output;
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/knn/knn_cpu.cpp
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include <torch/extension.h>

#include <cmath>
#include <queue>
#include <tuple>
|
| 12 |
+
|
| 13 |
+
std::tuple<at::Tensor, at::Tensor> KNearestNeighborIdxCpu(
|
| 14 |
+
const at::Tensor& p1,
|
| 15 |
+
const at::Tensor& p2,
|
| 16 |
+
const at::Tensor& lengths1,
|
| 17 |
+
const at::Tensor& lengths2,
|
| 18 |
+
const int norm,
|
| 19 |
+
const int K) {
|
| 20 |
+
const int N = p1.size(0);
|
| 21 |
+
const int P1 = p1.size(1);
|
| 22 |
+
const int D = p1.size(2);
|
| 23 |
+
|
| 24 |
+
auto long_opts = lengths1.options().dtype(torch::kInt64);
|
| 25 |
+
torch::Tensor idxs = torch::full({N, P1, K}, 0, long_opts);
|
| 26 |
+
torch::Tensor dists = torch::full({N, P1, K}, 0, p1.options());
|
| 27 |
+
|
| 28 |
+
auto p1_a = p1.accessor<float, 3>();
|
| 29 |
+
auto p2_a = p2.accessor<float, 3>();
|
| 30 |
+
auto lengths1_a = lengths1.accessor<int64_t, 1>();
|
| 31 |
+
auto lengths2_a = lengths2.accessor<int64_t, 1>();
|
| 32 |
+
auto idxs_a = idxs.accessor<int64_t, 3>();
|
| 33 |
+
auto dists_a = dists.accessor<float, 3>();
|
| 34 |
+
|
| 35 |
+
for (int n = 0; n < N; ++n) {
|
| 36 |
+
const int64_t length1 = lengths1_a[n];
|
| 37 |
+
const int64_t length2 = lengths2_a[n];
|
| 38 |
+
for (int64_t i1 = 0; i1 < length1; ++i1) {
|
| 39 |
+
// Use a priority queue to store (distance, index) tuples.
|
| 40 |
+
std::priority_queue<std::tuple<float, int>> q;
|
| 41 |
+
for (int64_t i2 = 0; i2 < length2; ++i2) {
|
| 42 |
+
float dist = 0;
|
| 43 |
+
for (int d = 0; d < D; ++d) {
|
| 44 |
+
float diff = p1_a[n][i1][d] - p2_a[n][i2][d];
|
| 45 |
+
if (norm == 1) {
|
| 46 |
+
dist += abs(diff);
|
| 47 |
+
} else { // norm is 2 (default)
|
| 48 |
+
dist += diff * diff;
|
| 49 |
+
}
|
| 50 |
+
}
|
| 51 |
+
int size = static_cast<int>(q.size());
|
| 52 |
+
if (size < K || dist < std::get<0>(q.top())) {
|
| 53 |
+
q.emplace(dist, i2);
|
| 54 |
+
if (size >= K) {
|
| 55 |
+
q.pop();
|
| 56 |
+
}
|
| 57 |
+
}
|
| 58 |
+
}
|
| 59 |
+
while (!q.empty()) {
|
| 60 |
+
auto t = q.top();
|
| 61 |
+
q.pop();
|
| 62 |
+
const int k = q.size();
|
| 63 |
+
dists_a[n][i1][k] = std::get<0>(t);
|
| 64 |
+
idxs_a[n][i1][k] = std::get<1>(t);
|
| 65 |
+
}
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
return std::make_tuple(idxs, dists);
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
// ------------------------------------------------------------- //
|
| 72 |
+
// Backward Operators //
|
| 73 |
+
// ------------------------------------------------------------- //
|
| 74 |
+
|
| 75 |
+
std::tuple<at::Tensor, at::Tensor> KNearestNeighborBackwardCpu(
|
| 76 |
+
const at::Tensor& p1,
|
| 77 |
+
const at::Tensor& p2,
|
| 78 |
+
const at::Tensor& lengths1,
|
| 79 |
+
const at::Tensor& lengths2,
|
| 80 |
+
const at::Tensor& idxs,
|
| 81 |
+
const int norm,
|
| 82 |
+
const at::Tensor& grad_dists) {
|
| 83 |
+
const int N = p1.size(0);
|
| 84 |
+
const int P1 = p1.size(1);
|
| 85 |
+
const int D = p1.size(2);
|
| 86 |
+
const int P2 = p2.size(1);
|
| 87 |
+
const int K = idxs.size(2);
|
| 88 |
+
|
| 89 |
+
torch::Tensor grad_p1 = torch::full({N, P1, D}, 0, p1.options());
|
| 90 |
+
torch::Tensor grad_p2 = torch::full({N, P2, D}, 0, p2.options());
|
| 91 |
+
|
| 92 |
+
auto p1_a = p1.accessor<float, 3>();
|
| 93 |
+
auto p2_a = p2.accessor<float, 3>();
|
| 94 |
+
auto lengths1_a = lengths1.accessor<int64_t, 1>();
|
| 95 |
+
auto lengths2_a = lengths2.accessor<int64_t, 1>();
|
| 96 |
+
auto idxs_a = idxs.accessor<int64_t, 3>();
|
| 97 |
+
auto grad_dists_a = grad_dists.accessor<float, 3>();
|
| 98 |
+
auto grad_p1_a = grad_p1.accessor<float, 3>();
|
| 99 |
+
auto grad_p2_a = grad_p2.accessor<float, 3>();
|
| 100 |
+
|
| 101 |
+
for (int n = 0; n < N; ++n) {
|
| 102 |
+
const int64_t length1 = lengths1_a[n];
|
| 103 |
+
int64_t length2 = lengths2_a[n];
|
| 104 |
+
length2 = (length2 < K) ? length2 : K;
|
| 105 |
+
for (int64_t i1 = 0; i1 < length1; ++i1) {
|
| 106 |
+
for (int64_t k = 0; k < length2; ++k) {
|
| 107 |
+
const int64_t i2 = idxs_a[n][i1][k];
|
| 108 |
+
// If the index is the pad value of -1 then ignore it
|
| 109 |
+
if (i2 == -1) {
|
| 110 |
+
continue;
|
| 111 |
+
}
|
| 112 |
+
for (int64_t d = 0; d < D; ++d) {
|
| 113 |
+
float diff = 0.0;
|
| 114 |
+
if (norm == 1) {
|
| 115 |
+
float sign = (p1_a[n][i1][d] > p2_a[n][i2][d]) ? 1.0 : -1.0;
|
| 116 |
+
diff = grad_dists_a[n][i1][k] * sign;
|
| 117 |
+
} else { // norm is 2 (default)
|
| 118 |
+
diff = 2.0f * grad_dists_a[n][i1][k] *
|
| 119 |
+
(p1_a[n][i1][d] - p2_a[n][i2][d]);
|
| 120 |
+
}
|
| 121 |
+
grad_p1_a[n][i1][d] += diff;
|
| 122 |
+
grad_p2_a[n][i2][d] += -1.0f * diff;
|
| 123 |
+
}
|
| 124 |
+
}
|
| 125 |
+
}
|
| 126 |
+
}
|
| 127 |
+
return std::make_tuple(grad_p1, grad_p2);
|
| 128 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.cu
ADDED
|
@@ -0,0 +1,565 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include <ATen/ATen.h>
|
| 10 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 11 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 12 |
+
#include <cstdio>
|
| 13 |
+
#include "marching_cubes/tables.h"
|
| 14 |
+
|
| 15 |
+
/*
|
| 16 |
+
Parallelized marching cubes for pytorch extension
|
| 17 |
+
referenced and adapted from CUDA-Samples:
|
| 18 |
+
(https://github.com/NVIDIA/cuda-samples/tree/master/Samples/5_Domain_Specific/marchingCubes)
|
| 19 |
+
We divide the algorithm into two forward-passes:
|
| 20 |
+
(1) The first forward-pass executes "ClassifyVoxelKernel" to
|
| 21 |
+
evaluate volume scalar field for each cube and pre-compute
|
| 22 |
+
two arrays -- number of vertices per cube (d_voxelVerts) and
|
| 23 |
+
occupied or not per cube (d_voxelOccupied).
|
| 24 |
+
|
| 25 |
+
Some prepration steps:
|
| 26 |
+
With d_voxelOccupied, an exclusive scan is performed to compute
|
| 27 |
+
the number of activeVoxels, which can be used to accelerate
|
| 28 |
+
computation. With d_voxelVerts, another exclusive scan
|
| 29 |
+
is performed to compute the accumulated sum of vertices in the 3d
|
| 30 |
+
grid and totalVerts.
|
| 31 |
+
|
| 32 |
+
(2) The second forward-pass calls "GenerateFacesKernel" to
|
| 33 |
+
generate interpolated vertex positions and face indices by "marching
|
| 34 |
+
through" each cube in the grid.
|
| 35 |
+
|
| 36 |
+
*/
|
| 37 |
+
|
| 38 |
+
// EPS: Used to indicate if two float values are close
|
| 39 |
+
__constant__ const float EPSILON = 1e-5;
|
| 40 |
+
|
| 41 |
+
// Linearly interpolate the position where an isosurface cuts an edge
|
| 42 |
+
// between two vertices, based on their scalar values
|
| 43 |
+
//
|
| 44 |
+
// Args:
|
| 45 |
+
// isolevel: float value used as threshold
|
| 46 |
+
// p1: position of point1
|
| 47 |
+
// p2: position of point2
|
| 48 |
+
// valp1: field value for p1
|
| 49 |
+
// valp2: field value for p2
|
| 50 |
+
//
|
| 51 |
+
// Returns:
|
| 52 |
+
// point: interpolated verte
|
| 53 |
+
//
|
| 54 |
+
// Linearly interpolate along the edge (p1, p2) to the point where the
// scalar field crosses `isolevel`, given the field values at the two
// endpoints. Degenerate configurations (an endpoint already on the
// isosurface, or a flat edge) short-circuit to an endpoint.
__device__ float3
vertexInterp(float isolevel, float3 p1, float3 p2, float valp1, float valp2) {
  // Snap to an endpoint when the crossing is (numerically) on it.
  if (abs(isolevel - valp1) < EPSILON) {
    return p1;
  }
  if (abs(isolevel - valp2) < EPSILON) {
    return p2;
  }
  // Guard against dividing by ~0 on a flat edge.
  if (abs(valp1 - valp2) < EPSILON) {
    return p1;
  }

  const float t = (isolevel - valp1) / (valp2 - valp1);

  float3 out;
  out.x = p1.x * (1 - t) + p2.x * t;
  out.y = p1.y * (1 - t) + p2.y * t;
  out.z = p1.z * (1 - t) + p2.z * t;
  return out;
}
|
| 75 |
+
|
| 76 |
+
// Determine if the triangle is degenerate
|
| 77 |
+
// A triangle is degenerate when at least two of the vertices
|
| 78 |
+
// share the same position.
|
| 79 |
+
//
|
| 80 |
+
// Args:
|
| 81 |
+
// p1: position of vertex p1
|
| 82 |
+
// p2: position of vertex p2
|
| 83 |
+
// p3: position of vertex p3
|
| 84 |
+
//
|
| 85 |
+
// Returns:
|
| 86 |
+
// boolean indicator if the triangle is degenerate
|
| 87 |
+
// A triangle is degenerate when at least two of its vertices coincide
// (within EPSILON on every coordinate).
__device__ bool isDegenerate(float3 p1, float3 p2, float3 p3) {
  const bool p1_eq_p2 = abs(p1.x - p2.x) < EPSILON &&
      abs(p1.y - p2.y) < EPSILON && abs(p1.z - p2.z) < EPSILON;
  const bool p2_eq_p3 = abs(p2.x - p3.x) < EPSILON &&
      abs(p2.y - p3.y) < EPSILON && abs(p2.z - p3.z) < EPSILON;
  const bool p3_eq_p1 = abs(p3.x - p1.x) < EPSILON &&
      abs(p3.y - p1.y) < EPSILON && abs(p3.z - p1.z) < EPSILON;
  return p1_eq_p2 || p2_eq_p3 || p3_eq_p1;
}
|
| 99 |
+
|
| 100 |
+
// Convert from local vertex id to global vertex id, given position
|
| 101 |
+
// of the cube where the vertex resides. The function ensures vertices
|
| 102 |
+
// shared from adjacent cubes are mapped to the same global id.
|
| 103 |
+
|
| 104 |
+
// Args:
|
| 105 |
+
// v: local vertex id
|
| 106 |
+
// x: x position of the cube where the vertex belongs
|
| 107 |
+
// y: y position of the cube where the vertex belongs
|
| 108 |
+
// z: z position of the cube where the vertex belongs
|
| 109 |
+
// W: width of x dimension
|
| 110 |
+
// H: height of y dimension
|
| 111 |
+
|
| 112 |
+
// Returns:
|
| 113 |
+
// global vertex id represented by its x/y/z offsets
|
| 114 |
+
// Map a cube-local vertex id (0..7, bit-encoded axis offsets) plus the
// cube's grid position to the global linear index of that grid vertex,
// so vertices shared by adjacent cubes get the same global id.
__device__ uint localToGlobal(int v, int x, int y, int z, int W, int H) {
  // Bits 0/1/2 of v are the x/y/z offsets within the cube.
  const int ox = x + (v & 1);
  const int oy = y + ((v >> 1) & 1);
  const int oz = z + ((v >> 2) & 1);
  return ox + W * (oy + H * oz);
}
|
| 120 |
+
|
| 121 |
+
// Hash_combine a pair of global vertex id to a single integer.
|
| 122 |
+
//
|
| 123 |
+
// Args:
|
| 124 |
+
// v1_id: global id of vertex 1
|
| 125 |
+
// v2_id: global id of vertex 2
|
| 126 |
+
// W: width of the 3d grid
|
| 127 |
+
// H: height of the 3d grid
|
| 128 |
+
// Z: depth of the 3d grid
|
| 129 |
+
//
|
| 130 |
+
// Returns:
|
| 131 |
+
// hashing for a pair of vertex ids
|
| 132 |
+
//
|
| 133 |
+
// The multiplier (W + W*H + W*H*D) is strictly greater than the largest
// possible global vertex id (W*H*D - 1), so distinct (v1_id, v2_id)
// pairs map to distinct hash values.
__device__ int64_t hashVpair(uint v1_id, uint v2_id, int W, int H, int D) {
  return (int64_t)v1_id * (W + W * H + W * H * D) + (int64_t)v2_id;
}
|
| 136 |
+
|
| 137 |
+
// precompute number of vertices and occupancy
|
| 138 |
+
// for each voxel in the grid.
|
| 139 |
+
//
|
| 140 |
+
// Args:
|
| 141 |
+
// voxelVerts: pointer to device array to store number
|
| 142 |
+
// of verts per voxel
|
| 143 |
+
// voxelOccupied: pointer to device array to store
|
| 144 |
+
// occupancy state per voxel
|
| 145 |
+
// vol: torch tensor stored with 3D scalar field
|
| 146 |
+
// isolevel: threshold to determine isosurface intersection
|
| 147 |
+
//
|
| 148 |
+
// First marching-cubes pass: each thread walks the voxel grid with a
// grid-stride loop, builds the 8-bit cube configuration index from the
// corner field values, and records per voxel (a) how many triangle
// vertices that configuration emits and (b) whether it emits any.
__global__ void ClassifyVoxelKernel(
    at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits> voxelVerts,
    at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits> voxelOccupied,
    const at::PackedTensorAccessor32<float, 3, at::RestrictPtrTraits> vol,
    // const at::PackedTensorAccessor<int, 1, at::RestrictPtrTraits>
    // numVertsTable,
    float isolevel) {
  // Maps the bit-offset corner enumeration (x=bit0, y=bit1, z=bit2) to
  // the corner numbering the marching-cubes tables expect.
  const int indexTable[8]{0, 1, 4, 5, 3, 2, 7, 6};
  // vol holds (size+1) samples per axis; D/H/W count voxels (cubes).
  const uint D = vol.size(0) - 1;
  const uint H = vol.size(1) - 1;
  const uint W = vol.size(2) - 1;

  // 1-d grid
  uint id = blockIdx.x * blockDim.x + threadIdx.x;
  uint num_threads = gridDim.x * blockDim.x;

  // Table mapping from cubeindex to number of vertices in the configuration
  const unsigned char numVertsTable[256] = {
      0, 3, 3, 6, 3, 6, 6, 9, 3, 6, 6, 9, 6, 9, 9, 6, 3, 6,
      6, 9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9, 3, 6, 6, 9,
      6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9, 6, 9, 9, 6, 9, 12,
      12, 9, 9, 12, 12, 9, 12, 15, 15, 6, 3, 6, 6, 9, 6, 9, 9, 12,
      6, 9, 9, 12, 9, 12, 12, 9, 6, 9, 9, 12, 9, 12, 12, 15, 9, 12,
      12, 15, 12, 15, 15, 12, 6, 9, 9, 12, 9, 12, 6, 9, 9, 12, 12, 15,
      12, 15, 9, 6, 9, 12, 12, 9, 12, 15, 9, 6, 12, 15, 15, 12, 15, 6,
      12, 3, 3, 6, 6, 9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9,
      6, 9, 9, 12, 9, 12, 12, 15, 9, 6, 12, 9, 12, 9, 15, 6, 6, 9,
      9, 12, 9, 12, 12, 15, 9, 12, 12, 15, 12, 15, 15, 12, 9, 12, 12, 9,
      12, 15, 15, 12, 12, 9, 15, 6, 15, 12, 6, 3, 6, 9, 9, 12, 9, 12,
      12, 15, 9, 12, 12, 15, 6, 9, 9, 6, 9, 12, 12, 15, 12, 15, 15, 6,
      12, 9, 15, 12, 9, 6, 12, 3, 9, 12, 12, 15, 12, 15, 9, 12, 12, 15,
      15, 6, 9, 12, 6, 3, 6, 9, 9, 6, 9, 12, 6, 3, 9, 6, 12, 3,
      6, 3, 3, 0,
  };

  // Grid-stride loop over all W*H*D voxels.
  for (uint tid = id; tid < D * H * W; tid += num_threads) {
    // compute global location of the voxel
    const int gx = tid % W;
    const int gy = tid / W % H;
    const int gz = tid / (W * H);

    // Build the configuration: bit indexTable[i] is set when corner i
    // lies below the isosurface.
    int cubeindex = 0;
    for (int i = 0; i < 8; i++) {
      const int dx = i & 1;
      const int dy = i >> 1 & 1;
      const int dz = i >> 2 & 1;

      const int x = gx + dx;
      const int y = gy + dy;
      const int z = gz + dz;

      if (vol[z][y][x] < isolevel) {
        cubeindex |= 1 << indexTable[i];
      }
    }
    // collect number of vertices for each voxel
    unsigned char numVerts = numVertsTable[cubeindex];
    voxelVerts[tid] = numVerts;
    voxelOccupied[tid] = (numVerts > 0);
  }
}
|
| 209 |
+
|
| 210 |
+
// extract compact voxel array for acceleration
|
| 211 |
+
//
|
| 212 |
+
// Args:
|
| 213 |
+
// compactedVoxelArray: tensor of shape (activeVoxels,) which maps
|
| 214 |
+
// from accumulated non-empty voxel index to original 3d grid index
|
| 215 |
+
// voxelOccupied: tensor of shape (numVoxels,) which stores
|
| 216 |
+
// the occupancy state per voxel
|
| 217 |
+
// voxelOccupiedScan: tensor of shape (numVoxels,) which
|
| 218 |
+
// stores the accumulated occupied voxel counts
|
| 219 |
+
// numVoxels: number of total voxels in the grid
|
| 220 |
+
//
|
| 221 |
+
// Stream compaction: builds a dense array of the indices of all
// occupied voxels, using the exclusive scan of the occupancy flags as
// each voxel's destination slot in the compacted array.
__global__ void CompactVoxelsKernel(
    at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
        compactedVoxelArray,
    const at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
        voxelOccupied,
    const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
        voxelOccupiedScan,
    uint numVoxels) {
  const uint first = blockIdx.x * blockDim.x + threadIdx.x;
  const uint stride = gridDim.x * blockDim.x;
  for (uint voxel = first; voxel < numVoxels; voxel += stride) {
    if (!voxelOccupied[voxel]) {
      continue;
    }
    // The scan value counts the occupied voxels before this one, i.e.
    // this voxel's position in the compact list.
    compactedVoxelArray[voxelOccupiedScan[voxel]] = voxel;
  }
}
|
| 237 |
+
|
| 238 |
+
// generate triangles for each voxel using marching cubes
|
| 239 |
+
//
|
| 240 |
+
// Args:
|
| 241 |
+
// verts: torch tensor of shape (V, 3) to store interpolated mesh vertices
|
| 242 |
+
// faces: torch tensor of shape (F, 3) to store indices for mesh faces
|
| 243 |
+
// ids: torch tensor of shape (V) to store id of each vertex
|
| 244 |
+
// compactedVoxelArray: tensor of shape (activeVoxels,) which stores
|
| 245 |
+
// non-empty voxel index.
|
| 246 |
+
// numVertsScanned: tensor of shape (numVoxels,) which stores accumulated
|
| 247 |
+
// vertices count in the voxel
|
| 248 |
+
// activeVoxels: number of active voxels used for acceleration
|
| 249 |
+
// vol: torch tensor stored with 3D scalar field
|
| 250 |
+
// isolevel: threshold to determine isosurface intersection
|
| 251 |
+
//
|
| 252 |
+
// Second marching-cubes pass: for each occupied voxel, rebuild its cube
// configuration, interpolate the isosurface crossing on each of the 12
// cube edges, and emit the triangle vertices / face indices / global
// edge ids into pre-sized output tensors at offsets given by the
// exclusive scan of per-voxel vertex counts.
__global__ void GenerateFacesKernel(
    at::PackedTensorAccessor32<float, 2, at::RestrictPtrTraits> verts,
    at::PackedTensorAccessor<int64_t, 2, at::RestrictPtrTraits> faces,
    at::PackedTensorAccessor<int64_t, 1, at::RestrictPtrTraits> ids,
    at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
        compactedVoxelArray,
    at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
        numVertsScanned,
    const uint activeVoxels,
    const at::PackedTensorAccessor32<float, 3, at::RestrictPtrTraits> vol,
    const at::PackedTensorAccessor32<int, 2, at::RestrictPtrTraits> faceTable,
    // const at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
    // numVertsTable,
    const float isolevel) {
  uint id = blockIdx.x * blockDim.x + threadIdx.x;
  uint num_threads = gridDim.x * blockDim.x;
  const int faces_size = faces.size(0);
  // Table mapping each edge to the corresponding cube vertices offsets
  const int edgeToVertsTable[12][2] = {
      {0, 1},
      {1, 5},
      {4, 5},
      {0, 4},
      {2, 3},
      {3, 7},
      {6, 7},
      {2, 6},
      {0, 2},
      {1, 3},
      {5, 7},
      {4, 6},
  };

  // Table mapping from cubeindex to number of vertices in the configuration
  const unsigned char numVertsTable[256] = {
      0, 3, 3, 6, 3, 6, 6, 9, 3, 6, 6, 9, 6, 9, 9, 6, 3, 6,
      6, 9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9, 3, 6, 6, 9,
      6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9, 6, 9, 9, 6, 9, 12,
      12, 9, 9, 12, 12, 9, 12, 15, 15, 6, 3, 6, 6, 9, 6, 9, 9, 12,
      6, 9, 9, 12, 9, 12, 12, 9, 6, 9, 9, 12, 9, 12, 12, 15, 9, 12,
      12, 15, 12, 15, 15, 12, 6, 9, 9, 12, 9, 12, 6, 9, 9, 12, 12, 15,
      12, 15, 9, 6, 9, 12, 12, 9, 12, 15, 9, 6, 12, 15, 15, 12, 15, 6,
      12, 3, 3, 6, 6, 9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9,
      6, 9, 9, 12, 9, 12, 12, 15, 9, 6, 12, 9, 12, 9, 15, 6, 6, 9,
      9, 12, 9, 12, 12, 15, 9, 12, 12, 15, 12, 15, 15, 12, 9, 12, 12, 9,
      12, 15, 15, 12, 12, 9, 15, 6, 15, 12, 6, 3, 6, 9, 9, 12, 9, 12,
      12, 15, 9, 12, 12, 15, 6, 9, 9, 6, 9, 12, 12, 15, 12, 15, 15, 6,
      12, 9, 15, 12, 9, 6, 12, 3, 9, 12, 12, 15, 12, 15, 9, 12, 12, 15,
      15, 6, 9, 12, 6, 3, 6, 9, 9, 6, 9, 12, 6, 3, 9, 6, 12, 3,
      6, 3, 3, 0,
  };

  for (uint tid = id; tid < activeVoxels; tid += num_threads) {
    uint voxel = compactedVoxelArray[tid]; // maps from accumulated id to
                                           // original 3d voxel id
    // mapping from offsets to vi index
    int indexTable[8]{0, 1, 4, 5, 3, 2, 7, 6};
    // field value for each vertex
    float val[8];
    // position for each vertex
    float3 p[8];
    // 3d address
    const uint D = vol.size(0) - 1;
    const uint H = vol.size(1) - 1;
    const uint W = vol.size(2) - 1;

    const int gx = voxel % W;
    const int gy = voxel / W % H;
    const int gz = voxel / (W * H);

    // recalculate cubeindex;
    // (same bit construction as ClassifyVoxelKernel, and additionally
    // caches each corner's field value and position)
    uint cubeindex = 0;
    for (int i = 0; i < 8; i++) {
      const int dx = i & 1;
      const int dy = i >> 1 & 1;
      const int dz = i >> 2 & 1;

      const int x = gx + dx;
      const int y = gy + dy;
      const int z = gz + dz;

      if (vol[z][y][x] < isolevel) {
        cubeindex |= 1 << indexTable[i];
      }
      val[indexTable[i]] = vol[z][y][x]; // maps from vi to volume
      p[indexTable[i]] = make_float3(x, y, z); // maps from vi to position
    }

    // Interpolate vertices where the surface intersects the cube
    float3 vertlist[12];
    vertlist[0] = vertexInterp(isolevel, p[0], p[1], val[0], val[1]);
    vertlist[1] = vertexInterp(isolevel, p[1], p[2], val[1], val[2]);
    vertlist[2] = vertexInterp(isolevel, p[3], p[2], val[3], val[2]);
    vertlist[3] = vertexInterp(isolevel, p[0], p[3], val[0], val[3]);

    vertlist[4] = vertexInterp(isolevel, p[4], p[5], val[4], val[5]);
    vertlist[5] = vertexInterp(isolevel, p[5], p[6], val[5], val[6]);
    vertlist[6] = vertexInterp(isolevel, p[7], p[6], val[7], val[6]);
    vertlist[7] = vertexInterp(isolevel, p[4], p[7], val[4], val[7]);

    vertlist[8] = vertexInterp(isolevel, p[0], p[4], val[0], val[4]);
    vertlist[9] = vertexInterp(isolevel, p[1], p[5], val[1], val[5]);
    vertlist[10] = vertexInterp(isolevel, p[2], p[6], val[2], val[6]);
    vertlist[11] = vertexInterp(isolevel, p[3], p[7], val[3], val[7]);

    // output triangle faces
    uint numVerts = numVertsTable[cubeindex];

    for (int i = 0; i < numVerts; i++) {
      // Destination slot: scan gives this voxel's base offset.
      int index = numVertsScanned[voxel] + i;
      unsigned char edge = faceTable[cubeindex][i];

      // Global ids of the two grid vertices spanning this cube edge;
      // combined into a grid-unique edge id (grid has +1 samples/axis).
      uint v1 = edgeToVertsTable[edge][0];
      uint v2 = edgeToVertsTable[edge][1];
      uint v1_id = localToGlobal(v1, gx, gy, gz, W + 1, H + 1);
      uint v2_id = localToGlobal(v2, gx, gy, gz, W + 1, H + 1);
      int64_t edge_id = hashVpair(v1_id, v2_id, W + 1, H + 1, D + 1);

      verts[index][0] = vertlist[edge].x;
      verts[index][1] = vertlist[edge].y;
      verts[index][2] = vertlist[edge].z;

      // Faces are consecutive vertex triples; duplicate vertices are
      // presumably merged later on the host via `ids` — NOTE(review):
      // confirm against the calling code.
      if (index < faces_size) {
        faces[index][0] = index * 3 + 0;
        faces[index][1] = index * 3 + 1;
        faces[index][2] = index * 3 + 2;
      }

      ids[index] = edge_id;
    }
  } // end for grid-strided kernel
}
|
| 384 |
+
|
| 385 |
+
// ATen/Torch does not have an exclusive-scan operator. Additionally, in the
|
| 386 |
+
// code below we need to get the "total number of items to work on" after
|
| 387 |
+
// a scan, which with an inclusive-scan would simply be the value of the last
|
| 388 |
+
// element in the tensor.
|
| 389 |
+
//
|
| 390 |
+
// This utility function hits two birds with one stone, by running
|
| 391 |
+
// an inclusive-scan into a right-shifted view of a tensor that's
|
| 392 |
+
// allocated to be one element bigger than the input tensor.
|
| 393 |
+
//
|
| 394 |
+
// Note; return tensor is `int64_t` per element, even if the input
|
| 395 |
+
// tensor is only 32-bit. Also, the return tensor is one element bigger
|
| 396 |
+
// than the input one.
|
| 397 |
+
//
|
| 398 |
+
// Secondary optional argument is an output argument that gets the
|
| 399 |
+
// value of the last element of the return tensor (because you almost
|
| 400 |
+
// always need this CPU-side right after this function anyway).
|
| 401 |
+
static at::Tensor ExclusiveScanAndTotal(
|
| 402 |
+
const at::Tensor& inTensor,
|
| 403 |
+
int64_t* optTotal = nullptr) {
|
| 404 |
+
const auto inSize = inTensor.sizes()[0];
|
| 405 |
+
auto retTensor = at::zeros({inSize + 1}, at::kLong).to(inTensor.device());
|
| 406 |
+
|
| 407 |
+
using at::indexing::None;
|
| 408 |
+
using at::indexing::Slice;
|
| 409 |
+
auto rightShiftedView = retTensor.index({Slice(1, None)});
|
| 410 |
+
|
| 411 |
+
// Do an (inclusive-scan) cumulative sum in to the view that's
|
| 412 |
+
// shifted one element to the right...
|
| 413 |
+
at::cumsum_out(rightShiftedView, inTensor, 0, at::kLong);
|
| 414 |
+
|
| 415 |
+
if (optTotal) {
|
| 416 |
+
*optTotal = retTensor[inSize].cpu().item<int64_t>();
|
| 417 |
+
}
|
| 418 |
+
|
| 419 |
+
// ...so that the not-shifted tensor holds the exclusive-scan
|
| 420 |
+
return retTensor;
|
| 421 |
+
}
|
| 422 |
+
|
| 423 |
+
// Entrance for marching cubes cuda extension. Marching Cubes is an algorithm to
|
| 424 |
+
// create triangle meshes from an implicit function (one of the form f(x, y, z)
|
| 425 |
+
// = 0). It works by iteratively checking a grid of cubes superimposed over a
|
| 426 |
+
// region of the function. The number of faces and positions of the vertices in
|
| 427 |
+
// each cube are determined by the the isolevel as well as the volume values
|
| 428 |
+
// from the eight vertices of the cube.
|
| 429 |
+
//
|
| 430 |
+
// We implement this algorithm with two forward passes where the first pass
|
| 431 |
+
// checks the occupancy and collects number of vertices for each cube. The
|
| 432 |
+
// second pass will skip empty voxels and generate vertices as well as faces for
|
| 433 |
+
// each cube through table lookup. The vertex positions, faces and identifiers
|
| 434 |
+
// for each vertex will be returned.
|
| 435 |
+
//
|
| 436 |
+
//
|
| 437 |
+
// Args:
|
| 438 |
+
// vol: torch tensor of shape (D, H, W) for volume scalar field
|
| 439 |
+
// isolevel: threshold to determine isosurface intesection
|
| 440 |
+
//
|
| 441 |
+
// Returns:
|
| 442 |
+
// tuple of <verts, faces, ids>: which stores vertex positions, face
|
| 443 |
+
// indices and integer identifiers for each vertex.
|
| 444 |
+
// verts: (N_verts, 3) FloatTensor for vertex positions
|
| 445 |
+
// faces: (N_faces, 3) LongTensor of face indices
|
| 446 |
+
// ids: (N_verts,) LongTensor used to identify each vertex. Vertices from
|
| 447 |
+
// adjacent edges can share the same 3d position. To reduce memory
|
| 448 |
+
// redudancy, we tag each vertex with a unique id for deduplication. In
|
| 449 |
+
// contrast to deduping on vertices, this has the benefit to avoid
|
| 450 |
+
// floating point precision issues.
|
| 451 |
+
//
|
| 452 |
+
std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
|
| 453 |
+
const at::Tensor& vol,
|
| 454 |
+
const float isolevel) {
|
| 455 |
+
// Set the device for the kernel launch based on the device of vol
|
| 456 |
+
at::cuda::CUDAGuard device_guard(vol.device());
|
| 457 |
+
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
|
| 458 |
+
|
| 459 |
+
// transfer _FACE_TABLE data to device
|
| 460 |
+
at::Tensor face_table_tensor = at::zeros(
|
| 461 |
+
{256, 16}, at::TensorOptions().dtype(at::kInt).device(at::kCPU));
|
| 462 |
+
auto face_table_a = face_table_tensor.accessor<int, 2>();
|
| 463 |
+
for (int i = 0; i < 256; i++) {
|
| 464 |
+
for (int j = 0; j < 16; j++) {
|
| 465 |
+
face_table_a[i][j] = _FACE_TABLE[i][j];
|
| 466 |
+
}
|
| 467 |
+
}
|
| 468 |
+
at::Tensor faceTable = face_table_tensor.to(vol.device());
|
| 469 |
+
|
| 470 |
+
// get numVoxels
|
| 471 |
+
int threads = 128;
|
| 472 |
+
const uint D = vol.size(0);
|
| 473 |
+
const uint H = vol.size(1);
|
| 474 |
+
const uint W = vol.size(2);
|
| 475 |
+
const int numVoxels = (D - 1) * (H - 1) * (W - 1);
|
| 476 |
+
dim3 grid((numVoxels + threads - 1) / threads, 1, 1);
|
| 477 |
+
if (grid.x > 65535) {
|
| 478 |
+
grid.x = 65535;
|
| 479 |
+
}
|
| 480 |
+
|
| 481 |
+
using at::indexing::None;
|
| 482 |
+
using at::indexing::Slice;
|
| 483 |
+
|
| 484 |
+
auto d_voxelVerts =
|
| 485 |
+
at::zeros({numVoxels}, at::TensorOptions().dtype(at::kInt))
|
| 486 |
+
.to(vol.device());
|
| 487 |
+
auto d_voxelOccupied =
|
| 488 |
+
at::zeros({numVoxels}, at::TensorOptions().dtype(at::kInt))
|
| 489 |
+
.to(vol.device());
|
| 490 |
+
|
| 491 |
+
// Execute "ClassifyVoxelKernel" kernel to precompute
|
| 492 |
+
// two arrays - d_voxelOccupied and d_voxelVertices to global memory,
|
| 493 |
+
// which stores the occupancy state and number of voxel vertices per voxel.
|
| 494 |
+
ClassifyVoxelKernel<<<grid, threads, 0, stream>>>(
|
| 495 |
+
d_voxelVerts.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
| 496 |
+
d_voxelOccupied.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
| 497 |
+
vol.packed_accessor32<float, 3, at::RestrictPtrTraits>(),
|
| 498 |
+
isolevel);
|
| 499 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 500 |
+
cudaDeviceSynchronize();
|
| 501 |
+
|
| 502 |
+
// Scan "d_voxelOccupied" array to generate accumulated voxel occupancy
|
| 503 |
+
// count for voxels in the grid and compute the number of active voxels.
|
| 504 |
+
// If the number of active voxels is 0, return zero tensor for verts and
|
| 505 |
+
// faces.
|
| 506 |
+
int64_t activeVoxels = 0;
|
| 507 |
+
auto d_voxelOccupiedScan =
|
| 508 |
+
ExclusiveScanAndTotal(d_voxelOccupied, &activeVoxels);
|
| 509 |
+
|
| 510 |
+
const int device_id = vol.device().index();
|
| 511 |
+
auto opt = at::TensorOptions().dtype(at::kInt).device(at::kCUDA, device_id);
|
| 512 |
+
auto opt_long =
|
| 513 |
+
at::TensorOptions().dtype(at::kLong).device(at::kCUDA, device_id);
|
| 514 |
+
|
| 515 |
+
if (activeVoxels == 0) {
|
| 516 |
+
int ntris = 0;
|
| 517 |
+
at::Tensor verts = at::zeros({ntris * 3, 3}, vol.options());
|
| 518 |
+
at::Tensor faces = at::zeros({ntris, 3}, opt_long);
|
| 519 |
+
at::Tensor ids = at::zeros({ntris}, opt_long);
|
| 520 |
+
return std::make_tuple(verts, faces, ids);
|
| 521 |
+
}
|
| 522 |
+
|
| 523 |
+
// Execute "CompactVoxelsKernel" kernel to compress voxels for acceleration.
|
| 524 |
+
// This allows us to run triangle generation on only the occupied voxels.
|
| 525 |
+
auto d_compVoxelArray = at::zeros({activeVoxels}, opt);
|
| 526 |
+
CompactVoxelsKernel<<<grid, threads, 0, stream>>>(
|
| 527 |
+
d_compVoxelArray.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
| 528 |
+
d_voxelOccupied.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
| 529 |
+
d_voxelOccupiedScan
|
| 530 |
+
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
|
| 531 |
+
numVoxels);
|
| 532 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 533 |
+
cudaDeviceSynchronize();
|
| 534 |
+
|
| 535 |
+
// Scan d_voxelVerts array to generate offsets of vertices for each voxel
|
| 536 |
+
int64_t totalVerts = 0;
|
| 537 |
+
auto d_voxelVertsScan = ExclusiveScanAndTotal(d_voxelVerts, &totalVerts);
|
| 538 |
+
|
| 539 |
+
// Execute "GenerateFacesKernel" kernel
|
| 540 |
+
// This runs only on the occupied voxels.
|
| 541 |
+
// It looks up the field values and generates the triangle data.
|
| 542 |
+
at::Tensor verts = at::zeros({totalVerts, 3}, vol.options());
|
| 543 |
+
at::Tensor faces = at::zeros({totalVerts / 3, 3}, opt_long);
|
| 544 |
+
|
| 545 |
+
at::Tensor ids = at::zeros({totalVerts}, opt_long);
|
| 546 |
+
|
| 547 |
+
dim3 grid2((activeVoxels + threads - 1) / threads, 1, 1);
|
| 548 |
+
if (grid2.x > 65535) {
|
| 549 |
+
grid2.x = 65535;
|
| 550 |
+
}
|
| 551 |
+
|
| 552 |
+
GenerateFacesKernel<<<grid2, threads, 0, stream>>>(
|
| 553 |
+
verts.packed_accessor32<float, 2, at::RestrictPtrTraits>(),
|
| 554 |
+
faces.packed_accessor<int64_t, 2, at::RestrictPtrTraits>(),
|
| 555 |
+
ids.packed_accessor<int64_t, 1, at::RestrictPtrTraits>(),
|
| 556 |
+
d_compVoxelArray.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
| 557 |
+
d_voxelVertsScan.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
|
| 558 |
+
activeVoxels,
|
| 559 |
+
vol.packed_accessor32<float, 3, at::RestrictPtrTraits>(),
|
| 560 |
+
faceTable.packed_accessor32<int, 2, at::RestrictPtrTraits>(),
|
| 561 |
+
isolevel);
|
| 562 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 563 |
+
|
| 564 |
+
return std::make_tuple(verts, faces, ids);
|
| 565 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.h
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#pragma once
|
| 10 |
+
#include <torch/extension.h>
|
| 11 |
+
#include <tuple>
|
| 12 |
+
#include <vector>
|
| 13 |
+
#include "utils/pytorch3d_cutils.h"
|
| 14 |
+
|
| 15 |
+
// Run Marching Cubes algorithm over a batch of volume scalar fields
|
| 16 |
+
// with a pre-defined threshold and return a mesh composed of vertices
|
| 17 |
+
// and faces for the mesh.
|
| 18 |
+
//
|
| 19 |
+
// Args:
|
| 20 |
+
// vol: FloatTensor of shape (D, H, W) giving a volume
|
| 21 |
+
// scalar grids.
|
| 22 |
+
// isolevel: isosurface value to use as the threshoold to determine whether
|
| 23 |
+
// the points are within a volume.
|
| 24 |
+
//
|
| 25 |
+
// Returns:
|
| 26 |
+
// vertices: (N_verts, 3) FloatTensor of vertices
|
| 27 |
+
// faces: (N_faces, 3) LongTensor of faces
|
| 28 |
+
// ids: (N_verts,) LongTensor used to identify each vertex and deduplication
|
| 29 |
+
// to avoid floating point precision issues.
|
| 30 |
+
// For Cuda, will be used to dedupe redundant vertices.
|
| 31 |
+
// For cpp implementation, this tensor is just a placeholder.
|
| 32 |
+
|
| 33 |
+
// CPU implementation
|
| 34 |
+
std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCpu(
|
| 35 |
+
const at::Tensor& vol,
|
| 36 |
+
const float isolevel);
|
| 37 |
+
|
| 38 |
+
// CUDA implementation
|
| 39 |
+
std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
|
| 40 |
+
const at::Tensor& vol,
|
| 41 |
+
const float isolevel);
|
| 42 |
+
|
| 43 |
+
// Implementation which is exposed
|
| 44 |
+
inline std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubes(
|
| 45 |
+
const at::Tensor& vol,
|
| 46 |
+
const float isolevel) {
|
| 47 |
+
if (vol.is_cuda()) {
|
| 48 |
+
#ifdef WITH_CUDA
|
| 49 |
+
CHECK_CUDA(vol);
|
| 50 |
+
const int D = vol.size(0);
|
| 51 |
+
const int H = vol.size(1);
|
| 52 |
+
const int W = vol.size(2);
|
| 53 |
+
if (D > 1024 || H > 1024 || W > 1024) {
|
| 54 |
+
AT_ERROR("Maximum volume size allowed 1K x 1K x 1K");
|
| 55 |
+
}
|
| 56 |
+
return MarchingCubesCuda(vol.contiguous(), isolevel);
|
| 57 |
+
#else
|
| 58 |
+
AT_ERROR("Not compiled with GPU support.");
|
| 59 |
+
#endif
|
| 60 |
+
}
|
| 61 |
+
return MarchingCubesCpu(vol.contiguous(), isolevel);
|
| 62 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_cpu.cpp
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include <torch/extension.h>
|
| 10 |
+
#include <algorithm>
|
| 11 |
+
#include <array>
|
| 12 |
+
#include <cstring>
|
| 13 |
+
#include <unordered_map>
|
| 14 |
+
#include <vector>
|
| 15 |
+
#include "marching_cubes/marching_cubes_utils.h"
|
| 16 |
+
#include "marching_cubes/tables.h"
|
| 17 |
+
|
| 18 |
+
// Cpu implementation for Marching Cubes
|
| 19 |
+
// Args:
|
| 20 |
+
// vol: a Tensor of size (D, H, W) corresponding to a 3D scalar field
|
| 21 |
+
// isolevel: the isosurface value to use as the threshold to determine
|
| 22 |
+
// whether points are within a volume.
|
| 23 |
+
//
|
| 24 |
+
// Returns:
|
| 25 |
+
// vertices: a float tensor of shape (N_verts, 3) for positions of the mesh
|
| 26 |
+
// faces: a long tensor of shape (N_faces, 3) for indices of the face
|
| 27 |
+
// ids: a long tensor of shape (N_verts) as placeholder
|
| 28 |
+
//
|
| 29 |
+
std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCpu(
|
| 30 |
+
const at::Tensor& vol,
|
| 31 |
+
const float isolevel) {
|
| 32 |
+
// volume shapes
|
| 33 |
+
const int D = vol.size(0);
|
| 34 |
+
const int H = vol.size(1);
|
| 35 |
+
const int W = vol.size(2);
|
| 36 |
+
|
| 37 |
+
// Create tensor accessors
|
| 38 |
+
auto vol_a = vol.accessor<float, 3>();
|
| 39 |
+
// edge_id_to_v maps from an edge id to a vertex position
|
| 40 |
+
std::unordered_map<int64_t, Vertex> edge_id_to_v;
|
| 41 |
+
// uniq_edge_id: used to remove redundant edge ids
|
| 42 |
+
std::unordered_map<int64_t, int64_t> uniq_edge_id;
|
| 43 |
+
std::vector<int64_t> faces; // store face indices
|
| 44 |
+
std::vector<Vertex> verts; // store vertex positions
|
| 45 |
+
// enumerate each cell in the 3d grid
|
| 46 |
+
for (int z = 0; z < D - 1; z++) {
|
| 47 |
+
for (int y = 0; y < H - 1; y++) {
|
| 48 |
+
for (int x = 0; x < W - 1; x++) {
|
| 49 |
+
Cube cube(x, y, z, vol_a, isolevel);
|
| 50 |
+
// Cube is entirely in/out of the surface
|
| 51 |
+
if (_FACE_TABLE[cube.cubeindex][0] == 255) {
|
| 52 |
+
continue;
|
| 53 |
+
}
|
| 54 |
+
// store all boundary vertices that intersect with the edges
|
| 55 |
+
std::array<Vertex, 12> interp_points;
|
| 56 |
+
// triangle vertex IDs and positions
|
| 57 |
+
std::vector<int64_t> tri;
|
| 58 |
+
std::vector<Vertex> ps;
|
| 59 |
+
|
| 60 |
+
// Interpolate the vertices where the surface intersects with the cube
|
| 61 |
+
for (int j = 0; _FACE_TABLE[cube.cubeindex][j] != 255; j++) {
|
| 62 |
+
const int e = _FACE_TABLE[cube.cubeindex][j];
|
| 63 |
+
interp_points[e] = cube.VertexInterp(isolevel, e, vol_a);
|
| 64 |
+
|
| 65 |
+
int64_t edge = cube.HashVpair(e, W, H, D);
|
| 66 |
+
tri.push_back(edge);
|
| 67 |
+
ps.push_back(interp_points[e]);
|
| 68 |
+
|
| 69 |
+
// Check if the triangle face is degenerate. A triangle face
|
| 70 |
+
// is degenerate if any of the two verices share the same 3D position
|
| 71 |
+
if ((j + 1) % 3 == 0 && ps[0] != ps[1] && ps[1] != ps[2] &&
|
| 72 |
+
ps[2] != ps[0]) {
|
| 73 |
+
for (int k = 0; k < 3; k++) {
|
| 74 |
+
int64_t v = tri.at(k);
|
| 75 |
+
edge_id_to_v[v] = ps.at(k);
|
| 76 |
+
if (!uniq_edge_id.count(v)) {
|
| 77 |
+
uniq_edge_id[v] = verts.size();
|
| 78 |
+
verts.push_back(edge_id_to_v[v]);
|
| 79 |
+
}
|
| 80 |
+
faces.push_back(uniq_edge_id[v]);
|
| 81 |
+
}
|
| 82 |
+
tri.clear();
|
| 83 |
+
ps.clear();
|
| 84 |
+
} // endif
|
| 85 |
+
} // endfor edge enumeration
|
| 86 |
+
} // endfor x
|
| 87 |
+
} // endfor y
|
| 88 |
+
} // endfor z
|
| 89 |
+
// Collect returning tensor
|
| 90 |
+
const int n_vertices = verts.size();
|
| 91 |
+
const int64_t n_faces = (int64_t)faces.size() / 3;
|
| 92 |
+
auto vert_tensor = torch::zeros({n_vertices, 3}, torch::kFloat);
|
| 93 |
+
auto id_tensor = torch::zeros({n_vertices}, torch::kInt64); // placeholder
|
| 94 |
+
auto face_tensor = torch::zeros({n_faces, 3}, torch::kInt64);
|
| 95 |
+
|
| 96 |
+
auto vert_a = vert_tensor.accessor<float, 2>();
|
| 97 |
+
for (int i = 0; i < n_vertices; i++) {
|
| 98 |
+
vert_a[i][0] = verts.at(i).x;
|
| 99 |
+
vert_a[i][1] = verts.at(i).y;
|
| 100 |
+
vert_a[i][2] = verts.at(i).z;
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
auto face_a = face_tensor.accessor<int64_t, 2>();
|
| 104 |
+
for (int64_t i = 0; i < n_faces; i++) {
|
| 105 |
+
face_a[i][0] = faces.at(i * 3 + 0);
|
| 106 |
+
face_a[i][1] = faces.at(i * 3 + 1);
|
| 107 |
+
face_a[i][2] = faces.at(i * 3 + 2);
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
return std::make_tuple(vert_tensor, face_tensor, id_tensor);
|
| 111 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_utils.h
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#pragma once
|
| 10 |
+
#include <torch/extension.h>
|
| 11 |
+
#include <cmath>
|
| 12 |
+
#include <cstdint>
|
| 13 |
+
#include <vector>
|
| 14 |
+
#include "ATen/core/TensorAccessor.h"
|
| 15 |
+
#include "marching_cubes/tables.h"
|
| 16 |
+
|
| 17 |
+
// EPS: Used to assess whether two float values are close
|
| 18 |
+
const float EPS = 1e-5;
|
| 19 |
+
|
| 20 |
+
// Data structures for the marching cubes
|
| 21 |
+
struct Vertex {
|
| 22 |
+
// Constructor used when performing marching cube in each cell
|
| 23 |
+
explicit Vertex(float x = 0.0f, float y = 0.0f, float z = 0.0f)
|
| 24 |
+
: x(x), y(y), z(z) {}
|
| 25 |
+
|
| 26 |
+
// The */+ operator overrides are used for vertex interpolation
|
| 27 |
+
Vertex operator*(float s) const {
|
| 28 |
+
return Vertex(x * s, y * s, z * s);
|
| 29 |
+
}
|
| 30 |
+
Vertex operator+(const Vertex& xyz) const {
|
| 31 |
+
return Vertex(x + xyz.x, y + xyz.y, z + xyz.z);
|
| 32 |
+
}
|
| 33 |
+
// The =/!= operator overrides is used for checking degenerate triangles
|
| 34 |
+
bool operator==(const Vertex& xyz) const {
|
| 35 |
+
return (
|
| 36 |
+
std::abs(x - xyz.x) < EPS && std::abs(y - xyz.y) < EPS &&
|
| 37 |
+
std::abs(z - xyz.z) < EPS);
|
| 38 |
+
}
|
| 39 |
+
bool operator!=(const Vertex& xyz) const {
|
| 40 |
+
return (
|
| 41 |
+
std::abs(x - xyz.x) >= EPS || std::abs(y - xyz.y) >= EPS ||
|
| 42 |
+
std::abs(z - xyz.z) >= EPS);
|
| 43 |
+
}
|
| 44 |
+
// vertex position
|
| 45 |
+
float x, y, z;
|
| 46 |
+
};
|
| 47 |
+
|
| 48 |
+
struct Cube {
|
| 49 |
+
// Edge and vertex convention:
|
| 50 |
+
// v4_______e4____________v5
|
| 51 |
+
// /| /|
|
| 52 |
+
// / | / |
|
| 53 |
+
// e7/ | e5/ |
|
| 54 |
+
// /___|______e6_________/ |
|
| 55 |
+
// v7| | |v6 |e9
|
| 56 |
+
// | | | |
|
| 57 |
+
// | |e8 |e10|
|
| 58 |
+
// e11| | | |
|
| 59 |
+
// | |_________________|___|
|
| 60 |
+
// | / v0 e0 | /v1
|
| 61 |
+
// | / | /
|
| 62 |
+
// | /e3 | /e1
|
| 63 |
+
// |/_____________________|/
|
| 64 |
+
// v3 e2 v2
|
| 65 |
+
|
| 66 |
+
Vertex p[8];
|
| 67 |
+
int x, y, z;
|
| 68 |
+
int cubeindex = 0;
|
| 69 |
+
Cube(
|
| 70 |
+
int x,
|
| 71 |
+
int y,
|
| 72 |
+
int z,
|
| 73 |
+
const at::TensorAccessor<float, 3>& vol_a,
|
| 74 |
+
const float isolevel)
|
| 75 |
+
: x(x), y(y), z(z) {
|
| 76 |
+
// vertex position (x, y, z) for v0-v1-v4-v5-v3-v2-v7-v6
|
| 77 |
+
for (int v = 0; v < 8; v++) {
|
| 78 |
+
p[v] = Vertex(x + (v & 1), y + (v >> 1 & 1), z + (v >> 2 & 1));
|
| 79 |
+
}
|
| 80 |
+
// Calculates cube configuration index given values of the cube vertices
|
| 81 |
+
for (int i = 0; i < 8; i++) {
|
| 82 |
+
const int idx = _INDEX_TABLE[i];
|
| 83 |
+
Vertex v = p[idx];
|
| 84 |
+
if (vol_a[v.z][v.y][v.x] < isolevel) {
|
| 85 |
+
cubeindex |= (1 << i);
|
| 86 |
+
}
|
| 87 |
+
}
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
// Linearly interpolate the position where an isosurface cuts an edge
|
| 91 |
+
// between two vertices, based on their scalar values
|
| 92 |
+
//
|
| 93 |
+
// Args:
|
| 94 |
+
// isolevel: float value used as threshold
|
| 95 |
+
// edge: edge (ID) to interpolate
|
| 96 |
+
// cube: current cube vertices
|
| 97 |
+
// vol_a: 3D scalar field
|
| 98 |
+
//
|
| 99 |
+
// Returns:
|
| 100 |
+
// point: interpolated vertex
|
| 101 |
+
Vertex VertexInterp(
|
| 102 |
+
float isolevel,
|
| 103 |
+
const int edge,
|
| 104 |
+
const at::TensorAccessor<float, 3>& vol_a) {
|
| 105 |
+
const int v1 = _EDGE_TO_VERTICES[edge][0];
|
| 106 |
+
const int v2 = _EDGE_TO_VERTICES[edge][1];
|
| 107 |
+
Vertex p1 = p[v1];
|
| 108 |
+
Vertex p2 = p[v2];
|
| 109 |
+
float val1 = vol_a[p1.z][p1.y][p1.x];
|
| 110 |
+
float val2 = vol_a[p2.z][p2.y][p2.x];
|
| 111 |
+
|
| 112 |
+
float ratio = 1.0f;
|
| 113 |
+
if (std::abs(isolevel - val1) < EPS) {
|
| 114 |
+
return p1;
|
| 115 |
+
} else if (std::abs(isolevel - val2) < EPS) {
|
| 116 |
+
return p2;
|
| 117 |
+
} else if (std::abs(val1 - val2) < EPS) {
|
| 118 |
+
return p1;
|
| 119 |
+
}
|
| 120 |
+
// interpolate vertex p based on two vertices on the edge
|
| 121 |
+
ratio = (isolevel - val1) / (val2 - val1);
|
| 122 |
+
return p1 * (1 - ratio) + p2 * ratio;
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
// Hash an edge into a global edge_id. The function binds an
|
| 126 |
+
// edge with an integer to address floating point precision issue.
|
| 127 |
+
//
|
| 128 |
+
// Args:
|
| 129 |
+
// v1_id: global id of vertex 1
|
| 130 |
+
// v2_id: global id of vertex 2
|
| 131 |
+
// W: width of the 3d grid
|
| 132 |
+
// H: height of the 3d grid
|
| 133 |
+
// D: depth of the 3d grid
|
| 134 |
+
//
|
| 135 |
+
// Returns:
|
| 136 |
+
// hashing for a pair of vertex ids
|
| 137 |
+
//
|
| 138 |
+
int64_t HashVpair(const int edge, int W, int H, int D) {
|
| 139 |
+
const int v1 = _EDGE_TO_VERTICES[edge][0];
|
| 140 |
+
const int v2 = _EDGE_TO_VERTICES[edge][1];
|
| 141 |
+
const int v1_id = p[v1].x + p[v1].y * W + p[v1].z * W * H;
|
| 142 |
+
const int v2_id = p[v2].x + p[v2].y * W + p[v2].z * W * H;
|
| 143 |
+
return (int64_t)v1_id * (W + W * H + W * H * D) + (int64_t)v2_id;
|
| 144 |
+
}
|
| 145 |
+
};
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/tables.h
ADDED
|
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#pragma once
|
| 10 |
+
using uint = unsigned int;
|
| 11 |
+
|
| 12 |
+
// A table mapping from cubeindex to a list of face configurations.
|
| 13 |
+
// Each list contains at most 5 faces, where each face is represented with
|
| 14 |
+
// 3 consecutive numbers
|
| 15 |
+
// Table adapted from http://paulbourke.net/geometry/polygonise/
|
| 16 |
+
//
|
| 17 |
+
#define X 255
|
| 18 |
+
const unsigned char _FACE_TABLE[256][16] = {
|
| 19 |
+
{X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 20 |
+
{0, 8, 3, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 21 |
+
{0, 1, 9, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 22 |
+
{1, 8, 3, 9, 8, 1, X, X, X, X, X, X, X, X, X, X},
|
| 23 |
+
{1, 2, 10, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 24 |
+
{0, 8, 3, 1, 2, 10, X, X, X, X, X, X, X, X, X, X},
|
| 25 |
+
{9, 2, 10, 0, 2, 9, X, X, X, X, X, X, X, X, X, X},
|
| 26 |
+
{2, 8, 3, 2, 10, 8, 10, 9, 8, X, X, X, X, X, X, X},
|
| 27 |
+
{3, 11, 2, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 28 |
+
{0, 11, 2, 8, 11, 0, X, X, X, X, X, X, X, X, X, X},
|
| 29 |
+
{1, 9, 0, 2, 3, 11, X, X, X, X, X, X, X, X, X, X},
|
| 30 |
+
{1, 11, 2, 1, 9, 11, 9, 8, 11, X, X, X, X, X, X, X},
|
| 31 |
+
{3, 10, 1, 11, 10, 3, X, X, X, X, X, X, X, X, X, X},
|
| 32 |
+
{0, 10, 1, 0, 8, 10, 8, 11, 10, X, X, X, X, X, X, X},
|
| 33 |
+
{3, 9, 0, 3, 11, 9, 11, 10, 9, X, X, X, X, X, X, X},
|
| 34 |
+
{9, 8, 10, 10, 8, 11, X, X, X, X, X, X, X, X, X, X},
|
| 35 |
+
{4, 7, 8, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 36 |
+
{4, 3, 0, 7, 3, 4, X, X, X, X, X, X, X, X, X, X},
|
| 37 |
+
{0, 1, 9, 8, 4, 7, X, X, X, X, X, X, X, X, X, X},
|
| 38 |
+
{4, 1, 9, 4, 7, 1, 7, 3, 1, X, X, X, X, X, X, X},
|
| 39 |
+
{1, 2, 10, 8, 4, 7, X, X, X, X, X, X, X, X, X, X},
|
| 40 |
+
{3, 4, 7, 3, 0, 4, 1, 2, 10, X, X, X, X, X, X, X},
|
| 41 |
+
{9, 2, 10, 9, 0, 2, 8, 4, 7, X, X, X, X, X, X, X},
|
| 42 |
+
{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, X, X, X, X},
|
| 43 |
+
{8, 4, 7, 3, 11, 2, X, X, X, X, X, X, X, X, X, X},
|
| 44 |
+
{11, 4, 7, 11, 2, 4, 2, 0, 4, X, X, X, X, X, X, X},
|
| 45 |
+
{9, 0, 1, 8, 4, 7, 2, 3, 11, X, X, X, X, X, X, X},
|
| 46 |
+
{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, X, X, X, X},
|
| 47 |
+
{3, 10, 1, 3, 11, 10, 7, 8, 4, X, X, X, X, X, X, X},
|
| 48 |
+
{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, X, X, X, X},
|
| 49 |
+
{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, X, X, X, X},
|
| 50 |
+
{4, 7, 11, 4, 11, 9, 9, 11, 10, X, X, X, X, X, X, X},
|
| 51 |
+
{9, 5, 4, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 52 |
+
{9, 5, 4, 0, 8, 3, X, X, X, X, X, X, X, X, X, X},
|
| 53 |
+
{0, 5, 4, 1, 5, 0, X, X, X, X, X, X, X, X, X, X},
|
| 54 |
+
{8, 5, 4, 8, 3, 5, 3, 1, 5, X, X, X, X, X, X, X},
|
| 55 |
+
{1, 2, 10, 9, 5, 4, X, X, X, X, X, X, X, X, X, X},
|
| 56 |
+
{3, 0, 8, 1, 2, 10, 4, 9, 5, X, X, X, X, X, X, X},
|
| 57 |
+
{5, 2, 10, 5, 4, 2, 4, 0, 2, X, X, X, X, X, X, X},
|
| 58 |
+
{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, X, X, X, X},
|
| 59 |
+
{9, 5, 4, 2, 3, 11, X, X, X, X, X, X, X, X, X, X},
|
| 60 |
+
{0, 11, 2, 0, 8, 11, 4, 9, 5, X, X, X, X, X, X, X},
|
| 61 |
+
{0, 5, 4, 0, 1, 5, 2, 3, 11, X, X, X, X, X, X, X},
|
| 62 |
+
{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, X, X, X, X},
|
| 63 |
+
{10, 3, 11, 10, 1, 3, 9, 5, 4, X, X, X, X, X, X, X},
|
| 64 |
+
{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, X, X, X, X},
|
| 65 |
+
{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, X, X, X, X},
|
| 66 |
+
{5, 4, 8, 5, 8, 10, 10, 8, 11, X, X, X, X, X, X, X},
|
| 67 |
+
{9, 7, 8, 5, 7, 9, X, X, X, X, X, X, X, X, X, X},
|
| 68 |
+
{9, 3, 0, 9, 5, 3, 5, 7, 3, X, X, X, X, X, X, X},
|
| 69 |
+
{0, 7, 8, 0, 1, 7, 1, 5, 7, X, X, X, X, X, X, X},
|
| 70 |
+
{1, 5, 3, 3, 5, 7, X, X, X, X, X, X, X, X, X, X},
|
| 71 |
+
{9, 7, 8, 9, 5, 7, 10, 1, 2, X, X, X, X, X, X, X},
|
| 72 |
+
{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, X, X, X, X},
|
| 73 |
+
{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, X, X, X, X},
|
| 74 |
+
{2, 10, 5, 2, 5, 3, 3, 5, 7, X, X, X, X, X, X, X},
|
| 75 |
+
{7, 9, 5, 7, 8, 9, 3, 11, 2, X, X, X, X, X, X, X},
|
| 76 |
+
{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, X, X, X, X},
|
| 77 |
+
{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, X, X, X, X},
|
| 78 |
+
{11, 2, 1, 11, 1, 7, 7, 1, 5, X, X, X, X, X, X, X},
|
| 79 |
+
{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, X, X, X, X},
|
| 80 |
+
{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, X},
|
| 81 |
+
{11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, X},
|
| 82 |
+
{11, 10, 5, 7, 11, 5, X, X, X, X, X, X, X, X, X, X},
|
| 83 |
+
{10, 6, 5, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 84 |
+
{0, 8, 3, 5, 10, 6, X, X, X, X, X, X, X, X, X, X},
|
| 85 |
+
{9, 0, 1, 5, 10, 6, X, X, X, X, X, X, X, X, X, X},
|
| 86 |
+
{1, 8, 3, 1, 9, 8, 5, 10, 6, X, X, X, X, X, X, X},
|
| 87 |
+
{1, 6, 5, 2, 6, 1, X, X, X, X, X, X, X, X, X, X},
|
| 88 |
+
{1, 6, 5, 1, 2, 6, 3, 0, 8, X, X, X, X, X, X, X},
|
| 89 |
+
{9, 6, 5, 9, 0, 6, 0, 2, 6, X, X, X, X, X, X, X},
|
| 90 |
+
{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, X, X, X, X},
|
| 91 |
+
{2, 3, 11, 10, 6, 5, X, X, X, X, X, X, X, X, X, X},
|
| 92 |
+
{11, 0, 8, 11, 2, 0, 10, 6, 5, X, X, X, X, X, X, X},
|
| 93 |
+
{0, 1, 9, 2, 3, 11, 5, 10, 6, X, X, X, X, X, X, X},
|
| 94 |
+
{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, X, X, X, X},
|
| 95 |
+
{6, 3, 11, 6, 5, 3, 5, 1, 3, X, X, X, X, X, X, X},
|
| 96 |
+
{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, X, X, X, X},
|
| 97 |
+
{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, X, X, X, X},
|
| 98 |
+
{6, 5, 9, 6, 9, 11, 11, 9, 8, X, X, X, X, X, X, X},
|
| 99 |
+
{5, 10, 6, 4, 7, 8, X, X, X, X, X, X, X, X, X, X},
|
| 100 |
+
{4, 3, 0, 4, 7, 3, 6, 5, 10, X, X, X, X, X, X, X},
|
| 101 |
+
{1, 9, 0, 5, 10, 6, 8, 4, 7, X, X, X, X, X, X, X},
|
| 102 |
+
{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, X, X, X, X},
|
| 103 |
+
{6, 1, 2, 6, 5, 1, 4, 7, 8, X, X, X, X, X, X, X},
|
| 104 |
+
{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, X, X, X, X},
|
| 105 |
+
{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, X, X, X, X},
|
| 106 |
+
{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, X},
|
| 107 |
+
{3, 11, 2, 7, 8, 4, 10, 6, 5, X, X, X, X, X, X, X},
|
| 108 |
+
{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, X, X, X, X},
|
| 109 |
+
{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, X, X, X, X},
|
| 110 |
+
{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, X},
|
| 111 |
+
{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, X, X, X, X},
|
| 112 |
+
{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, X},
|
| 113 |
+
{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, X},
|
| 114 |
+
{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, X, X, X, X},
|
| 115 |
+
{10, 4, 9, 6, 4, 10, X, X, X, X, X, X, X, X, X, X},
|
| 116 |
+
{4, 10, 6, 4, 9, 10, 0, 8, 3, X, X, X, X, X, X, X},
|
| 117 |
+
{10, 0, 1, 10, 6, 0, 6, 4, 0, X, X, X, X, X, X, X},
|
| 118 |
+
{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, X, X, X, X},
|
| 119 |
+
{1, 4, 9, 1, 2, 4, 2, 6, 4, X, X, X, X, X, X, X},
|
| 120 |
+
{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, X, X, X, X},
|
| 121 |
+
{0, 2, 4, 4, 2, 6, X, X, X, X, X, X, X, X, X, X},
|
| 122 |
+
{8, 3, 2, 8, 2, 4, 4, 2, 6, X, X, X, X, X, X, X},
|
| 123 |
+
{10, 4, 9, 10, 6, 4, 11, 2, 3, X, X, X, X, X, X, X},
|
| 124 |
+
{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, X, X, X, X},
|
| 125 |
+
{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, X, X, X, X},
|
| 126 |
+
{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, X},
|
| 127 |
+
{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, X, X, X, X},
|
| 128 |
+
{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, X},
|
| 129 |
+
{3, 11, 6, 3, 6, 0, 0, 6, 4, X, X, X, X, X, X, X},
|
| 130 |
+
{6, 4, 8, 11, 6, 8, X, X, X, X, X, X, X, X, X, X},
|
| 131 |
+
{7, 10, 6, 7, 8, 10, 8, 9, 10, X, X, X, X, X, X, X},
|
| 132 |
+
{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, X, X, X, X},
|
| 133 |
+
{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, X, X, X, X},
|
| 134 |
+
{10, 6, 7, 10, 7, 1, 1, 7, 3, X, X, X, X, X, X, X},
|
| 135 |
+
{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, X, X, X, X},
|
| 136 |
+
{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, X},
|
| 137 |
+
{7, 8, 0, 7, 0, 6, 6, 0, 2, X, X, X, X, X, X, X},
|
| 138 |
+
{7, 3, 2, 6, 7, 2, X, X, X, X, X, X, X, X, X, X},
|
| 139 |
+
{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, X, X, X, X},
|
| 140 |
+
{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, X},
|
| 141 |
+
{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, X},
|
| 142 |
+
{11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, X, X, X, X},
|
| 143 |
+
{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, X},
|
| 144 |
+
{0, 9, 1, 11, 6, 7, X, X, X, X, X, X, X, X, X, X},
|
| 145 |
+
{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, X, X, X, X},
|
| 146 |
+
{7, 11, 6, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 147 |
+
{7, 6, 11, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 148 |
+
{3, 0, 8, 11, 7, 6, X, X, X, X, X, X, X, X, X, X},
|
| 149 |
+
{0, 1, 9, 11, 7, 6, X, X, X, X, X, X, X, X, X, X},
|
| 150 |
+
{8, 1, 9, 8, 3, 1, 11, 7, 6, X, X, X, X, X, X, X},
|
| 151 |
+
{10, 1, 2, 6, 11, 7, X, X, X, X, X, X, X, X, X, X},
|
| 152 |
+
{1, 2, 10, 3, 0, 8, 6, 11, 7, X, X, X, X, X, X, X},
|
| 153 |
+
{2, 9, 0, 2, 10, 9, 6, 11, 7, X, X, X, X, X, X, X},
|
| 154 |
+
{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, X, X, X, X},
|
| 155 |
+
{7, 2, 3, 6, 2, 7, X, X, X, X, X, X, X, X, X, X},
|
| 156 |
+
{7, 0, 8, 7, 6, 0, 6, 2, 0, X, X, X, X, X, X, X},
|
| 157 |
+
{2, 7, 6, 2, 3, 7, 0, 1, 9, X, X, X, X, X, X, X},
|
| 158 |
+
{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, X, X, X, X},
|
| 159 |
+
{10, 7, 6, 10, 1, 7, 1, 3, 7, X, X, X, X, X, X, X},
|
| 160 |
+
{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, X, X, X, X},
|
| 161 |
+
{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, X, X, X, X},
|
| 162 |
+
{7, 6, 10, 7, 10, 8, 8, 10, 9, X, X, X, X, X, X, X},
|
| 163 |
+
{6, 8, 4, 11, 8, 6, X, X, X, X, X, X, X, X, X, X},
|
| 164 |
+
{3, 6, 11, 3, 0, 6, 0, 4, 6, X, X, X, X, X, X, X},
|
| 165 |
+
{8, 6, 11, 8, 4, 6, 9, 0, 1, X, X, X, X, X, X, X},
|
| 166 |
+
{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, X, X, X, X},
|
| 167 |
+
{6, 8, 4, 6, 11, 8, 2, 10, 1, X, X, X, X, X, X, X},
|
| 168 |
+
{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, X, X, X, X},
|
| 169 |
+
{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, X, X, X, X},
|
| 170 |
+
{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, X},
|
| 171 |
+
{8, 2, 3, 8, 4, 2, 4, 6, 2, X, X, X, X, X, X, X},
|
| 172 |
+
{0, 4, 2, 4, 6, 2, X, X, X, X, X, X, X, X, X, X},
|
| 173 |
+
{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, X, X, X, X},
|
| 174 |
+
{1, 9, 4, 1, 4, 2, 2, 4, 6, X, X, X, X, X, X, X},
|
| 175 |
+
{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, X, X, X, X},
|
| 176 |
+
{10, 1, 0, 10, 0, 6, 6, 0, 4, X, X, X, X, X, X, X},
|
| 177 |
+
{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, X},
|
| 178 |
+
{10, 9, 4, 6, 10, 4, X, X, X, X, X, X, X, X, X, X},
|
| 179 |
+
{4, 9, 5, 7, 6, 11, X, X, X, X, X, X, X, X, X, X},
|
| 180 |
+
{0, 8, 3, 4, 9, 5, 11, 7, 6, X, X, X, X, X, X, X},
|
| 181 |
+
{5, 0, 1, 5, 4, 0, 7, 6, 11, X, X, X, X, X, X, X},
|
| 182 |
+
{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, X, X, X, X},
|
| 183 |
+
{9, 5, 4, 10, 1, 2, 7, 6, 11, X, X, X, X, X, X, X},
|
| 184 |
+
{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, X, X, X, X},
|
| 185 |
+
{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, X, X, X, X},
|
| 186 |
+
{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, X},
|
| 187 |
+
{7, 2, 3, 7, 6, 2, 5, 4, 9, X, X, X, X, X, X, X},
|
| 188 |
+
{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, X, X, X, X},
|
| 189 |
+
{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, X, X, X, X},
|
| 190 |
+
{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, X},
|
| 191 |
+
{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, X, X, X, X},
|
| 192 |
+
{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, X},
|
| 193 |
+
{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, X},
|
| 194 |
+
{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, X, X, X, X},
|
| 195 |
+
{6, 9, 5, 6, 11, 9, 11, 8, 9, X, X, X, X, X, X, X},
|
| 196 |
+
{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, X, X, X, X},
|
| 197 |
+
{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, X, X, X, X},
|
| 198 |
+
{6, 11, 3, 6, 3, 5, 5, 3, 1, X, X, X, X, X, X, X},
|
| 199 |
+
{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, X, X, X, X},
|
| 200 |
+
{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, X},
|
| 201 |
+
{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, X},
|
| 202 |
+
{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, X, X, X, X},
|
| 203 |
+
{5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, X, X, X, X},
|
| 204 |
+
{9, 5, 6, 9, 6, 0, 0, 6, 2, X, X, X, X, X, X, X},
|
| 205 |
+
{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, X},
|
| 206 |
+
{1, 5, 6, 2, 1, 6, X, X, X, X, X, X, X, X, X, X},
|
| 207 |
+
{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, X},
|
| 208 |
+
{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, X, X, X, X},
|
| 209 |
+
{0, 3, 8, 5, 6, 10, X, X, X, X, X, X, X, X, X, X},
|
| 210 |
+
{10, 5, 6, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 211 |
+
{11, 5, 10, 7, 5, 11, X, X, X, X, X, X, X, X, X, X},
|
| 212 |
+
{11, 5, 10, 11, 7, 5, 8, 3, 0, X, X, X, X, X, X, X},
|
| 213 |
+
{5, 11, 7, 5, 10, 11, 1, 9, 0, X, X, X, X, X, X, X},
|
| 214 |
+
{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, X, X, X, X},
|
| 215 |
+
{11, 1, 2, 11, 7, 1, 7, 5, 1, X, X, X, X, X, X, X},
|
| 216 |
+
{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, X, X, X, X},
|
| 217 |
+
{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, X, X, X, X},
|
| 218 |
+
{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, X},
|
| 219 |
+
{2, 5, 10, 2, 3, 5, 3, 7, 5, X, X, X, X, X, X, X},
|
| 220 |
+
{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, X, X, X, X},
|
| 221 |
+
{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, X, X, X, X},
|
| 222 |
+
{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, X},
|
| 223 |
+
{1, 3, 5, 3, 7, 5, X, X, X, X, X, X, X, X, X, X},
|
| 224 |
+
{0, 8, 7, 0, 7, 1, 1, 7, 5, X, X, X, X, X, X, X},
|
| 225 |
+
{9, 0, 3, 9, 3, 5, 5, 3, 7, X, X, X, X, X, X, X},
|
| 226 |
+
{9, 8, 7, 5, 9, 7, X, X, X, X, X, X, X, X, X, X},
|
| 227 |
+
{5, 8, 4, 5, 10, 8, 10, 11, 8, X, X, X, X, X, X, X},
|
| 228 |
+
{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, X, X, X, X},
|
| 229 |
+
{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, X, X, X, X},
|
| 230 |
+
{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, X},
|
| 231 |
+
{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, X, X, X, X},
|
| 232 |
+
{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, X},
|
| 233 |
+
{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, X},
|
| 234 |
+
{9, 4, 5, 2, 11, 3, X, X, X, X, X, X, X, X, X, X},
|
| 235 |
+
{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, X, X, X, X},
|
| 236 |
+
{5, 10, 2, 5, 2, 4, 4, 2, 0, X, X, X, X, X, X, X},
|
| 237 |
+
{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, X},
|
| 238 |
+
{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, X, X, X, X},
|
| 239 |
+
{8, 4, 5, 8, 5, 3, 3, 5, 1, X, X, X, X, X, X, X},
|
| 240 |
+
{0, 4, 5, 1, 0, 5, X, X, X, X, X, X, X, X, X, X},
|
| 241 |
+
{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, X, X, X, X},
|
| 242 |
+
{9, 4, 5, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 243 |
+
{4, 11, 7, 4, 9, 11, 9, 10, 11, X, X, X, X, X, X, X},
|
| 244 |
+
{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, X, X, X, X},
|
| 245 |
+
{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, X, X, X, X},
|
| 246 |
+
{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, X},
|
| 247 |
+
{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, X, X, X, X},
|
| 248 |
+
{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, X},
|
| 249 |
+
{11, 7, 4, 11, 4, 2, 2, 4, 0, X, X, X, X, X, X, X},
|
| 250 |
+
{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, X, X, X, X},
|
| 251 |
+
{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, X, X, X, X},
|
| 252 |
+
{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, X},
|
| 253 |
+
{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, X},
|
| 254 |
+
{1, 10, 2, 8, 7, 4, X, X, X, X, X, X, X, X, X, X},
|
| 255 |
+
{4, 9, 1, 4, 1, 7, 7, 1, 3, X, X, X, X, X, X, X},
|
| 256 |
+
{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, X, X, X, X},
|
| 257 |
+
{4, 0, 3, 7, 4, 3, X, X, X, X, X, X, X, X, X, X},
|
| 258 |
+
{4, 8, 7, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 259 |
+
{9, 10, 8, 10, 11, 8, X, X, X, X, X, X, X, X, X, X},
|
| 260 |
+
{3, 0, 9, 3, 9, 11, 11, 9, 10, X, X, X, X, X, X, X},
|
| 261 |
+
{0, 1, 10, 0, 10, 8, 8, 10, 11, X, X, X, X, X, X, X},
|
| 262 |
+
{3, 1, 10, 11, 3, 10, X, X, X, X, X, X, X, X, X, X},
|
| 263 |
+
{1, 2, 11, 1, 11, 9, 9, 11, 8, X, X, X, X, X, X, X},
|
| 264 |
+
{3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, X, X, X, X},
|
| 265 |
+
{0, 2, 11, 8, 0, 11, X, X, X, X, X, X, X, X, X, X},
|
| 266 |
+
{3, 2, 11, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 267 |
+
{2, 3, 8, 2, 8, 10, 10, 8, 9, X, X, X, X, X, X, X},
|
| 268 |
+
{9, 10, 2, 0, 9, 2, X, X, X, X, X, X, X, X, X, X},
|
| 269 |
+
{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, X, X, X, X},
|
| 270 |
+
{1, 10, 2, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 271 |
+
{1, 3, 8, 9, 1, 8, X, X, X, X, X, X, X, X, X, X},
|
| 272 |
+
{0, 9, 1, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 273 |
+
{0, 3, 8, X, X, X, X, X, X, X, X, X, X, X, X, X},
|
| 274 |
+
{X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X}};
|
| 275 |
+
#undef X
|
| 276 |
+
|
| 277 |
+
// Table mapping each edge to the corresponding cube vertices offsets
|
| 278 |
+
const uint _EDGE_TO_VERTICES[12][2] = {
|
| 279 |
+
{0, 1},
|
| 280 |
+
{1, 5},
|
| 281 |
+
{4, 5},
|
| 282 |
+
{0, 4},
|
| 283 |
+
{2, 3},
|
| 284 |
+
{3, 7},
|
| 285 |
+
{6, 7},
|
| 286 |
+
{2, 6},
|
| 287 |
+
{0, 2},
|
| 288 |
+
{1, 3},
|
| 289 |
+
{5, 7},
|
| 290 |
+
{4, 6},
|
| 291 |
+
};
|
| 292 |
+
|
| 293 |
+
// Table mapping from 0-7 to v0-v7 in cube.vertices
|
| 294 |
+
const int _INDEX_TABLE[8] = {0, 1, 5, 4, 2, 3, 7, 6};
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/mesh_normal_consistency/mesh_normal_consistency.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#pragma once
|
| 10 |
+
#include <torch/extension.h>
|
| 11 |
+
#include "utils/pytorch3d_cutils.h"
|
| 12 |
+
|
| 13 |
+
// For mesh_normal_consistency, find pairs of vertices opposite the same edge.
|
| 14 |
+
//
|
| 15 |
+
// Args:
|
| 16 |
+
// edge_num: int64 Tensor of shape (E,) giving the number of vertices
|
| 17 |
+
// corresponding to each edge.
|
| 18 |
+
//
|
| 19 |
+
// Returns:
|
| 20 |
+
// pairs: int64 Tensor of shape (N,2)
|
| 21 |
+
|
| 22 |
+
at::Tensor MeshNormalConsistencyFindVerticesCpu(const at::Tensor& edge_num);
|
| 23 |
+
|
| 24 |
+
// Exposed implementation.
|
| 25 |
+
at::Tensor MeshNormalConsistencyFindVertices(const at::Tensor& edge_num) {
|
| 26 |
+
if (edge_num.is_cuda()) {
|
| 27 |
+
AT_ERROR("This function needs a CPU tensor.");
|
| 28 |
+
}
|
| 29 |
+
return MeshNormalConsistencyFindVerticesCpu(edge_num);
|
| 30 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/mesh_normal_consistency/mesh_normal_consistency_cpu.cpp
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include <ATen/ATen.h>
|
| 10 |
+
#include <utility>
|
| 11 |
+
#include <vector>
|
| 12 |
+
|
| 13 |
+
at::Tensor MeshNormalConsistencyFindVerticesCpu(const at::Tensor& edge_num) {
|
| 14 |
+
// We take a LongTensor of shape (E,) giving the number of things intersecting
|
| 15 |
+
// each edge. The things are taken to be numbered in order.
|
| 16 |
+
// (In fact, the "things" are opposite vertices to edges, renumbered).
|
| 17 |
+
// We return a tensor of shape (?, 2) where for every pair of things which
|
| 18 |
+
// intersect the same edge there is a row of their numbers in the output.
|
| 19 |
+
|
| 20 |
+
// Example possible inputs and outputs (order of output is not specified):
|
| 21 |
+
// [1,0,1,1,0] => [[]]
|
| 22 |
+
// [3] => [[0,1], [0,2], [1,2]]
|
| 23 |
+
// [0,3] => [[0,1], [0,2], [1,2]]
|
| 24 |
+
// [1,3] => [[1,2], [1,3], [2,3]]
|
| 25 |
+
//[1,0,2,1,0,2] => [[1,2], [4,5]]
|
| 26 |
+
|
| 27 |
+
const auto num_edges = edge_num.size(0);
|
| 28 |
+
auto edges_a = edge_num.accessor<int64_t, 1>();
|
| 29 |
+
|
| 30 |
+
int64_t vert_idx = 0;
|
| 31 |
+
std::vector<std::pair<int64_t, int64_t>> pairs;
|
| 32 |
+
for (int64_t i_edge = 0; i_edge < num_edges; ++i_edge) {
|
| 33 |
+
int64_t e = edges_a[i_edge];
|
| 34 |
+
for (int64_t j = 0; j < e; ++j) {
|
| 35 |
+
for (int64_t i = 0; i < j; ++i) {
|
| 36 |
+
pairs.emplace_back(vert_idx + i, vert_idx + j);
|
| 37 |
+
}
|
| 38 |
+
}
|
| 39 |
+
vert_idx += e;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
// Convert from std::vector by copying over the items to a new empty torch
|
| 43 |
+
// tensor.
|
| 44 |
+
auto pairs_tensor = at::empty({(int64_t)pairs.size(), 2}, edge_num.options());
|
| 45 |
+
auto pairs_a = pairs_tensor.accessor<int64_t, 2>();
|
| 46 |
+
for (int64_t i_pair = 0; i_pair < pairs.size(); ++i_pair) {
|
| 47 |
+
auto accessor = pairs_a[i_pair];
|
| 48 |
+
accessor[0] = pairs[i_pair].first;
|
| 49 |
+
accessor[1] = pairs[i_pair].second;
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
return pairs_tensor;
|
| 53 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.cu
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include <ATen/ATen.h>
|
| 10 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 11 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 12 |
+
|
| 13 |
+
// Kernel for inputs_packed of shape (F, D), where D > 1
|
| 14 |
+
template <typename scalar_t>
|
| 15 |
+
__global__ void PackedToPaddedKernel(
|
| 16 |
+
const scalar_t* __restrict__ inputs_packed,
|
| 17 |
+
const int64_t* __restrict__ first_idxs,
|
| 18 |
+
scalar_t* __restrict__ inputs_padded,
|
| 19 |
+
const size_t batch_size,
|
| 20 |
+
const size_t max_size,
|
| 21 |
+
const size_t num_inputs,
|
| 22 |
+
const size_t D) {
|
| 23 |
+
// Batch elements split evenly across blocks (num blocks = batch_size) and
|
| 24 |
+
// values for each element split across threads in the block. Each thread adds
|
| 25 |
+
// the values of its respective input elements to the global inputs_padded
|
| 26 |
+
// tensor.
|
| 27 |
+
const size_t tid = threadIdx.x;
|
| 28 |
+
const size_t batch_idx = blockIdx.x;
|
| 29 |
+
|
| 30 |
+
const int64_t start = first_idxs[batch_idx];
|
| 31 |
+
const int64_t end =
|
| 32 |
+
batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs;
|
| 33 |
+
const int num = end - start;
|
| 34 |
+
for (size_t f = tid; f < num; f += blockDim.x) {
|
| 35 |
+
for (size_t j = 0; j < D; ++j) {
|
| 36 |
+
inputs_padded[batch_idx * max_size * D + f * D + j] =
|
| 37 |
+
inputs_packed[(start + f) * D + j];
|
| 38 |
+
}
|
| 39 |
+
}
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
// Kernel for inputs of shape (F, 1)
|
| 43 |
+
template <typename scalar_t>
|
| 44 |
+
__global__ void PackedToPaddedKernelD1(
|
| 45 |
+
const scalar_t* __restrict__ inputs_packed,
|
| 46 |
+
const int64_t* __restrict__ first_idxs,
|
| 47 |
+
scalar_t* __restrict__ inputs_padded,
|
| 48 |
+
const size_t batch_size,
|
| 49 |
+
const size_t max_size,
|
| 50 |
+
const size_t num_inputs) {
|
| 51 |
+
// Batch elements split evenly across blocks (num blocks = batch_size) and
|
| 52 |
+
// values for each element split across threads in the block. Each thread adds
|
| 53 |
+
// the values of its respective input elements to the global inputs_padded
|
| 54 |
+
// tensor.
|
| 55 |
+
const size_t tid = threadIdx.x;
|
| 56 |
+
const size_t batch_idx = blockIdx.x;
|
| 57 |
+
|
| 58 |
+
const int64_t start = first_idxs[batch_idx];
|
| 59 |
+
const int64_t end =
|
| 60 |
+
batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs;
|
| 61 |
+
const int num = end - start;
|
| 62 |
+
for (size_t f = tid; f < num; f += blockDim.x) {
|
| 63 |
+
inputs_padded[batch_idx * max_size + f] = inputs_packed[start + f];
|
| 64 |
+
}
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
// Kernel for inputs_padded of shape (B, F, D), where D > 1
|
| 68 |
+
template <typename scalar_t>
|
| 69 |
+
__global__ void PaddedToPackedKernel(
|
| 70 |
+
const scalar_t* __restrict__ inputs_padded,
|
| 71 |
+
const int64_t* __restrict__ first_idxs,
|
| 72 |
+
scalar_t* __restrict__ inputs_packed,
|
| 73 |
+
const size_t batch_size,
|
| 74 |
+
const size_t max_size,
|
| 75 |
+
const size_t num_inputs,
|
| 76 |
+
const size_t D) {
|
| 77 |
+
// Batch elements split evenly across blocks (num blocks = batch_size) and
|
| 78 |
+
// values for each element split across threads in the block. Each thread adds
|
| 79 |
+
// the values of its respective input elements to the global inputs_packed
|
| 80 |
+
// tensor.
|
| 81 |
+
const size_t tid = threadIdx.x;
|
| 82 |
+
const size_t batch_idx = blockIdx.x;
|
| 83 |
+
|
| 84 |
+
const int64_t start = first_idxs[batch_idx];
|
| 85 |
+
const int64_t end =
|
| 86 |
+
batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs;
|
| 87 |
+
const int num = end - start;
|
| 88 |
+
for (size_t f = tid; f < num; f += blockDim.x) {
|
| 89 |
+
for (size_t j = 0; j < D; ++j) {
|
| 90 |
+
inputs_packed[(start + f) * D + j] =
|
| 91 |
+
inputs_padded[batch_idx * max_size * D + f * D + j];
|
| 92 |
+
}
|
| 93 |
+
}
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
// Kernel for inputs_padded of shape (B, F, 1)
|
| 97 |
+
template <typename scalar_t>
|
| 98 |
+
__global__ void PaddedToPackedKernelD1(
|
| 99 |
+
const scalar_t* __restrict__ inputs_padded,
|
| 100 |
+
const int64_t* __restrict__ first_idxs,
|
| 101 |
+
scalar_t* __restrict__ inputs_packed,
|
| 102 |
+
const size_t batch_size,
|
| 103 |
+
const size_t max_size,
|
| 104 |
+
const size_t num_inputs) {
|
| 105 |
+
// Batch elements split evenly across blocks (num blocks = batch_size) and
|
| 106 |
+
// values for each element split across threads in the block. Each thread adds
|
| 107 |
+
// the values of its respective input elements to the global inputs_packed
|
| 108 |
+
// tensor.
|
| 109 |
+
const size_t tid = threadIdx.x;
|
| 110 |
+
const size_t batch_idx = blockIdx.x;
|
| 111 |
+
|
| 112 |
+
const int64_t start = first_idxs[batch_idx];
|
| 113 |
+
const int64_t end =
|
| 114 |
+
batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs;
|
| 115 |
+
const int num = end - start;
|
| 116 |
+
for (size_t f = tid; f < num; f += blockDim.x) {
|
| 117 |
+
inputs_packed[start + f] = inputs_padded[batch_idx * max_size + f];
|
| 118 |
+
}
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
at::Tensor PackedToPaddedCuda(
|
| 122 |
+
const at::Tensor inputs_packed,
|
| 123 |
+
const at::Tensor first_idxs,
|
| 124 |
+
const int64_t max_size) {
|
| 125 |
+
// Check inputs are on the same device
|
| 126 |
+
at::TensorArg inputs_packed_t{inputs_packed, "inputs_packed", 1},
|
| 127 |
+
first_idxs_t{first_idxs, "first_idxs", 2};
|
| 128 |
+
at::CheckedFrom c = "PackedToPaddedCuda";
|
| 129 |
+
at::checkAllSameGPU(c, {inputs_packed_t, first_idxs_t});
|
| 130 |
+
|
| 131 |
+
// Set the device for the kernel launch based on the device of the input
|
| 132 |
+
at::cuda::CUDAGuard device_guard(inputs_packed.device());
|
| 133 |
+
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
|
| 134 |
+
|
| 135 |
+
const int64_t num_inputs = inputs_packed.size(0);
|
| 136 |
+
const int64_t batch_size = first_idxs.size(0);
|
| 137 |
+
|
| 138 |
+
TORCH_CHECK(
|
| 139 |
+
inputs_packed.dim() == 2, "inputs_packed must be a 2-dimensional tensor");
|
| 140 |
+
const int64_t D = inputs_packed.size(1);
|
| 141 |
+
at::Tensor inputs_padded =
|
| 142 |
+
at::zeros({batch_size, max_size, D}, inputs_packed.options());
|
| 143 |
+
|
| 144 |
+
if (inputs_padded.numel() == 0) {
|
| 145 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 146 |
+
return inputs_padded;
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
const int threads = 512;
|
| 150 |
+
const int blocks = batch_size;
|
| 151 |
+
if (D == 1) {
|
| 152 |
+
AT_DISPATCH_FLOATING_TYPES(
|
| 153 |
+
inputs_packed.scalar_type(), "packed_to_padded_d1_kernel", ([&] {
|
| 154 |
+
PackedToPaddedKernelD1<scalar_t><<<blocks, threads, 0, stream>>>(
|
| 155 |
+
inputs_packed.contiguous().data_ptr<scalar_t>(),
|
| 156 |
+
first_idxs.contiguous().data_ptr<int64_t>(),
|
| 157 |
+
inputs_padded.data_ptr<scalar_t>(),
|
| 158 |
+
batch_size,
|
| 159 |
+
max_size,
|
| 160 |
+
num_inputs);
|
| 161 |
+
}));
|
| 162 |
+
} else {
|
| 163 |
+
AT_DISPATCH_FLOATING_TYPES(
|
| 164 |
+
inputs_packed.scalar_type(), "packed_to_padded_kernel", ([&] {
|
| 165 |
+
PackedToPaddedKernel<scalar_t><<<blocks, threads, 0, stream>>>(
|
| 166 |
+
inputs_packed.contiguous().data_ptr<scalar_t>(),
|
| 167 |
+
first_idxs.contiguous().data_ptr<int64_t>(),
|
| 168 |
+
inputs_padded.data_ptr<scalar_t>(),
|
| 169 |
+
batch_size,
|
| 170 |
+
max_size,
|
| 171 |
+
num_inputs,
|
| 172 |
+
D);
|
| 173 |
+
}));
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 177 |
+
return inputs_padded;
|
| 178 |
+
}
|
| 179 |
+
|
| 180 |
+
at::Tensor PaddedToPackedCuda(
|
| 181 |
+
const at::Tensor inputs_padded,
|
| 182 |
+
const at::Tensor first_idxs,
|
| 183 |
+
const int64_t num_inputs) {
|
| 184 |
+
// Check inputs are on the same device
|
| 185 |
+
at::TensorArg inputs_padded_t{inputs_padded, "inputs_padded", 1},
|
| 186 |
+
first_idxs_t{first_idxs, "first_idxs", 2};
|
| 187 |
+
at::CheckedFrom c = "PaddedToPackedCuda";
|
| 188 |
+
at::checkAllSameGPU(c, {inputs_padded_t, first_idxs_t});
|
| 189 |
+
|
| 190 |
+
// Set the device for the kernel launch based on the device of the input
|
| 191 |
+
at::cuda::CUDAGuard device_guard(inputs_padded.device());
|
| 192 |
+
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
|
| 193 |
+
|
| 194 |
+
const int64_t batch_size = inputs_padded.size(0);
|
| 195 |
+
const int64_t max_size = inputs_padded.size(1);
|
| 196 |
+
|
| 197 |
+
TORCH_CHECK(batch_size == first_idxs.size(0), "sizes mismatch");
|
| 198 |
+
TORCH_CHECK(
|
| 199 |
+
inputs_padded.dim() == 3,
|
| 200 |
+
"inputs_padded must be a 3-dimensional tensor");
|
| 201 |
+
const int64_t D = inputs_padded.size(2);
|
| 202 |
+
|
| 203 |
+
at::Tensor inputs_packed =
|
| 204 |
+
at::zeros({num_inputs, D}, inputs_padded.options());
|
| 205 |
+
|
| 206 |
+
if (inputs_packed.numel() == 0) {
|
| 207 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 208 |
+
return inputs_packed;
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
const int threads = 512;
|
| 212 |
+
const int blocks = batch_size;
|
| 213 |
+
|
| 214 |
+
if (D == 1) {
|
| 215 |
+
AT_DISPATCH_FLOATING_TYPES(
|
| 216 |
+
inputs_padded.scalar_type(), "padded_to_packed_d1_kernel", ([&] {
|
| 217 |
+
PaddedToPackedKernelD1<scalar_t><<<blocks, threads, 0, stream>>>(
|
| 218 |
+
inputs_padded.contiguous().data_ptr<scalar_t>(),
|
| 219 |
+
first_idxs.contiguous().data_ptr<int64_t>(),
|
| 220 |
+
inputs_packed.data_ptr<scalar_t>(),
|
| 221 |
+
batch_size,
|
| 222 |
+
max_size,
|
| 223 |
+
num_inputs);
|
| 224 |
+
}));
|
| 225 |
+
} else {
|
| 226 |
+
AT_DISPATCH_FLOATING_TYPES(
|
| 227 |
+
inputs_padded.scalar_type(), "padded_to_packed_kernel", ([&] {
|
| 228 |
+
PaddedToPackedKernel<scalar_t><<<blocks, threads, 0, stream>>>(
|
| 229 |
+
inputs_padded.contiguous().data_ptr<scalar_t>(),
|
| 230 |
+
first_idxs.contiguous().data_ptr<int64_t>(),
|
| 231 |
+
inputs_packed.data_ptr<scalar_t>(),
|
| 232 |
+
batch_size,
|
| 233 |
+
max_size,
|
| 234 |
+
num_inputs,
|
| 235 |
+
D);
|
| 236 |
+
}));
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 240 |
+
return inputs_packed;
|
| 241 |
+
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor_cpu.cpp
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include <torch/extension.h>
|
| 10 |
+
|
| 11 |
+
/*
 * Scatter a packed (num_inputs, D) tensor into a zero-padded
 * (batch_size, max_size, D) tensor. first_idxs[b] is the index of the
 * first packed row belonging to batch element b; element b owns rows
 * [first_idxs[b], first_idxs[b + 1]) (the last element runs to num_inputs).
 *
 * NOTE(review): the accessors assume float32 tensors; other dtypes throw.
 * Rows beyond a batch element's count remain zero from the initial fill.
 */
at::Tensor PackedToPaddedCpu(
    const at::Tensor inputs_packed,
    const at::Tensor first_idxs,
    const int64_t max_size) {
  const int64_t num_inputs = inputs_packed.size(0);
  const int64_t batch_size = first_idxs.size(0);

  AT_ASSERTM(
      inputs_packed.dim() == 2, "inputs_packed must be a 2-dimensional tensor");
  const int64_t D = inputs_packed.size(1);

  torch::Tensor inputs_padded =
      torch::zeros({batch_size, max_size, D}, inputs_packed.options());

  auto packed = inputs_packed.accessor<float, 2>();
  auto starts = first_idxs.accessor<int64_t, 1>();
  auto padded = inputs_padded.accessor<float, 3>();

  for (int b = 0; b < batch_size; ++b) {
    // Packed rows [row_begin, row_end) belong to batch element b.
    const int64_t row_begin = starts[b];
    const int64_t row_end = (b + 1 < batch_size) ? starts[b + 1] : num_inputs;
    for (int64_t r = row_begin; r < row_end; ++r) {
      for (int64_t d = 0; d < D; ++d) {
        padded[b][r - row_begin][d] = packed[r][d];
      }
    }
  }
  return inputs_padded;
}
|
| 41 |
+
|
| 42 |
+
/*
 * Gather a padded (batch_size, max_size, D) tensor back into a packed
 * (num_inputs, D) tensor. first_idxs[b] is the destination row of the
 * first entry of batch element b; element b contributes rows
 * [first_idxs[b], first_idxs[b + 1]) (the last element runs to num_inputs).
 *
 * NOTE(review): the accessors assume float32 tensors; other dtypes throw.
 */
at::Tensor PaddedToPackedCpu(
    const at::Tensor inputs_padded,
    const at::Tensor first_idxs,
    const int64_t num_inputs) {
  const int64_t batch_size = inputs_padded.size(0);

  AT_ASSERTM(
      inputs_padded.dim() == 3, "inputs_padded must be a 3-dimensional tensor");
  const int64_t D = inputs_padded.size(2);

  torch::Tensor inputs_packed =
      torch::zeros({num_inputs, D}, inputs_padded.options());

  auto padded = inputs_padded.accessor<float, 3>();
  auto starts = first_idxs.accessor<int64_t, 1>();
  auto packed = inputs_packed.accessor<float, 2>();

  for (int b = 0; b < batch_size; ++b) {
    // Batch element b fills packed rows [row_begin, row_end).
    const int64_t row_begin = starts[b];
    const int64_t row_end = (b + 1 < batch_size) ? starts[b + 1] : num_inputs;
    for (int64_t r = row_begin; r < row_end; ++r) {
      for (int64_t d = 0; d < D; ++d) {
        packed[r][d] = padded[b][r - row_begin][d];
      }
    }
  }
  return inputs_packed;
}
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/constants.h
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#ifndef PULSAR_NATIVE_CONSTANTS_H_
#define PULSAR_NATIVE_CONSTANTS_H_

// Numeric limits and tolerances shared by the pulsar renderer.
#define EPS 1E-6 // double-precision epsilon tolerance
#define FEPS 1E-6f // single-precision epsilon tolerance
#define MAX_FLOAT 3.4E38f // close to FLT_MAX (~3.4028e38)
#define MAX_INT 2147483647 // INT32_MAX
#define MAX_UINT 4294967295u // UINT32_MAX
#define MAX_USHORT 65535u // UINT16_MAX

#endif
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/README.md
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CUDA device compilation units
|
| 2 |
+
|
| 3 |
+
This folder contains `.cu` files to create compilation units
|
| 4 |
+
for device-specific functions. See `../include/README.md` for
|
| 5 |
+
more information.
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/commands.h
ADDED
|
@@ -0,0 +1,505 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#ifndef PULSAR_NATIVE_CUDA_COMMANDS_H_
|
| 10 |
+
#define PULSAR_NATIVE_CUDA_COMMANDS_H_
|
| 11 |
+
|
| 12 |
+
// Definitions for GPU commands.
|
| 13 |
+
#include <cooperative_groups.h>
|
| 14 |
+
#include <cub/cub.cuh>
|
| 15 |
+
namespace cg = cooperative_groups;
|
| 16 |
+
|
| 17 |
+
#ifdef __DRIVER_TYPES_H__
|
| 18 |
+
#ifndef DEVICE_RESET
|
| 19 |
+
#define DEVICE_RESET cudaDeviceReset();
|
| 20 |
+
#endif
|
| 21 |
+
#else
|
| 22 |
+
#ifndef DEVICE_RESET
|
| 23 |
+
#define DEVICE_RESET
|
| 24 |
+
#endif
|
| 25 |
+
#endif
|
| 26 |
+
|
| 27 |
+
#define HANDLECUDA(CMD) CMD
|
| 28 |
+
// handleCudaError((CMD), __FILE__, __LINE__)
|
| 29 |
+
/*
 * Report a CUDA runtime error and abort the process.
 * The diagnostic and exit are compiled only for non-NVCC translation
 * units (#ifndef __NVCC__); under NVCC a failing code passes silently.
 */
inline void
handleCudaError(const cudaError_t err, const char* file, const int line) {
  if (err == cudaSuccess) {
    return;
  }
#ifndef __NVCC__
  fprintf(
      stderr,
      "%s(%i) : getLastCudaError() CUDA error :"
      " (%d) %s.\n",
      file,
      line,
      static_cast<int>(err),
      cudaGetErrorString(err));
  DEVICE_RESET
  exit(1);
#endif
}
|
| 46 |
+
/*
 * Fetch the last CUDA error; if one is pending, print errorMessage and
 * delegate to handleCudaError for the detailed report (and abort).
 */
inline void
getLastCudaError(const char* errorMessage, const char* file, const int line) {
  const cudaError_t err = cudaGetLastError();
  if (err == cudaSuccess) {
    return;
  }
  fprintf(stderr, "Error: %s.", errorMessage);
  handleCudaError(err, file, line);
}
|
| 54 |
+
|
| 55 |
+
#define ALIGN(VAL) __align__(VAL)
|
| 56 |
+
#define SYNC() HANDLECUDA(cudaDeviceSynchronize()) // fixed typo: was HANDLECUDE (undefined macro)
|
| 57 |
+
#define THREADFENCE_B() __threadfence_block()
|
| 58 |
+
#define SHFL_SYNC(a, b, c) __shfl_sync((a), (b), (c))
|
| 59 |
+
#define SHARED __shared__
|
| 60 |
+
#define ACTIVEMASK() __activemask()
|
| 61 |
+
#define BALLOT(mask, val) __ballot_sync((mask), val)
|
| 62 |
+
/**
|
| 63 |
+
* Find the cumulative sum within a warp up to the current
|
| 64 |
+
* thread lane, with each mask thread contributing base.
|
| 65 |
+
*/
|
| 66 |
+
/**
 * Cumulative sum within a warp up to the current thread lane: each step
 * pulls a partial sum from a lane `delta` positions higher via
 * __shfl_down_sync (delta = 1, 2, 4, 8, 16) and the rank guard zeroes
 * contributions that would come from past the end of the warp.
 */
template <typename T>
DEVICE T
WARP_CUMSUM(const cg::coalesced_group& group, const uint& mask, const T& base) {
  T acc = base;
  T incoming;
  incoming = __shfl_down_sync(mask, acc, 1u); // Deactivate the rightmost lane.
  acc += (group.thread_rank() < 31) * incoming;
  incoming = __shfl_down_sync(mask, acc, 2u);
  acc += (group.thread_rank() < 30) * incoming;
  incoming = __shfl_down_sync(mask, acc, 4u); // ...4
  acc += (group.thread_rank() < 28) * incoming;
  incoming = __shfl_down_sync(mask, acc, 8u); // ...8
  acc += (group.thread_rank() < 24) * incoming;
  incoming = __shfl_down_sync(mask, acc, 16u); // ...16
  acc += (group.thread_rank() < 16) * incoming;
  return acc;
}
|
| 83 |
+
|
| 84 |
+
/**
 * Warp-level max reduction via shuffle-down with halving strides
 * (16, 8, 4, 2, 1); lane 0 ends up with the maximum of `base` over the
 * participating lanes.
 */
template <typename T>
DEVICE T
WARP_MAX(const cg::coalesced_group& group, const uint& mask, const T& base) {
  T acc = base;
  for (uint delta = 16u; delta >= 1u; delta >>= 1) {
    acc = max(acc, __shfl_down_sync(mask, acc, delta));
  }
  return acc;
}
|
| 95 |
+
|
| 96 |
+
/**
 * Warp-level sum reduction via shuffle-down with halving strides
 * (16, 8, 4, 2, 1); lane 0 ends up with the sum of `base` over the
 * participating lanes.
 */
template <typename T>
DEVICE T
WARP_SUM(const cg::coalesced_group& group, const uint& mask, const T& base) {
  T acc = base;
  for (uint delta = 16u; delta >= 1u; delta >>= 1) {
    acc = acc + __shfl_down_sync(mask, acc, delta);
  }
  return acc;
}
|
| 107 |
+
|
| 108 |
+
/** Component-wise warp sum of a float3: reduces x, y and z independently
 * through WARP_SUM. */
INLINE DEVICE float3 WARP_SUM_FLOAT3(
    const cg::coalesced_group& group,
    const uint& mask,
    const float3& base) {
  float3 total;
  total.x = WARP_SUM(group, mask, base.x);
  total.y = WARP_SUM(group, mask, base.y);
  total.z = WARP_SUM(group, mask, base.z);
  return total;
}
|
| 118 |
+
|
| 119 |
+
// Floating point.
|
| 120 |
+
// #define FMUL(a, b) __fmul_rn((a), (b))
|
| 121 |
+
#define FMUL(a, b) ((a) * (b))
|
| 122 |
+
#define FDIV(a, b) __fdiv_rn((a), (b))
|
| 123 |
+
// #define FSUB(a, b) __fsub_rn((a), (b))
|
| 124 |
+
#define FSUB(a, b) ((a) - (b))
|
| 125 |
+
#define FADD(a, b) __fadd_rn((a), (b))
|
| 126 |
+
#define FSQRT(a) __fsqrt_rn(a)
|
| 127 |
+
#define FEXP(a) fasterexp(a)
|
| 128 |
+
#define FLN(a) fasterlog(a)
|
| 129 |
+
#define FPOW(a, b) __powf((a), (b))
|
| 130 |
+
#define FMAX(a, b) fmax((a), (b))
|
| 131 |
+
#define FMIN(a, b) fmin((a), (b))
|
| 132 |
+
#define FCEIL(a) ceilf(a)
|
| 133 |
+
#define FFLOOR(a) floorf(a)
|
| 134 |
+
#define FROUND(x) nearbyintf(x)
|
| 135 |
+
#define FSATURATE(x) __saturatef(x)
|
| 136 |
+
#define FABS(a) abs(a)
|
| 137 |
+
#define IASF(a, loc) (loc) = __int_as_float(a)
|
| 138 |
+
#define FASI(a, loc) (loc) = __float_as_int(a)
|
| 139 |
+
#define FABSLEQAS(a, b, c) \
|
| 140 |
+
((a) <= (b) ? FSUB((b), (a)) <= (c) : FSUB((a), (b)) < (c))
|
| 141 |
+
/** Calculates x*y+z. */
|
| 142 |
+
#define FMA(x, y, z) __fmaf_rn((x), (y), (z))
|
| 143 |
+
#define I2F(a) __int2float_rn(a)
|
| 144 |
+
#define FRCP(x) __frcp_rn(x)
|
| 145 |
+
// Atomic max for floats, emulated with an integer compare-and-swap on the
// float's bit pattern (CUDA provides no native float atomicMax).
// Returns the value stored at *address before the update.
__device__ static float atomicMax(float* address, float val) {
  int* addr_as_int = (int*)address;
  int observed = *addr_as_int;
  int expected;
  do {
    expected = observed;
    observed = ::atomicCAS(
        addr_as_int,
        expected,
        __float_as_int(::fmaxf(val, __int_as_float(expected))));
  } while (expected != observed);
  return __int_as_float(observed);
}
|
| 157 |
+
// Atomic min for floats, emulated with an integer compare-and-swap on the
// float's bit pattern (CUDA provides no native float atomicMin).
// Returns the value stored at *address before the update.
__device__ static float atomicMin(float* address, float val) {
  int* addr_as_int = (int*)address;
  int observed = *addr_as_int;
  int expected;
  do {
    expected = observed;
    observed = ::atomicCAS(
        addr_as_int,
        expected,
        __float_as_int(::fminf(val, __int_as_float(expected))));
  } while (expected != observed);
  return __int_as_float(observed);
}
|
| 169 |
+
#define DMAX(a, b) FMAX(a, b)
|
| 170 |
+
#define DMIN(a, b) FMIN(a, b)
|
| 171 |
+
#define DSQRT(a) sqrt(a)
|
| 172 |
+
#define DSATURATE(a) DMIN(1., DMAX(0., (a)))
|
| 173 |
+
// half
|
| 174 |
+
#define HADD(a, b) __hadd((a), (b))
|
| 175 |
+
#define HSUB2(a, b) __hsub2((a), (b))
|
| 176 |
+
#define HMUL2(a, b) __hmul2((a), (b))
|
| 177 |
+
#define HSQRT(a) hsqrt(a)
|
| 178 |
+
|
| 179 |
+
// uint.
|
| 180 |
+
#define CLZ(VAL) __clz(VAL)
|
| 181 |
+
#define POPC(a) __popc(a)
|
| 182 |
+
//
|
| 183 |
+
//
|
| 184 |
+
//
|
| 185 |
+
//
|
| 186 |
+
//
|
| 187 |
+
//
|
| 188 |
+
//
|
| 189 |
+
//
|
| 190 |
+
//
|
| 191 |
+
#define ATOMICADD(PTR, VAL) atomicAdd((PTR), (VAL))
|
| 192 |
+
#define ATOMICADD_F3(PTR, VAL) \
|
| 193 |
+
ATOMICADD(&((PTR)->x), VAL.x); \
|
| 194 |
+
ATOMICADD(&((PTR)->y), VAL.y); \
|
| 195 |
+
ATOMICADD(&((PTR)->z), VAL.z);
|
| 196 |
+
#if (CUDART_VERSION >= 10000) && (__CUDA_ARCH__ >= 600)
|
| 197 |
+
#define ATOMICADD_B(PTR, VAL) atomicAdd_block((PTR), (VAL))
|
| 198 |
+
#else
|
| 199 |
+
#define ATOMICADD_B(PTR, VAL) ATOMICADD(PTR, VAL)
|
| 200 |
+
#endif
|
| 201 |
+
//
|
| 202 |
+
//
|
| 203 |
+
//
|
| 204 |
+
//
|
| 205 |
+
// int.
|
| 206 |
+
#define IMIN(a, b) min((a), (b))
|
| 207 |
+
#define IMAX(a, b) max((a), (b))
|
| 208 |
+
#define IABS(a) abs(a)
|
| 209 |
+
|
| 210 |
+
// Checks.
|
| 211 |
+
// like TORCH_CHECK_ARG in PyTorch > 1.10
|
| 212 |
+
#define ARGCHECK(cond, argN, ...) \
|
| 213 |
+
TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__)
|
| 214 |
+
|
| 215 |
+
// Math.
|
| 216 |
+
#define NORM3DF(x, y, z) norm3df(x, y, z)
|
| 217 |
+
#define RNORM3DF(x, y, z) rnorm3df(x, y, z)
|
| 218 |
+
|
| 219 |
+
// High level.
|
| 220 |
+
#define GET_SORT_WS_SIZE(RES_PTR, KEY_TYPE, VAL_TYPE, NUM_OBJECTS) \
|
| 221 |
+
cub::DeviceRadixSort::SortPairsDescending( \
|
| 222 |
+
(void*)NULL, \
|
| 223 |
+
*(RES_PTR), \
|
| 224 |
+
reinterpret_cast<KEY_TYPE*>(NULL), \
|
| 225 |
+
reinterpret_cast<KEY_TYPE*>(NULL), \
|
| 226 |
+
reinterpret_cast<VAL_TYPE*>(NULL), \
|
| 227 |
+
reinterpret_cast<VAL_TYPE*>(NULL), \
|
| 228 |
+
(NUM_OBJECTS));
|
| 229 |
+
#define GET_REDUCE_WS_SIZE(RES_PTR, TYPE, REDUCE_OP, NUM_OBJECTS) \
|
| 230 |
+
{ \
|
| 231 |
+
TYPE init = TYPE(); \
|
| 232 |
+
cub::DeviceReduce::Reduce( \
|
| 233 |
+
(void*)NULL, \
|
| 234 |
+
*(RES_PTR), \
|
| 235 |
+
(TYPE*)NULL, \
|
| 236 |
+
(TYPE*)NULL, \
|
| 237 |
+
(NUM_OBJECTS), \
|
| 238 |
+
(REDUCE_OP), \
|
| 239 |
+
init); \
|
| 240 |
+
}
|
| 241 |
+
#define GET_SELECT_WS_SIZE( \
|
| 242 |
+
RES_PTR, TYPE_SELECTOR, TYPE_SELECTION, NUM_OBJECTS) \
|
| 243 |
+
{ \
|
| 244 |
+
cub::DeviceSelect::Flagged( \
|
| 245 |
+
(void*)NULL, \
|
| 246 |
+
*(RES_PTR), \
|
| 247 |
+
(TYPE_SELECTION*)NULL, \
|
| 248 |
+
(TYPE_SELECTOR*)NULL, \
|
| 249 |
+
(TYPE_SELECTION*)NULL, \
|
| 250 |
+
(int*)NULL, \
|
| 251 |
+
(NUM_OBJECTS)); \
|
| 252 |
+
}
|
| 253 |
+
#define GET_SUM_WS_SIZE(RES_PTR, TYPE_SUM, NUM_OBJECTS) \
|
| 254 |
+
{ \
|
| 255 |
+
cub::DeviceReduce::Sum( \
|
| 256 |
+
(void*)NULL, \
|
| 257 |
+
*(RES_PTR), \
|
| 258 |
+
(TYPE_SUM*)NULL, \
|
| 259 |
+
(TYPE_SUM*)NULL, \
|
| 260 |
+
NUM_OBJECTS); \
|
| 261 |
+
}
|
| 262 |
+
#define GET_MM_WS_SIZE(RES_PTR, TYPE, NUM_OBJECTS) \
|
| 263 |
+
{ \
|
| 264 |
+
TYPE init = TYPE(); \
|
| 265 |
+
cub::DeviceReduce::Max( \
|
| 266 |
+
(void*)NULL, *(RES_PTR), (TYPE*)NULL, (TYPE*)NULL, (NUM_OBJECTS)); \
|
| 267 |
+
}
|
| 268 |
+
#define SORT_DESCENDING( \
|
| 269 |
+
TMPN1, SORT_PTR, SORTED_PTR, VAL_PTR, VAL_SORTED_PTR, NUM_OBJECTS) \
|
| 270 |
+
void* TMPN1 = NULL; \
|
| 271 |
+
size_t TMPN1##_bytes = 0; \
|
| 272 |
+
cub::DeviceRadixSort::SortPairsDescending( \
|
| 273 |
+
TMPN1, \
|
| 274 |
+
TMPN1##_bytes, \
|
| 275 |
+
(SORT_PTR), \
|
| 276 |
+
(SORTED_PTR), \
|
| 277 |
+
(VAL_PTR), \
|
| 278 |
+
(VAL_SORTED_PTR), \
|
| 279 |
+
(NUM_OBJECTS)); \
|
| 280 |
+
HANDLECUDA(cudaMalloc(&TMPN1, TMPN1##_bytes)); \
|
| 281 |
+
cub::DeviceRadixSort::SortPairsDescending( \
|
| 282 |
+
TMPN1, \
|
| 283 |
+
TMPN1##_bytes, \
|
| 284 |
+
(SORT_PTR), \
|
| 285 |
+
(SORTED_PTR), \
|
| 286 |
+
(VAL_PTR), \
|
| 287 |
+
(VAL_SORTED_PTR), \
|
| 288 |
+
(NUM_OBJECTS)); \
|
| 289 |
+
HANDLECUDA(cudaFree(TMPN1));
|
| 290 |
+
#define SORT_DESCENDING_WS( \
|
| 291 |
+
TMPN1, \
|
| 292 |
+
SORT_PTR, \
|
| 293 |
+
SORTED_PTR, \
|
| 294 |
+
VAL_PTR, \
|
| 295 |
+
VAL_SORTED_PTR, \
|
| 296 |
+
NUM_OBJECTS, \
|
| 297 |
+
WORKSPACE_PTR, \
|
| 298 |
+
WORKSPACE_BYTES) \
|
| 299 |
+
cub::DeviceRadixSort::SortPairsDescending( \
|
| 300 |
+
(WORKSPACE_PTR), \
|
| 301 |
+
(WORKSPACE_BYTES), \
|
| 302 |
+
(SORT_PTR), \
|
| 303 |
+
(SORTED_PTR), \
|
| 304 |
+
(VAL_PTR), \
|
| 305 |
+
(VAL_SORTED_PTR), \
|
| 306 |
+
(NUM_OBJECTS));
|
| 307 |
+
#define SORT_ASCENDING_WS( \
|
| 308 |
+
SORT_PTR, \
|
| 309 |
+
SORTED_PTR, \
|
| 310 |
+
VAL_PTR, \
|
| 311 |
+
VAL_SORTED_PTR, \
|
| 312 |
+
NUM_OBJECTS, \
|
| 313 |
+
WORKSPACE_PTR, \
|
| 314 |
+
WORKSPACE_BYTES, \
|
| 315 |
+
STREAM) \
|
| 316 |
+
cub::DeviceRadixSort::SortPairs( \
|
| 317 |
+
(WORKSPACE_PTR), \
|
| 318 |
+
(WORKSPACE_BYTES), \
|
| 319 |
+
(SORT_PTR), \
|
| 320 |
+
(SORTED_PTR), \
|
| 321 |
+
(VAL_PTR), \
|
| 322 |
+
(VAL_SORTED_PTR), \
|
| 323 |
+
(NUM_OBJECTS), \
|
| 324 |
+
0, \
|
| 325 |
+
sizeof(*(SORT_PTR)) * 8, \
|
| 326 |
+
(STREAM));
|
| 327 |
+
#define SUM_WS( \
|
| 328 |
+
SUM_PTR, OUT_PTR, NUM_OBJECTS, WORKSPACE_PTR, WORKSPACE_BYTES, STREAM) \
|
| 329 |
+
cub::DeviceReduce::Sum( \
|
| 330 |
+
(WORKSPACE_PTR), \
|
| 331 |
+
(WORKSPACE_BYTES), \
|
| 332 |
+
(SUM_PTR), \
|
| 333 |
+
(OUT_PTR), \
|
| 334 |
+
(NUM_OBJECTS), \
|
| 335 |
+
(STREAM));
|
| 336 |
+
#define MIN_WS( \
|
| 337 |
+
MIN_PTR, OUT_PTR, NUM_OBJECTS, WORKSPACE_PTR, WORKSPACE_BYTES, STREAM) \
|
| 338 |
+
cub::DeviceReduce::Min( \
|
| 339 |
+
(WORKSPACE_PTR), \
|
| 340 |
+
(WORKSPACE_BYTES), \
|
| 341 |
+
(MIN_PTR), \
|
| 342 |
+
(OUT_PTR), \
|
| 343 |
+
(NUM_OBJECTS), \
|
| 344 |
+
(STREAM));
|
| 345 |
+
// Device-wide max reduction of NUM_OBJECTS items from MAX_PTR into
// *OUT_PTR, using a caller-provided workspace.
// Fixed: previously dispatched to cub::DeviceReduce::Min, so MAX_WS
// silently computed a minimum instead of a maximum.
#define MAX_WS(                                                            \
    MAX_PTR, OUT_PTR, NUM_OBJECTS, WORKSPACE_PTR, WORKSPACE_BYTES, STREAM) \
  cub::DeviceReduce::Max(                                                  \
      (WORKSPACE_PTR),                                                     \
      (WORKSPACE_BYTES),                                                   \
      (MAX_PTR),                                                           \
      (OUT_PTR),                                                           \
      (NUM_OBJECTS),                                                       \
      (STREAM));
|
| 354 |
+
//
|
| 355 |
+
//
|
| 356 |
+
//
|
| 357 |
+
// TODO: rewrite using nested contexts instead of temporary names.
|
| 358 |
+
#define REDUCE(REDUCE_PTR, RESULT_PTR, NUM_ITEMS, REDUCE_OP, REDUCE_INIT) \
|
| 359 |
+
cub::DeviceReduce::Reduce( \
|
| 360 |
+
TMPN1, \
|
| 361 |
+
TMPN1##_bytes, \
|
| 362 |
+
(REDUCE_PTR), \
|
| 363 |
+
(RESULT_PTR), \
|
| 364 |
+
(NUM_ITEMS), \
|
| 365 |
+
(REDUCE_OP), \
|
| 366 |
+
(REDUCE_INIT)); \
|
| 367 |
+
HANDLECUDA(cudaMalloc(&TMPN1, TMPN1##_bytes)); \
|
| 368 |
+
cub::DeviceReduce::Reduce( \
|
| 369 |
+
TMPN1, \
|
| 370 |
+
TMPN1##_bytes, \
|
| 371 |
+
(REDUCE_PTR), \
|
| 372 |
+
(RESULT_PTR), \
|
| 373 |
+
(NUM_ITEMS), \
|
| 374 |
+
(REDUCE_OP), \
|
| 375 |
+
(REDUCE_INIT)); \
|
| 376 |
+
HANDLECUDA(cudaFree(TMPN1));
|
| 377 |
+
#define REDUCE_WS( \
|
| 378 |
+
REDUCE_PTR, \
|
| 379 |
+
RESULT_PTR, \
|
| 380 |
+
NUM_ITEMS, \
|
| 381 |
+
REDUCE_OP, \
|
| 382 |
+
REDUCE_INIT, \
|
| 383 |
+
WORKSPACE_PTR, \
|
| 384 |
+
WORSPACE_BYTES, \
|
| 385 |
+
STREAM) \
|
| 386 |
+
cub::DeviceReduce::Reduce( \
|
| 387 |
+
(WORKSPACE_PTR), \
|
| 388 |
+
(WORSPACE_BYTES), \
|
| 389 |
+
(REDUCE_PTR), \
|
| 390 |
+
(RESULT_PTR), \
|
| 391 |
+
(NUM_ITEMS), \
|
| 392 |
+
(REDUCE_OP), \
|
| 393 |
+
(REDUCE_INIT), \
|
| 394 |
+
(STREAM));
|
| 395 |
+
// Stream compaction: copy ITEM_PTR[i] to OUT_PTR for every i with
// FLAGS_PTR[i] set; the number of selected items is written to
// *NUM_SELECTED_PTR. Uses a caller-provided workspace.
// Fixed: the stream argument was written `stream = (STREAM)`, which
// assigns to a caller-scope identifier named `stream` (or fails to
// compile when none exists) instead of simply passing the stream handle.
#define SELECT_FLAGS_WS(       \
    FLAGS_PTR,                 \
    ITEM_PTR,                  \
    OUT_PTR,                   \
    NUM_SELECTED_PTR,          \
    NUM_ITEMS,                 \
    WORKSPACE_PTR,             \
    WORSPACE_BYTES,            \
    STREAM)                    \
  cub::DeviceSelect::Flagged(  \
      (WORKSPACE_PTR),         \
      (WORSPACE_BYTES),        \
      (ITEM_PTR),              \
      (FLAGS_PTR),             \
      (OUT_PTR),               \
      (NUM_SELECTED_PTR),      \
      (NUM_ITEMS),             \
      (STREAM));
|
| 413 |
+
|
| 414 |
+
#define COPY_HOST_DEV(PTR_D, PTR_H, TYPE, SIZE) \
|
| 415 |
+
HANDLECUDA(cudaMemcpy( \
|
| 416 |
+
(PTR_D), (PTR_H), sizeof(TYPE) * (SIZE), cudaMemcpyHostToDevice))
|
| 417 |
+
#define COPY_DEV_HOST(PTR_H, PTR_D, TYPE, SIZE) \
|
| 418 |
+
HANDLECUDA(cudaMemcpy( \
|
| 419 |
+
(PTR_H), (PTR_D), sizeof(TYPE) * (SIZE), cudaMemcpyDeviceToHost))
|
| 420 |
+
#define COPY_DEV_DEV(PTR_T, PTR_S, TYPE, SIZE) \
|
| 421 |
+
HANDLECUDA(cudaMemcpy( \
|
| 422 |
+
(PTR_T), (PTR_S), sizeof(TYPE) * (SIZE), cudaMemcpyDeviceToDevice))
|
| 423 |
+
//
|
| 424 |
+
// We *must* use cudaMallocManaged for pointers on device that should
|
| 425 |
+
// interact with pytorch. However, this comes at a significant speed penalty.
|
| 426 |
+
// We're using plain CUDA pointers for the rendering operations and
|
| 427 |
+
// explicitly copy results to managed pointers wrapped for pytorch (see
|
| 428 |
+
// pytorch/util.h).
|
| 429 |
+
#define MALLOC(VAR, TYPE, SIZE) cudaMalloc(&(VAR), sizeof(TYPE) * (SIZE))
|
| 430 |
+
#define FREE(PTR) HANDLECUDA(cudaFree(PTR))
|
| 431 |
+
#define MEMSET(VAR, VAL, TYPE, SIZE, STREAM) \
|
| 432 |
+
HANDLECUDA(cudaMemsetAsync((VAR), (VAL), sizeof(TYPE) * (SIZE), (STREAM)))
|
| 433 |
+
|
| 434 |
+
#define LAUNCH_MAX_PARALLEL_1D(FUNC, N, STREAM, ...) \
|
| 435 |
+
{ \
|
| 436 |
+
int64_t max_threads = \
|
| 437 |
+
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; \
|
| 438 |
+
uint num_threads = min((N), max_threads); \
|
| 439 |
+
uint num_blocks = iDivCeil((N), num_threads); \
|
| 440 |
+
FUNC<<<num_blocks, num_threads, 0, (STREAM)>>>(__VA_ARGS__); \
|
| 441 |
+
}
|
| 442 |
+
#define LAUNCH_PARALLEL_1D(FUNC, N, TN, STREAM, ...) \
|
| 443 |
+
{ \
|
| 444 |
+
uint num_threads = min(static_cast<int>(N), static_cast<int>(TN)); \
|
| 445 |
+
uint num_blocks = iDivCeil((N), num_threads); \
|
| 446 |
+
FUNC<<<num_blocks, num_threads, 0, (STREAM)>>>(__VA_ARGS__); \
|
| 447 |
+
}
|
| 448 |
+
#define LAUNCH_MAX_PARALLEL_2D(FUNC, NX, NY, STREAM, ...) \
|
| 449 |
+
{ \
|
| 450 |
+
int64_t max_threads = \
|
| 451 |
+
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; \
|
| 452 |
+
int64_t max_threads_sqrt = static_cast<int64_t>(sqrt(max_threads)); \
|
| 453 |
+
dim3 num_threads, num_blocks; \
|
| 454 |
+
num_threads.x = min((NX), max_threads_sqrt); \
|
| 455 |
+
num_blocks.x = iDivCeil((NX), num_threads.x); \
|
| 456 |
+
num_threads.y = min((NY), max_threads_sqrt); \
|
| 457 |
+
num_blocks.y = iDivCeil((NY), num_threads.y); \
|
| 458 |
+
num_threads.z = 1; \
|
| 459 |
+
num_blocks.z = 1; \
|
| 460 |
+
FUNC<<<num_blocks, num_threads, 0, (STREAM)>>>(__VA_ARGS__); \
|
| 461 |
+
}
|
| 462 |
+
#define LAUNCH_PARALLEL_2D(FUNC, NX, NY, TX, TY, STREAM, ...) \
|
| 463 |
+
{ \
|
| 464 |
+
dim3 num_threads, num_blocks; \
|
| 465 |
+
num_threads.x = min((NX), (TX)); \
|
| 466 |
+
num_blocks.x = iDivCeil((NX), num_threads.x); \
|
| 467 |
+
num_threads.y = min((NY), (TY)); \
|
| 468 |
+
num_blocks.y = iDivCeil((NY), num_threads.y); \
|
| 469 |
+
num_threads.z = 1; \
|
| 470 |
+
num_blocks.z = 1; \
|
| 471 |
+
FUNC<<<num_blocks, num_threads, 0, (STREAM)>>>(__VA_ARGS__); \
|
| 472 |
+
}
|
| 473 |
+
|
| 474 |
+
#define GET_PARALLEL_IDX_1D(VARNAME, N) \
|
| 475 |
+
const uint VARNAME = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; \
|
| 476 |
+
if (VARNAME >= (N)) { \
|
| 477 |
+
return; \
|
| 478 |
+
}
|
| 479 |
+
#define GET_PARALLEL_IDS_2D(VAR_X, VAR_Y, WIDTH, HEIGHT) \
|
| 480 |
+
const uint VAR_X = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; \
|
| 481 |
+
const uint VAR_Y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y; \
|
| 482 |
+
if (VAR_X >= (WIDTH) || VAR_Y >= (HEIGHT)) \
|
| 483 |
+
return;
|
| 484 |
+
#define END_PARALLEL()
|
| 485 |
+
#define END_PARALLEL_NORET()
|
| 486 |
+
#define END_PARALLEL_2D_NORET()
|
| 487 |
+
#define END_PARALLEL_2D()
|
| 488 |
+
#define RETURN_PARALLEL() return
|
| 489 |
+
#define CHECKLAUNCH() C10_CUDA_CHECK(cudaGetLastError());
|
| 490 |
+
#define ISONDEVICE true
|
| 491 |
+
#define SYNCDEVICE() HANDLECUDA(cudaDeviceSynchronize())
|
| 492 |
+
#define START_TIME(TN) \
|
| 493 |
+
cudaEvent_t __time_start_##TN, __time_stop_##TN; \
|
| 494 |
+
cudaEventCreate(&__time_start_##TN); \
|
| 495 |
+
cudaEventCreate(&__time_stop_##TN); \
|
| 496 |
+
cudaEventRecord(__time_start_##TN);
|
| 497 |
+
#define STOP_TIME(TN) cudaEventRecord(__time_stop_##TN);
|
| 498 |
+
#define GET_TIME(TN, TOPTR) \
|
| 499 |
+
cudaEventSynchronize(__time_stop_##TN); \
|
| 500 |
+
cudaEventElapsedTime((TOPTR), __time_start_##TN, __time_stop_##TN);
|
| 501 |
+
#define START_TIME_CU(TN) START_TIME(TN) // fixed: was START_TIME(CN) — CN is undefined, so the macro never compiled when used
|
| 502 |
+
#define STOP_TIME_CU(TN) STOP_TIME(TN)
|
| 503 |
+
#define GET_TIME_CU(TN, TOPTR) GET_TIME(TN, TOPTR)
|
| 504 |
+
|
| 505 |
+
#endif
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.backward.gpu.cu
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include "../include/renderer.backward.instantiate.h"
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.backward_dbg.gpu.cu
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include "../include/renderer.backward_dbg.instantiate.h"
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.calc_gradients.gpu.cu
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include "../include/renderer.calc_gradients.instantiate.h"
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.calc_signature.gpu.cu
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include "../include/renderer.calc_signature.instantiate.h"
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.construct.gpu.cu
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include "../include/renderer.construct.instantiate.h"
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.create_selector.gpu.cu
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include "../include/renderer.create_selector.instantiate.h"
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.destruct.gpu.cu
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
*
|
| 5 |
+
* This source code is licensed under the BSD-style license found in the
|
| 6 |
+
* LICENSE file in the root directory of this source tree.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
#include "../include/renderer.destruct.instantiate.h"
|